Merge remote-tracking branch 'media_tree/vsp1' into generic-zpos-v8
author Benjamin Gaignard <benjamin.gaignard@linaro.org>
Fri, 29 Jul 2016 07:38:55 +0000 (09:38 +0200)
committer Benjamin Gaignard <benjamin.gaignard@linaro.org>
Fri, 29 Jul 2016 07:38:55 +0000 (09:38 +0200)
2219 files changed:
.mailmap
Documentation/.gitignore [new file with mode: 0644]
Documentation/ABI/testing/configfs-usb-gadget-uvc
Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
Documentation/DocBook/Makefile
Documentation/DocBook/device-drivers.tmpl
Documentation/DocBook/gpu.tmpl [deleted file]
Documentation/Makefile.sphinx [new file with mode: 0644]
Documentation/arm64/silicon-errata.txt
Documentation/conf.py [new file with mode: 0644]
Documentation/devicetree/bindings/display/arm,malidp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
Documentation/devicetree/bindings/display/bridge/sii902x.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/connector/hdmi-connector.txt
Documentation/devicetree/bindings/display/fsl,dcu.txt
Documentation/devicetree/bindings/display/imx/ldb.txt
Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/msm/dsi.txt
Documentation/devicetree/bindings/display/msm/mdp.txt [deleted file]
Documentation/devicetree/bindings/display/msm/mdp4.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/msm/mdp5.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/panel-dpi.txt
Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
Documentation/devicetree/bindings/hwmon/ina2xx.txt
Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt
Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt
Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt [new file with mode: 0644]
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/dmaengine/provider.txt
Documentation/filesystems/devpts.txt
Documentation/gdb-kernel-debugging.txt
Documentation/gpu/drm-internals.rst [new file with mode: 0644]
Documentation/gpu/drm-kms-helpers.rst [new file with mode: 0644]
Documentation/gpu/drm-kms.rst [new file with mode: 0644]
Documentation/gpu/drm-mm.rst [new file with mode: 0644]
Documentation/gpu/drm-uapi.rst [new file with mode: 0644]
Documentation/gpu/i915.rst [new file with mode: 0644]
Documentation/gpu/index.rst [new file with mode: 0644]
Documentation/gpu/introduction.rst [new file with mode: 0644]
Documentation/gpu/kms-properties.csv [new file with mode: 0644]
Documentation/gpu/vga-switcheroo.rst [new file with mode: 0644]
Documentation/index.rst [new file with mode: 0644]
Documentation/kdump/gdbmacros.txt
Documentation/kernel-parameters.txt
Documentation/leds/leds-class.txt
Documentation/mic/mpssd/mpssd.c
Documentation/networking/dsa/dsa.txt
Documentation/networking/ip-sysctl.txt
Documentation/scsi/scsi_eh.txt
Documentation/security/keys.txt
Documentation/security/self-protection.txt
Documentation/sphinx/convert_template.sed [new file with mode: 0644]
Documentation/sphinx/kernel-doc.py [new file with mode: 0644]
Documentation/sphinx/post_convert.sed [new file with mode: 0644]
Documentation/sphinx/tmplcvt [new file with mode: 0755]
Documentation/sync_file.txt
Documentation/x86/intel_mpx.txt
Documentation/x86/tlb.txt
Documentation/x86/x86_64/machinecheck
Documentation/zh_CN/CodingStyle
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/pgalloc.h
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/abilis_tb100.dtsi
arch/arc/boot/dts/abilis_tb101.dtsi
arch/arc/boot/dts/axc001.dtsi
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/eznps.dts
arch/arc/boot/dts/nsim_700.dts
arch/arc/boot/dts/nsimosci.dts
arch/arc/boot/dts/nsimosci_hs.dts
arch/arc/boot/dts/nsimosci_hs_idu.dts
arch/arc/boot/dts/skeleton.dtsi
arch/arc/boot/dts/skeleton_hs.dtsi
arch/arc/boot/dts/skeleton_hs_idu.dtsi
arch/arc/boot/dts/vdk_axc003.dtsi
arch/arc/boot/dts/vdk_axc003_idu.dtsi
arch/arc/boot/dts/vdk_axs10x_mb.dtsi
arch/arc/boot/dts/vdk_hs38_smp.dts
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/atomic.h
arch/arc/include/asm/entry-compact.h
arch/arc/include/asm/mmu_context.h
arch/arc/include/asm/pgalloc.h
arch/arc/include/asm/pgtable.h
arch/arc/include/asm/processor.h
arch/arc/include/asm/smp.h
arch/arc/include/asm/spinlock.h
arch/arc/include/asm/thread_info.h
arch/arc/include/asm/uaccess.h
arch/arc/include/uapi/asm/swab.h
arch/arc/kernel/entry-compact.S
arch/arc/kernel/intc-compact.c
arch/arc/kernel/perf_event.c
arch/arc/kernel/setup.c
arch/arc/kernel/signal.c
arch/arc/kernel/stacktrace.c
arch/arc/kernel/troubleshoot.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/am57xx-idk-common.dtsi
arch/arm/boot/dts/armada-385-linksys.dtsi
arch/arm/boot/dts/dm8148-evm.dts
arch/arm/boot/dts/dm8148-t410.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra74x.dtsi
arch/arm/boot/dts/exynos5250-snow-common.dtsi
arch/arm/boot/dts/exynos5420-peach-pit.dts
arch/arm/boot/dts/omap3-evm-37xx.dts
arch/arm/boot/dts/omap3-igep.dtsi
arch/arm/boot/dts/omap3-igep0020-common.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/omap3-zoom3.dts
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap5-igep0050.dts
arch/arm/boot/dts/omap5-uevm.dts
arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
arch/arm/boot/dts/stih407-family.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-r8-chip.dts
arch/arm/boot/dts/sun6i-a31s-primo81.dts
arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/tegra30-beaver.dts
arch/arm/configs/exynos_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/include/asm/pgalloc.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/kernel/ptrace.c
arch/arm/kernel/smp.c
arch/arm/kvm/arm.c
arch/arm/mach-exynos/Kconfig
arch/arm/mach-imx/mach-imx6ul.c
arch/arm/mach-mvebu/Makefile
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-omap1/ams-delta-fiq-handler.S
arch/arm/mach-omap1/ams-delta-fiq.c
arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/board-ldp.c
arch/arm/mach-omap2/board-rx51-video.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/display.h
arch/arm/mach-omap2/dss-common.c
arch/arm/mach-omap2/omap-secure.h
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/powerdomain.c
arch/arm/mach-omap2/powerdomains7xx_data.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-vexpress/spc.c
arch/arm/plat-samsung/devs.c
arch/arm64/Kconfig
arch/arm64/Kconfig.debug
arch/arm64/Makefile
arch/arm64/boot/dts/lg/lg1312.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/elf.h
arch/arm64/include/asm/kgdb.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/smp.h
arch/arm64/include/asm/spinlock.h
arch/arm64/include/asm/uaccess.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/kgdb.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/traps.c
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/context.c
arch/arm64/mm/dump.c
arch/arm64/mm/fault.c
arch/arm64/mm/flush.c
arch/arm64/mm/hugetlbpage.c
arch/avr32/include/asm/pgalloc.h
arch/cris/include/asm/pgalloc.h
arch/frv/mm/pgalloc.c
arch/hexagon/include/asm/pgalloc.h
arch/ia64/Kconfig
arch/ia64/include/asm/thread_info.h
arch/ia64/kernel/init_task.c
arch/m32r/boot/compressed/m32r_sio.c
arch/m68k/coldfire/head.S
arch/m68k/coldfire/m5272.c
arch/m68k/coldfire/pci.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/ifpsp060/src/fpsp.S
arch/m68k/ifpsp060/src/pfpsp.S
arch/m68k/include/asm/dma.h
arch/m68k/include/asm/m525xsim.h
arch/m68k/include/asm/mcf_pgalloc.h
arch/m68k/include/asm/mcfmmu.h
arch/m68k/include/asm/motorola_pgalloc.h
arch/m68k/include/asm/q40_master.h
arch/m68k/include/asm/sun3_pgalloc.h
arch/m68k/mac/iop.c
arch/m68k/math-emu/fp_decode.h
arch/metag/include/asm/pgalloc.h
arch/microblaze/include/asm/pgalloc.h
arch/microblaze/mm/pgtable.c
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/pgalloc.h
arch/mips/include/asm/pgtable.h
arch/mips/kvm/emulate.c
arch/mips/kvm/interrupt.h
arch/mips/kvm/locore.S
arch/mips/kvm/mips.c
arch/mn10300/include/asm/thread_info.h
arch/mn10300/kernel/kgdb.c
arch/mn10300/mm/pgtable.c
arch/nios2/include/asm/pgalloc.h
arch/openrisc/include/asm/pgalloc.h
arch/openrisc/mm/ioremap.c
arch/parisc/include/asm/pgalloc.h
arch/parisc/include/asm/traps.h
arch/parisc/kernel/processor.c
arch/parisc/kernel/time.c
arch/parisc/kernel/unaligned.c
arch/parisc/kernel/unwind.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/32/pgalloc.h
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/include/asm/book3s/64/pgalloc.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/book3s/64/radix.h
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
arch/powerpc/include/asm/book3s/64/tlbflush.h
arch/powerpc/include/asm/book3s/pgalloc.h
arch/powerpc/include/asm/nohash/64/pgalloc.h
arch/powerpc/include/asm/reg.h
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/tm.S
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/platforms/512x/clock-commonclk.c
arch/powerpc/platforms/cell/spufs/coredump.c
arch/powerpc/platforms/pseries/eeh_pseries.c
arch/powerpc/platforms/pseries/iommu.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/defconfig
arch/s390/include/asm/fpu/api.h
arch/s390/include/asm/kvm_host.h
arch/s390/kernel/ipl.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kvm/intercept.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/fault.c
arch/s390/mm/pgalloc.c
arch/s390/mm/pgtable.c
arch/s390/net/bpf_jit.h
arch/s390/net/bpf_jit_comp.c
arch/score/include/asm/pgalloc.h
arch/sh/include/asm/pgalloc.h
arch/sh/mm/pgtable.c
arch/sparc/include/asm/head_64.h
arch/sparc/include/asm/pgalloc_64.h
arch/sparc/include/asm/ttable.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/rtrap_64.S
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/sigutil_32.c
arch/sparc/kernel/sigutil_64.c
arch/sparc/kernel/urtt_fill.S [new file with mode: 0644]
arch/sparc/mm/init_64.c
arch/tile/include/asm/thread_info.h
arch/tile/kernel/process.c
arch/tile/mm/pgtable.c
arch/um/kernel/mem.c
arch/unicore32/include/asm/pgalloc.h
arch/x86/Kconfig
arch/x86/boot/Makefile
arch/x86/events/core.c
arch/x86/events/intel/Makefile
arch/x86/events/intel/core.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/intel-family.h [new file with mode: 0644]
arch/x86/include/asm/kprobes.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/pgalloc.h
arch/x86/include/asm/pvclock.h
arch/x86/include/asm/stacktrace.h
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/espfix_64.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/pvclock.c
arch/x86/kernel/traps.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/kasan_init_64.c
arch/x86/mm/pgtable.c
arch/x86/pci/acpi.c
arch/x86/platform/efi/efi_64.c
arch/x86/power/hibernate_64.c
arch/x86/power/hibernate_asm_64.S
arch/x86/xen/mmu.c
arch/x86/xen/p2m.c
arch/xtensa/include/asm/pgalloc.h
block/blk-lib.c
block/blk-mq.c
block/ioprio.c
crypto/asymmetric_keys/Kconfig
crypto/asymmetric_keys/mscode_parser.c
crypto/asymmetric_keys/pkcs7_verify.c
crypto/asymmetric_keys/restrict.c
crypto/crypto_user.c
crypto/rsa-pkcs1pad.c
drivers/acpi/acpi_dbg.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpi_video.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/bus.c
drivers/acpi/ec.c
drivers/acpi/internal.h
drivers/acpi/nfit.c
drivers/acpi/nfit.h
drivers/acpi/pci_link.c
drivers/acpi/processor_throttling.c
drivers/ata/ahci_seattle.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/sata_mv.c
drivers/atm/firestream.c
drivers/atm/iphase.c
drivers/base/Makefile
drivers/base/isa.c
drivers/base/module.c
drivers/base/power/opp/cpu.c
drivers/base/power/opp/of.c
drivers/base/power/opp/opp.h
drivers/bcma/bcma_private.h
drivers/block/aoe/aoecmd.c
drivers/block/nbd.c
drivers/block/xen-blkfront.c
drivers/char/agp/intel-gtt.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/clk/Kconfig
drivers/clk/at91/clk-programmable.c
drivers/clk/clk-oxnas.c
drivers/clk/microchip/clk-pic32mzda.c
drivers/clk/rockchip/clk-cpu.c
drivers/clk/rockchip/clk-mmc-phase.c
drivers/clk/rockchip/clk-rk3399.c
drivers/clk/sunxi/clk-sun4i-display.c
drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
drivers/connector/cn_proc.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/pcc-cpufreq.c
drivers/cpuidle/cpuidle.c
drivers/crypto/ccp/ccp-crypto-aes-xts.c
drivers/crypto/omap-sham.c
drivers/crypto/qat/qat_common/Makefile
drivers/crypto/ux500/hash/hash_core.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_ctr.c
drivers/crypto/vmx/ppc-xlate.pl
drivers/devfreq/devfreq.c
drivers/devfreq/event/exynos-nocp.c
drivers/dma-buf/Kconfig
drivers/dma-buf/Makefile
drivers/dma-buf/dma-buf.c
drivers/dma-buf/fence-array.c [new file with mode: 0644]
drivers/dma-buf/fence.c
drivers/dma-buf/reservation.c
drivers/dma-buf/sync_file.c
drivers/dma/at_xdmac.c
drivers/dma/mv_xor.c
drivers/edac/edac_mc.c
drivers/edac/sb_edac.c
drivers/extcon/extcon-palmas.c
drivers/firmware/efi/arm-init.c
drivers/gpio/Kconfig
drivers/gpio/gpio-104-dio-48e.c
drivers/gpio/gpio-104-idi-48.c
drivers/gpio/gpio-bcm-kona.c
drivers/gpio/gpio-lpc32xx.c
drivers/gpio/gpio-sch.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpio-zynq.c
drivers/gpio/gpiolib-legacy.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
drivers/gpu/drm/amd/amdgpu/ci_dpm.c
drivers/gpu/drm/amd/amdgpu/ci_dpm.h
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
drivers/gpu/drm/amd/amdgpu/iceland_smc.c
drivers/gpu/drm/amd/amdgpu/iceland_smum.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h [deleted file]
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/ppsmc.h
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/include/amd_pcie.h
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
drivers/gpu/drm/amd/include/atombios.h
drivers/gpu/drm/amd/include/cgs_common.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
drivers/gpu/drm/amd/powerplay/inc/smu74.h
drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
drivers/gpu/drm/amd/powerplay/inc/smumgr.h
drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
drivers/gpu/drm/amd/scheduler/sched_fence.c
drivers/gpu/drm/arc/Kconfig
drivers/gpu/drm/arc/Makefile
drivers/gpu/drm/arc/arcpgu.h
drivers/gpu/drm/arc/arcpgu_crtc.c
drivers/gpu/drm/arc/arcpgu_drv.c
drivers/gpu/drm/arc/arcpgu_hdmi.c
drivers/gpu/drm/arc/arcpgu_sim.c [new file with mode: 0644]
drivers/gpu/drm/arm/Kconfig
drivers/gpu/drm/arm/Makefile
drivers/gpu/drm/arm/hdlcd_crtc.c
drivers/gpu/drm/arm/hdlcd_drv.c
drivers/gpu/drm/arm/hdlcd_drv.h
drivers/gpu/drm/arm/malidp_crtc.c [new file with mode: 0644]
drivers/gpu/drm/arm/malidp_drv.c [new file with mode: 0644]
drivers/gpu/drm/arm/malidp_drv.h [new file with mode: 0644]
drivers/gpu/drm/arm/malidp_hw.c [new file with mode: 0644]
drivers/gpu/drm/arm/malidp_hw.h [new file with mode: 0644]
drivers/gpu/drm/arm/malidp_planes.c [new file with mode: 0644]
drivers/gpu/drm/arm/malidp_regs.h [new file with mode: 0644]
drivers/gpu/drm/armada/Kconfig
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/armada/armada_overlay.c
drivers/gpu/drm/ast/Kconfig
drivers/gpu/drm/ast/ast_drv.c
drivers/gpu/drm/ast/ast_fb.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/ast/ast_ttm.c
drivers/gpu/drm/atmel-hlcdc/Kconfig
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bochs/Kconfig
drivers/gpu/drm/bochs/bochs_drv.c
drivers/gpu/drm/bochs/bochs_mm.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/Makefile
drivers/gpu/drm/bridge/adv7511/Kconfig [new file with mode: 0644]
drivers/gpu/drm/bridge/adv7511/Makefile [new file with mode: 0644]
drivers/gpu/drm/bridge/adv7511/adv7511.h [new file with mode: 0644]
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c [new file with mode: 0644]
drivers/gpu/drm/bridge/adv7511/adv7533.c [new file with mode: 0644]
drivers/gpu/drm/bridge/analogix-anx78xx.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
drivers/gpu/drm/bridge/dw-hdmi.c
drivers/gpu/drm/bridge/nxp-ptn3460.c
drivers/gpu/drm/bridge/parade-ps8622.c
drivers/gpu/drm/bridge/sii902x.c [new file with mode: 0644]
drivers/gpu/drm/bridge/tc358767.c [new file with mode: 0644]
drivers/gpu/drm/cirrus/Kconfig
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/cirrus/cirrus_ttm.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_bridge.c
drivers/gpu/drm/drm_bufs.c
drivers/gpu/drm/drm_cache.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_crtc_internal.h
drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/drm_dp_aux_dev.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid_load.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_fourcc.c [new file with mode: 0644]
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_gem_cma_helper.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_legacy.h
drivers/gpu/drm/drm_lock.c
drivers/gpu/drm/drm_memory.c
drivers/gpu/drm/drm_mipi_dsi.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_modeset_lock.c
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/drm_platform.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/drm_scatter.c
drivers/gpu/drm/drm_simple_kms_helper.c [new file with mode: 0644]
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/drm_vm.c
drivers/gpu/drm/drm_vma_manager.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
drivers/gpu/drm/etnaviv/state_hi.xml.h
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_dp.c
drivers/gpu/drm/exynos/exynos_drm_core.c
drivers/gpu/drm/exynos/exynos_drm_dpi.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_iommu.c
drivers/gpu/drm/exynos/exynos_drm_iommu.h
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/fsl-dcu/Kconfig
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
drivers/gpu/drm/fsl-dcu/fsl_tcon.c
drivers/gpu/drm/gma500/Kconfig
drivers/gpu/drm/gma500/gma_display.c
drivers/gpu/drm/gma500/gma_display.h
drivers/gpu/drm/gma500/psb_intel_display.c
drivers/gpu/drm/hisilicon/kirin/Kconfig
drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
drivers/gpu/drm/i2c/Kconfig
drivers/gpu/drm/i2c/Makefile
drivers/gpu/drm/i2c/adv7511.c [deleted file]
drivers/gpu/drm/i2c/adv7511.h [deleted file]
drivers/gpu/drm/i2c/ch7006_drv.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/Kconfig.debug
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/gvt/Makefile [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/debug.h [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/gvt.c [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/gvt.h [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/hypercall.h [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/mpt.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c [deleted file]
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_batch_pool.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_dmabuf.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_reg.h
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_params.h
drivers/gpu/drm/i915/i915_pci.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_pvinfo.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/i915_vgpu.h
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_breadcrumbs.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_color.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_device_info.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_aux_backlight.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dpio_phy.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
drivers/gpu/drm/i915/intel_dsi_pll.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_fifo_underrun.c
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_guc_fwif.h
drivers/gpu/drm/i915/intel_guc_loader.c
drivers/gpu/drm/i915/intel_gvt.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_gvt.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_mocs.c
drivers/gpu/drm/i915/intel_modes.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sideband.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_vbt_defs.h
drivers/gpu/drm/imx/Kconfig
drivers/gpu/drm/imx/dw_hdmi-imx.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-drm.h
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/imx/ipuv3-plane.h
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/mediatek/Kconfig
drivers/gpu/drm/mediatek/Makefile
drivers/gpu/drm/mediatek/mtk_cec.c [new file with mode: 0644]
drivers/gpu/drm/mediatek/mtk_cec.h [new file with mode: 0644]
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c [new file with mode: 0644]
drivers/gpu/drm/mediatek/mtk_hdmi.h [new file with mode: 0644]
drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c [new file with mode: 0644]
drivers/gpu/drm/mediatek/mtk_hdmi_regs.h [new file with mode: 0644]
drivers/gpu/drm/mediatek/mtk_mipi_tx.c
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c [new file with mode: 0644]
drivers/gpu/drm/mgag200/Kconfig
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/mgag200/mgag200_ttm.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/dsi/dsi.c
drivers/gpu/drm/msm/dsi/dsi_cfg.c
drivers/gpu/drm/msm/dsi/dsi_cfg.h
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
drivers/gpu/drm/msm/edp/edp_connector.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi.h
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c [new file with mode: 0644]
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/msm/msm_gem_shrinker.c [new file with mode: 0644]
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_kms.h
drivers/gpu/drm/msm/msm_perf.c
drivers/gpu/drm/msm/msm_rd.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/nouveau/Kconfig
drivers/gpu/drm/nouveau/dispnv04/crtc.c
drivers/gpu/drm/nouveau/dispnv04/disp.c
drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
drivers/gpu/drm/nouveau/include/nvif/cl0080.h
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_chan.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/nouveau/nouveau_hwmon.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nouveau_usif.c
drivers/gpu/drm/nouveau/nv04_fbcon.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_fbcon.c
drivers/gpu/drm/nouveau/nvc0_fbcon.c
drivers/gpu/drm/nouveau/nvkm/core/subdev.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
drivers/gpu/drm/omapdrm/Kconfig
drivers/gpu/drm/omapdrm/displays/Kconfig
drivers/gpu/drm/omapdrm/displays/Makefile
drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
drivers/gpu/drm/omapdrm/displays/connector-dvi.c
drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
drivers/gpu/drm/omapdrm/displays/panel-dpi.c
drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
drivers/gpu/drm/omapdrm/dss/core.c
drivers/gpu/drm/omapdrm/dss/dispc.c
drivers/gpu/drm/omapdrm/dss/dispc.h
drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
drivers/gpu/drm/omapdrm/dss/display.c
drivers/gpu/drm/omapdrm/dss/dpi.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/dss-of.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/omapdrm/dss/dss.h
drivers/gpu/drm/omapdrm/dss/dss_features.c
drivers/gpu/drm/omapdrm/dss/dss_features.h
drivers/gpu/drm/omapdrm/dss/hdmi.h
drivers/gpu/drm/omapdrm/dss/hdmi4.c
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
drivers/gpu/drm/omapdrm/dss/hdmi5.c
drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
drivers/gpu/drm/omapdrm/dss/hdmi_common.c
drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
drivers/gpu/drm/omapdrm/dss/omapdss.h
drivers/gpu/drm/omapdrm/dss/output.c
drivers/gpu/drm/omapdrm/dss/pll.c
drivers/gpu/drm/omapdrm/dss/rfbi.c
drivers/gpu/drm/omapdrm/dss/sdi.c
drivers/gpu/drm/omapdrm/dss/venc.c
drivers/gpu/drm/omapdrm/dss/video-pll.c
drivers/gpu/drm/omapdrm/omap_connector.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_debugfs.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_fbdev.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/qxl/Kconfig
drivers/gpu/drm/qxl/qxl_cmd.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_draw.c
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_fb.c
drivers/gpu/drm/qxl/qxl_kms.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/qxl/qxl_ttm.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_crtc.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/rcar-du/Makefile
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.h
drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c [deleted file]
drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h [deleted file]
drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
drivers/gpu/drm/rcar-du/rcar_du_kms.c
drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
drivers/gpu/drm/rcar-du/rcar_du_plane.c
drivers/gpu/drm/rcar-du/rcar_du_regs.h
drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
drivers/gpu/drm/rockchip/Kconfig
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/dw-mipi-dsi.c
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
drivers/gpu/drm/rockchip/inno_hdmi.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/shmobile/Kconfig
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/shmobile/shmob_drm_drv.c
drivers/gpu/drm/sis/sis_mm.c
drivers/gpu/drm/sti/Kconfig
drivers/gpu/drm/sti/sti_awg_utils.c
drivers/gpu/drm/sti/sti_compositor.c
drivers/gpu/drm/sti/sti_compositor.h
drivers/gpu/drm/sti/sti_crtc.c
drivers/gpu/drm/sti/sti_cursor.c
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/sti/sti_drv.h
drivers/gpu/drm/sti/sti_dvo.c
drivers/gpu/drm/sti/sti_gdp.c
drivers/gpu/drm/sti/sti_hda.c
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_hdmi.h
drivers/gpu/drm/sti/sti_hqvdp.c
drivers/gpu/drm/sti/sti_mixer.c
drivers/gpu/drm/sti/sti_mixer.h
drivers/gpu/drm/sti/sti_plane.c
drivers/gpu/drm/sti/sti_plane.h
drivers/gpu/drm/sti/sti_tvout.c
drivers/gpu/drm/sti/sti_vid.c
drivers/gpu/drm/sti/sti_vid.h
drivers/gpu/drm/sti/sti_vtg.c
drivers/gpu/drm/sun4i/Kconfig
drivers/gpu/drm/sun4i/sun4i_backend.c
drivers/gpu/drm/sun4i/sun4i_crtc.c
drivers/gpu/drm/sun4i/sun4i_dotclock.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_framebuffer.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/sun4i/sun4i_tv.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dpaux.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/dsi.c
drivers/gpu/drm/tegra/fb.c
drivers/gpu/drm/tegra/hdmi.c
drivers/gpu/drm/tegra/hdmi.h
drivers/gpu/drm/tegra/output.c
drivers/gpu/drm/tegra/rgb.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/tegra/sor.h
drivers/gpu/drm/tilcdc/Kconfig
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/udl/Kconfig
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_modeset.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_dpi.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vc4/vc4_qpu_defines.h
drivers/gpu/drm/vc4/vc4_regs.h
drivers/gpu/drm/vc4/vc4_validate.c
drivers/gpu/drm/vc4/vc4_validate_shaders.c
drivers/gpu/drm/vgem/Makefile
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/vgem/vgem_drv.h
drivers/gpu/drm/vgem/vgem_fence.c [new file with mode: 0644]
drivers/gpu/drm/via/via_mm.c
drivers/gpu/drm/virtio/Kconfig
drivers/gpu/drm/virtio/virtgpu_display.c
drivers/gpu/drm/virtio/virtgpu_drm_bus.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/gpu/drm/virtio/virtgpu_ttm.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/host1x/cdma.c
drivers/gpu/host1x/channel.c
drivers/gpu/host1x/debug.c
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/dev.h
drivers/gpu/host1x/hw/cdma_hw.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/host1x/hw/debug_hw.c
drivers/gpu/host1x/hw/intr_hw.c
drivers/gpu/host1x/hw/syncpt_hw.c
drivers/gpu/host1x/intr.c
drivers/gpu/host1x/intr.h
drivers/gpu/host1x/job.c
drivers/gpu/host1x/syncpt.c
drivers/gpu/host1x/syncpt.h
drivers/gpu/ipu-v3/ipu-dc.c
drivers/gpu/ipu-v3/ipu-di.c
drivers/gpu/ipu-v3/ipu-dmfc.c
drivers/gpu/vga/vga_switcheroo.c
drivers/hid/hid-elo.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hiddev.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/lm90.c
drivers/hwtracing/coresight/coresight-tmc-etr.c
drivers/hwtracing/coresight/coresight.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-octeon.c
drivers/i2c/busses/i2c-qup.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-boardinfo.c
drivers/i2c/muxes/i2c-mux-reg.c
drivers/iio/accel/kxsd9.c
drivers/iio/accel/st_accel_buffer.c
drivers/iio/accel/st_accel_core.c
drivers/iio/adc/ad7266.c
drivers/iio/common/st_sensors/st_sensors_buffer.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/dac/Kconfig
drivers/iio/dac/ad5592r-base.c
drivers/iio/gyro/st_gyro_buffer.c
drivers/iio/gyro/st_gyro_core.c
drivers/iio/humidity/am2315.c
drivers/iio/humidity/hdc100x.c
drivers/iio/imu/bmi160/bmi160_core.c
drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
drivers/iio/industrialio-trigger.c
drivers/iio/light/apds9960.c
drivers/iio/light/bh1780.c
drivers/iio/light/max44000.c
drivers/iio/magnetometer/st_magn_buffer.c
drivers/iio/magnetometer/st_magn_core.c
drivers/iio/pressure/bmp280.c
drivers/iio/pressure/st_pressure_buffer.c
drivers/iio/pressure/st_pressure_core.c
drivers/iio/proximity/as3935.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/device.c
drivers/infiniband/core/iwpm_msg.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/mad.c
drivers/infiniband/hw/hfi1/mad.h
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hfi1/qsfp.c
drivers/infiniband/hw/hfi1/trace.c
drivers/infiniband/hw/hfi1/ud.c
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/hfi1/verbs_txreq.c
drivers/infiniband/hw/hfi1/verbs_txreq.h
drivers/infiniband/hw/i40iw/i40iw.h
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx4/ah.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/usnic/usnic_uiom.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/sw/rdmavt/vt.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/infiniband/ulp/srpt/ib_srpt.h
drivers/input/joystick/xpad.c
drivers/input/mouse/elantech.c
drivers/input/mouse/vmmouse.c
drivers/input/rmi4/rmi_bus.c
drivers/input/rmi4/rmi_f12.c
drivers/input/touchscreen/ts4800-ts.c
drivers/input/touchscreen/tsc2004.c
drivers/input/touchscreen/tsc2005.c
drivers/input/touchscreen/tsc200x-core.c
drivers/input/touchscreen/tsc200x-core.h
drivers/input/touchscreen/wacom_w8001.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/intel-iommu.c
drivers/iommu/iova.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-pic32-evic.c
drivers/leds/led-core.c
drivers/leds/trigger/ledtrig-heartbeat.c
drivers/mcb/mcb-core.c
drivers/media/i2c/adv7604.c
drivers/media/platform/omap/omap_vout.c
drivers/media/platform/omap/omap_voutdef.h
drivers/media/platform/omap/omap_voutlib.c
drivers/media/usb/airspy/airspy.c
drivers/media/usb/uvc/uvc_v4l2.c
drivers/media/v4l2-core/v4l2-ioctl.c
drivers/memory/omap-gpmc.c
drivers/mfd/max77620.c
drivers/misc/mei/client.c
drivers/mmc/card/block.c
drivers/mmc/core/mmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/sunxi-mmc.c
drivers/mtd/nand/omap2.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/eba.c
drivers/mtd/ubi/kapi.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/can/at91_can.c
drivers/net/can/c_can/c_can.c
drivers/net/can/dev.c
drivers/net/can/usb/Kconfig
drivers/net/can/usb/gs_usb.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/ethernet/agere/et131x.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/arc/emac_mdio.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/aurora/nb8800.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.h
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvneta_bm.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/pd.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/microchip/enc28j60.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qlogic/qed/qed_sriov.h
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/fddi/skfp/Makefile
drivers/net/geneve.c
drivers/net/macsec.c
drivers/net/phy/dp83867.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/marvell.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/team/team.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/pegasus.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath9k/reg.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/nvdimm/pfn_devs.c
drivers/nvme/host/core.c
drivers/nvme/host/pci.c
drivers/of/fdt.c
drivers/of/irq.c
drivers/of/of_reserved_mem.c
drivers/pci/vc.c
drivers/perf/arm_pmu.c
drivers/phy/phy-bcm-ns-usb2.c
drivers/phy/phy-exynos-mipi-video.c
drivers/phy/phy-miphy28lp.c
drivers/phy/phy-rcar-gen3-usb2.c
drivers/phy/phy-rockchip-dp.c
drivers/phy/phy-stih407-usb.c
drivers/phy/phy-sun4i-usb.c
drivers/phy/phy-ti-pipe3.c
drivers/phy/phy-twl4030-usb.c
drivers/pinctrl/Makefile
drivers/pinctrl/freescale/pinctrl-imx.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/pinctrl/pinconf-generic.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/tegra/Makefile
drivers/platform/chrome/cros_ec_dev.c
drivers/platform/x86/Kconfig
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/power_supply_core.c
drivers/power/tps65217_charger.c
drivers/pps/clients/pps_parport.c
drivers/ptp/ptp_chardev.c
drivers/pwm/core.c
drivers/pwm/pwm-atmel-hlcdc.c
drivers/pwm/sysfs.c
drivers/regulator/anatop-regulator.c
drivers/regulator/max77620-regulator.c
drivers/regulator/qcom_smd-regulator.c
drivers/regulator/tps51632-regulator.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/53c700.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/linit.c
drivers/scsi/ipr.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/spi/spi-rockchip.c
drivers/spi/spi-sun4i.c
drivers/spi/spi-sun6i.c
drivers/spi/spi-ti-qspi.c
drivers/staging/android/sync.h
drivers/staging/iio/accel/sca3000_core.c
drivers/staging/iio/adc/ad7606_spi.c
drivers/staging/iio/impedance-analyzer/ad5933.c
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
drivers/staging/rtl8188eu/core/rtw_efuse.c
drivers/staging/rtl8188eu/hal/usb_halinit.c
drivers/thermal/cpu_cooling.c
drivers/thermal/int340x_thermal/int3406_thermal.c
drivers/tty/Kconfig
drivers/tty/pty.c
drivers/tty/vt/keyboard.c
drivers/tty/vt/vt.c
drivers/usb/common/usb-otg-fsm.c
drivers/usb/core/hcd.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/dwc3/dwc3-st.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_tcm.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/storage_common.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/udc-core.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-msm.c
drivers/usb/host/ehci-st.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/ohci-q.c
drivers/usb/host/ohci-st.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/musb/omap2430.c
drivers/usb/musb/sunxi.c
drivers/usb/phy/phy-twl6030-usb.c
drivers/usb/serial/mos7720.c
drivers/usb/storage/uas.c
drivers/usb/usbip/vhci_hcd.c
drivers/vfio/pci/vfio_pci_config.c
drivers/vfio/pci/vfio_pci_intrs.c
drivers/vfio/vfio_iommu_type1.c
drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
drivers/video/fbdev/omap2/omapfb/dss/apply.c
drivers/video/fbdev/omap2/omapfb/dss/core.c
drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
drivers/video/fbdev/omap2/omapfb/dss/dispc.c
drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
drivers/video/fbdev/omap2/omapfb/dss/display.c
drivers/video/fbdev/omap2/omapfb/dss/dpi.c
drivers/video/fbdev/omap2/omapfb/dss/dsi.c
drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
drivers/video/fbdev/omap2/omapfb/dss/dss.c
drivers/video/fbdev/omap2/omapfb/dss/dss.h
drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
drivers/video/fbdev/omap2/omapfb/dss/manager.c
drivers/video/fbdev/omap2/omapfb/dss/output.c
drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
drivers/video/fbdev/omap2/omapfb/dss/overlay.c
drivers/video/fbdev/omap2/omapfb/dss/pll.c
drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
drivers/video/fbdev/omap2/omapfb/dss/sdi.c
drivers/video/fbdev/omap2/omapfb/dss/venc.c
drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
drivers/video/fbdev/omap2/omapfb/omapfb.h
drivers/watchdog/Kconfig
drivers/xen/balloon.c
drivers/xen/xen-acpi-processor.c
drivers/xen/xen-pciback/conf_space.c
drivers/xen/xen-pciback/conf_space_header.c
drivers/xen/xenbus/xenbus_dev_frontend.c
drivers/xen/xenbus/xenbus_xs.c
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/autofs4/autofs_i.h
fs/autofs4/expire.c
fs/autofs4/root.c
fs/autofs4/waitq.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/btrfs/check-integrity.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/hash.c
fs/btrfs/hash.h
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/ordered-data.h
fs/btrfs/reada.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/tests/btrfs-tests.h
fs/btrfs/tests/extent-buffer-tests.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/free-space-tests.c
fs/btrfs/tests/free-space-tree-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/tests/qgroup-tests.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/cachefiles/interface.c
fs/ceph/addr.c
fs/ceph/cache.c
fs/ceph/cache.h
fs/ceph/caps.c
fs/ceph/export.c
fs/ceph/file.c
fs/ceph/super.h
fs/cifs/cifs_unicode.c
fs/cifs/cifs_unicode.h
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/ntlmssp.h
fs/cifs/sess.c
fs/cifs/smb2pdu.c
fs/configfs/file.c
fs/coredump.c
fs/dax.c
fs/dcache.c
fs/debugfs/file.c
fs/devpts/inode.c
fs/ecryptfs/crypto.c
fs/ecryptfs/file.c
fs/ecryptfs/main.c
fs/fs-writeback.c
fs/fscache/page.c
fs/fuse/dir.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/inode.c
fs/internal.h
fs/jbd2/journal.c
fs/libfs.c
fs/lockd/svc.c
fs/locks.c
fs/namei.c
fs/namespace.c
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/pnfs.c
fs/nfs/pnfs_nfs.c
fs/nfs/read.c
fs/nfsd/blocklayout.c
fs/nfsd/nfs2acl.c
fs/nfsd/nfs3acl.c
fs/nfsd/nfs4acl.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/nilfs2/the_nilfs.c
fs/ocfs2/Makefile
fs/ocfs2/buffer_head_io.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/overlayfs/super.c
fs/posix_acl.c
fs/proc/root.c
fs/reiserfs/super.c
fs/ubifs/file.c
fs/udf/partition.c
fs/udf/super.c
fs/udf/udf_sb.h
fs/xfs/xfs_ioctl.c
include/acpi/acpi_drivers.h
include/acpi/acpixf.h
include/acpi/video.h
include/asm-generic/qspinlock.h
include/asm-generic/vmlinux.lds.h
include/drm/bridge/analogix_dp.h
include/drm/drmP.h
include/drm/drm_atomic.h
include/drm/drm_atomic_helper.h
include/drm/drm_auth.h [new file with mode: 0644]
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_dp_helper.h
include/drm/drm_dp_mst_helper.h
include/drm/drm_fb_cma_helper.h
include/drm/drm_fb_helper.h
include/drm/drm_fourcc.h [new file with mode: 0644]
include/drm/drm_irq.h [new file with mode: 0644]
include/drm/drm_legacy.h
include/drm/drm_mipi_dsi.h
include/drm/drm_modes.h
include/drm/drm_modeset_helper_vtables.h
include/drm/drm_plane_helper.h
include/drm/drm_simple_kms_helper.h [new file with mode: 0644]
include/drm/i915_drm.h
include/drm/i915_pciids.h
include/drm/intel-gtt.h
include/drm/ttm/ttm_bo_api.h
include/drm/ttm/ttm_bo_driver.h
include/kvm/arm_pmu.h
include/linux/audit.h
include/linux/bcma/bcma.h
include/linux/binfmts.h
include/linux/bpf.h
include/linux/ceph/osd_client.h
include/linux/ceph/osdmap.h
include/linux/clk-provider.h
include/linux/cpuidle.h
include/linux/dcache.h
include/linux/devpts_fs.h
include/linux/dma-buf.h
include/linux/efi.h
include/linux/fence-array.h [new file with mode: 0644]
include/linux/fence.h
include/linux/filter.h
include/linux/fscache-cache.h
include/linux/huge_mm.h
include/linux/iio/common/st_sensors.h
include/linux/inet_diag.h
include/linux/init_task.h
include/linux/io-mapping.h
include/linux/irqchip/arm-gic-v3.h
include/linux/isa.h
include/linux/jump_label.h
include/linux/kasan.h
include/linux/leds.h
include/linux/memcontrol.h
include/linux/mfd/da9052/da9052.h
include/linux/mlx4/device.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/mlx5/vport.h
include/linux/mm.h
include/linux/namei.h
include/linux/net.h
include/linux/netdevice.h
include/linux/of.h
include/linux/of_pci.h
include/linux/of_reserved_mem.h
include/linux/page_idle.h
include/linux/pinctrl/pinconf-generic.h
include/linux/platform_data/omapdss.h [new file with mode: 0644]
include/linux/posix_acl.h
include/linux/pwm.h
include/linux/qed/qed_eth_if.h
include/linux/radix-tree.h
include/linux/reservation.h
include/linux/reset.h
include/linux/rmap.h
include/linux/sched.h
include/linux/sctp.h
include/linux/seqlock.h
include/linux/skbuff.h
include/linux/sock_diag.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/svc_xprt.h
include/linux/sunrpc/xprt.h
include/linux/thermal.h
include/linux/timekeeping.h
include/linux/usb/ehci_def.h
include/linux/usb/gadget.h
include/linux/usb/musb.h
include/linux/vga_switcheroo.h
include/net/bonding.h
include/net/compat.h
include/net/gre.h
include/net/ip.h
include/net/ip6_tunnel.h
include/net/ip_vs.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_queue.h
include/net/netfilter/nf_tables.h
include/net/netns/netfilter.h
include/net/pkt_cls.h
include/net/pkt_sched.h
include/net/sch_generic.h
include/net/sock.h
include/net/switchdev.h
include/net/tc_act/tc_ife.h
include/rdma/ib_verbs.h
include/rdma/rdma_vt.h
include/sound/hdmi-codec.h
include/sound/omap-hdmi-audio.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/i915_drm.h
include/uapi/drm/msm_drm.h
include/uapi/drm/vc4_drm.h
include/uapi/drm/vgem_drm.h [new file with mode: 0644]
include/uapi/linux/Kbuild
include/uapi/linux/btrfs.h
include/uapi/linux/ethtool.h
include/uapi/linux/fuse.h
include/uapi/linux/gtp.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/input.h
include/uapi/linux/netfilter/Kbuild
include/uapi/linux/netfilter/xt_SYNPROXY.h
include/uapi/linux/pkt_cls.h
include/uapi/sound/Kbuild
include/video/imx-ipu-v3.h
include/video/omap-panel-data.h
include/video/omapdss.h [deleted file]
include/video/omapfb_dss.h [new file with mode: 0644]
init/Kconfig
init/main.c
kernel/audit.c
kernel/audit.h
kernel/auditsc.c
kernel/bpf/inode.c
kernel/bpf/verifier.c
kernel/cgroup.c
kernel/cpu.c
kernel/events/core.c
kernel/fork.c
kernel/futex.c
kernel/gcov/gcc_4_7.c
kernel/irq/ipi.c
kernel/jump_label.c
kernel/kcov.c
kernel/locking/mutex-debug.c
kernel/locking/mutex-debug.h
kernel/locking/mutex.c
kernel/locking/mutex.h
kernel/locking/qspinlock.c
kernel/power/process.c
kernel/relay.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/loadavg.c
kernel/sched/sched.h
kernel/sched/stats.h
kernel/time/hrtimer.c
kernel/time/posix-cpu-timers.c
kernel/trace/bpf_trace.c
kernel/trace/trace_printk.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/test_uuid.c [new file with mode: 0644]
lib/uuid.c
mm/compaction.c
mm/fadvise.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/kasan/kasan.c
mm/kasan/quarantine.c
mm/kmemleak.c
mm/memcontrol.c
mm/memory.c
mm/mempool.c
mm/migrate.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/page_owner.c
mm/page_poison.c
mm/percpu.c
mm/rmap.c
mm/shmem.c
mm/slab_common.c
mm/swap.c
mm/swap_state.c
mm/vmalloc.c
mm/vmstat.c
mm/workingset.c
mm/z3fold.c
net/8021q/vlan.c
net/8021q/vlan.h
net/8021q/vlan_dev.c
net/8021q/vlan_netlink.c
net/atm/signaling.c
net/atm/svc.c
net/ax25/af_ax25.c
net/ax25/ax25_ds_timer.c
net/ax25/ax25_std_timer.c
net/ax25/ax25_subr.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/originator.c
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bridge/br_fdb.c
net/bridge/br_input.c
net/bridge/br_multicast.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/ceph/osd_client.c
net/ceph/osdmap.c
net/compat.c
net/core/filter.c
net/core/flow_dissector.c
net/core/gen_stats.c
net/core/hwbm.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/pktgen.c
net/core/skbuff.c
net/core/sock.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/decnet/dn_fib.c
net/ieee802154/nl802154.c
net/ipv4/af_inet.c
net/ipv4/esp4.c
net/ipv4/fib_semantics.c
net/ipv4/gre_demux.c
net/ipv4/ip_gre.c
net/ipv4/ip_output.c
net/ipv4/ipconfig.c
net/ipv4/ipmr.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/fou6.c
net/ipv6/icmp.c
net/ipv6/ip6_checksum.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/ipv6/netfilter/nf_dup_ipv6.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/kcm/kcmproc.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_ip6.c
net/lapb/lapb_in.c
net/lapb/lapb_out.c
net/lapb/lapb_subr.c
net/mac80211/mesh.c
net/mac80211/sta_info.h
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_irc.c
net/netfilter/nf_conntrack_sane.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_conntrack_tftp.c
net/netfilter/nf_queue.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_ct.c
net/netfilter/nft_hash.c
net/netfilter/nft_meta.c
net/netfilter/nft_rbtree.c
net/netfilter/x_tables.c
net/openvswitch/actions.c
net/openvswitch/conntrack.c
net/packet/af_packet.c
net/rds/ib_cm.c
net/rds/loop.c
net/rds/rds.h
net/rds/recv.c
net/rds/send.c
net/rds/sysctl.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/rds/threads.c
net/rds/transport.c
net/rose/rose_in.c
net/rxrpc/rxkad.c
net/sched/act_api.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_police.c
net/sched/cls_flower.c
net/sched/cls_u32.c
net/sched/sch_api.c
net/sched/sch_drr.c
net/sched/sch_fifo.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_ingress.c
net/sched/sch_netem.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_tbf.c
net/sctp/input.c
net/sctp/sctp_diag.c
net/sctp/socket.c
net/sunrpc/clnt.c
net/sunrpc/svc_xprt.c
net/sunrpc/xprtsock.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/link.c
net/tipc/msg.c
net/tipc/msg.h
net/tipc/netlink_compat.c
net/tipc/node.c
net/tipc/socket.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/util.c
net/wireless/wext-core.c
scripts/checkpatch.pl
scripts/gdb/linux/.gitignore
scripts/gdb/linux/Makefile
scripts/gdb/linux/constants.py.in
scripts/gdb/linux/radixtree.py [deleted file]
scripts/gdb/linux/symbols.py
scripts/gdb/vmlinux-gdb.py
scripts/kernel-doc
scripts/mod/file2alias.c
security/apparmor/lsm.c
security/keys/compat.c
security/keys/dh.c
security/keys/internal.h
security/keys/key.c
security/keys/keyctl.c
sound/core/control.c
sound/core/pcm.c
sound/core/timer.c
sound/drivers/dummy.c
sound/hda/hdac_regmap.c
sound/pci/au88x0/au88x0_core.c
sound/pci/echoaudio/echoaudio.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/Kconfig
sound/soc/codecs/ak4613.c
sound/soc/codecs/cx20442.c
sound/soc/codecs/hdac_hdmi.c
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5670.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8940.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/davinci/davinci-mcasp.h
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/atom/sst-mfld-platform-compress.c
sound/soc/intel/skylake/bxt-sst.c
sound/soc/omap/omap-hdmi-audio.c
sound/soc/sh/rcar/adg.c
sound/usb/card.c
tools/objtool/builtin-check.c
tools/perf/util/data-convert-bt.c
tools/perf/util/event.c
tools/perf/util/symbol.c
tools/testing/radix-tree/tag_check.c
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
tools/testing/selftests/net/reuseport_bpf.c
tools/testing/selftests/vm/compaction_test.c
tools/virtio/ringtest/Makefile
tools/virtio/ringtest/README
tools/virtio/ringtest/noring.c [new file with mode: 0644]
tools/virtio/ringtest/run-on-all.sh
tools/vm/slabinfo.c
virt/kvm/arm/hyp/vgic-v2-sr.c
virt/kvm/arm/vgic/vgic-mmio.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/irqchip.c
virt/kvm/kvm_main.c

index 4a293bea648d66306694f3c8ecf62645d5e2c852..52489f56406941d569b8f55ec2af9c70fbc89206 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
+Antoine Tenart <antoine.tenart@free-electrons.com>
 Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
 Archit Taneja <archit@ti.com>
 Arnaud Patard <arnaud.patard@rtp-net.org>
@@ -30,6 +31,9 @@ Axel Lin <axel.lin@gmail.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Boris Brezillon <boris.brezillon@free-electrons.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Christoph Hellwig <hch@lst.de>
@@ -123,6 +127,7 @@ Santosh Shilimkar <santosh.shilimkar@oracle.org>
 Sascha Hauer <s.hauer@pengutronix.de>
 S.Çağlar Onur <caglar@pardus.org.tr>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
+Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com> <shuah.khan@hp.com> <shuahkh@osg.samsung.com> <shuah.kh@samsung.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
diff --git a/Documentation/.gitignore b/Documentation/.gitignore
new file mode 100644 (file)
index 0000000..53752db
--- /dev/null
@@ -0,0 +1 @@
+output
index 2f4a0051b32d3bb7feb5552caba934d8b2f214d7..1ba0d0fda9c0b5cac32a0169790b5c87f819f808 100644 (file)
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/uvc.name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   UVC function directory
 
                streaming_maxburst      - 0..15 (ss only)
@@ -9,37 +9,37 @@ Description:  UVC function directory
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Control descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/class
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/class/ss
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Super speed control class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/class/fs
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Full speed control class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Output terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default output terminal descriptors
 
                All attributes read only:
@@ -53,12 +53,12 @@ Description:        Default output terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Camera terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default camera terminal descriptors
 
                All attributes read only:
@@ -75,12 +75,12 @@ Description:        Default camera terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/processing
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Processing unit descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/processing/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default processing unit descriptors
 
                All attributes read only:
@@ -94,49 +94,49 @@ Description:        Default processing unit descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/header
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Control header descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/header/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific control header descriptors
 
 dwClockFrequency
 bcdUVC
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Streaming descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Super speed streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   High speed streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Full speed streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Color matching descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default color matching descriptors
 
                All attributes read only:
@@ -150,12 +150,12 @@ Description:      Default color matching descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   MJPEG format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific MJPEG format descriptors
 
                All attributes read only,
@@ -174,7 +174,7 @@ Description:        Specific MJPEG format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific MJPEG frame descriptors
 
                dwFrameInterval         - indicates how frame interval can be
@@ -196,12 +196,12 @@ Description:      Specific MJPEG frame descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Uncompressed format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific uncompressed format descriptors
 
                bmaControls             - this format's data for bmaControls in
@@ -221,7 +221,7 @@ Description:        Specific uncompressed format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific uncompressed frame descriptors
 
                dwFrameInterval         - indicates how frame interval can be
@@ -243,12 +243,12 @@ Description:      Specific uncompressed frame descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/header
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Streaming header descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific streaming header descriptors
 
                All attributes read only:
index 6708c5e264aa81875d0eec9c556b8b14429d4c49..33e96f74063925fb253b7ac16c73f43b76c334c8 100644 (file)
@@ -1,4 +1,4 @@
-What           /sys/bus/iio/devices/iio:deviceX/in_proximity_raw
+What           /sys/bus/iio/devices/iio:deviceX/in_proximity_input
 Date:          March 2014
 KernelVersion: 3.15
 Contact:       Matt Ranostay <mranostay@gmail.com>
index d70f9b68174e8bc388dc4bec617b28596b903cc4..f4482f9b221fe716d5e393b0784911079b438a21 100644 (file)
@@ -14,7 +14,7 @@ DOCBOOKS := z8530book.xml device-drivers.xml \
            genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
            80211.xml debugobjects.xml sh.xml regulator.xml \
            alsa-driver-api.xml writing-an-alsa-driver.xml \
-           tracepoint.xml gpu.xml media_api.xml w1.xml \
+           tracepoint.xml media_api.xml w1.xml \
            writing_musb_glue_layer.xml crypto-API.xml iio.xml
 
 include Documentation/DocBook/media/Makefile
@@ -33,10 +33,6 @@ PDF_METHOD   = $(prefer-db2x)
 PS_METHOD      = $(prefer-db2x)
 
 
-###
-# The targets that may be used.
-PHONY += xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs cleandocs
-
 targets += $(DOCBOOKS)
 BOOKS := $(addprefix $(obj)/,$(DOCBOOKS))
 xmldocs: $(BOOKS)
@@ -63,6 +59,9 @@ installmandocs: mandocs
                sort -k 2 -k 1 | uniq -f 1 | sed -e 's: :/:' | \
                xargs install -m 644 -t /usr/local/man/man9/
 
+# no-op for the DocBook toolchain
+epubdocs:
+
 ###
 #External programs used
 KERNELDOCXMLREF = $(srctree)/scripts/kernel-doc-xml-ref
index de79efdad46c2c691849b681b47c10f190658593..c3313d45f4d69d55c8926f419366f0982c6f40d1 100644 (file)
@@ -128,16 +128,48 @@ X!Edrivers/base/interface.c
 !Edrivers/base/platform.c
 !Edrivers/base/bus.c
      </sect1>
-     <sect1><title>Device Drivers DMA Management</title>
+     <sect1>
+       <title>Buffer Sharing and Synchronization</title>
+       <para>
+         The dma-buf subsystem provides the framework for sharing buffers
+         for hardware (DMA) access across multiple device drivers and
+         subsystems, and for synchronizing asynchronous hardware access.
+       </para>
+       <para>
+         This is used, for example, by drm "prime" multi-GPU support, but
+         is of course not limited to GPU use cases.
+       </para>
+       <para>
+         The three main components of this are: (1) dma-buf, representing
+         a sg_table and exposed to userspace as a file descriptor to allow
+         passing between devices, (2) fence, which provides a mechanism
+         to signal when one device has finished access, and (3) reservation,
+         which manages the shared or exclusive fence(s) associated with
+         the buffer.
+       </para>
+       <sect2><title>dma-buf</title>
 !Edrivers/dma-buf/dma-buf.c
+!Iinclude/linux/dma-buf.h
+       </sect2>
+       <sect2><title>reservation</title>
+!Pdrivers/dma-buf/reservation.c Reservation Object Overview
+!Edrivers/dma-buf/reservation.c
+!Iinclude/linux/reservation.h
+       </sect2>
+       <sect2><title>fence</title>
 !Edrivers/dma-buf/fence.c
-!Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/fence.h
+!Edrivers/dma-buf/seqno-fence.c
 !Iinclude/linux/seqno-fence.h
+!Edrivers/dma-buf/fence-array.c
+!Iinclude/linux/fence-array.h
 !Edrivers/dma-buf/reservation.c
 !Iinclude/linux/reservation.h
 !Edrivers/dma-buf/sync_file.c
 !Iinclude/linux/sync_file.h
+       </sect2>
+     </sect1>
+     <sect1><title>Device Drivers DMA Management</title>
 !Edrivers/base/dma-coherent.c
 !Edrivers/base/dma-mapping.c
      </sect1>
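
The hunk above documents dma-buf, fence and reservation in prose. As a rough
illustration of how an importer ties the pieces together, a minimal sketch of
the dma-buf API as it exists around this merge; import_dmabuf() and its error
handling are illustrative placeholders, not code from this tree:

    #include <linux/dma-buf.h>
    #include <linux/err.h>

    /* Turn a file descriptor exported by another driver into an sg_table
     * that this device can DMA to/from. */
    static struct sg_table *import_dmabuf(struct device *dev, int fd,
                                          struct dma_buf **buf,
                                          struct dma_buf_attachment **att)
    {
            *buf = dma_buf_get(fd);            /* take a ref from the fd */
            if (IS_ERR(*buf))
                    return ERR_CAST(*buf);

            *att = dma_buf_attach(*buf, dev);  /* announce this device */
            if (IS_ERR(*att)) {
                    dma_buf_put(*buf);
                    return ERR_CAST(*att);
            }

            /* Undo with dma_buf_unmap_attachment(), dma_buf_detach() and
             * dma_buf_put(), in that order. */
            return dma_buf_map_attachment(*att, DMA_BIDIRECTIONAL);
    }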
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
deleted file mode 100644 (file)
index 7586bf7..0000000
+++ /dev/null
@@ -1,3540 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-       "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-
-<book id="gpuDevelopersGuide">
-  <bookinfo>
-    <title>Linux GPU Driver Developer's Guide</title>
-
-    <authorgroup>
-      <author>
-       <firstname>Jesse</firstname>
-       <surname>Barnes</surname>
-       <contrib>Initial version</contrib>
-       <affiliation>
-         <orgname>Intel Corporation</orgname>
-         <address>
-           <email>jesse.barnes@intel.com</email>
-         </address>
-       </affiliation>
-      </author>
-      <author>
-       <firstname>Laurent</firstname>
-       <surname>Pinchart</surname>
-       <contrib>Driver internals</contrib>
-       <affiliation>
-         <orgname>Ideas on board SPRL</orgname>
-         <address>
-           <email>laurent.pinchart@ideasonboard.com</email>
-         </address>
-       </affiliation>
-      </author>
-      <author>
-       <firstname>Daniel</firstname>
-       <surname>Vetter</surname>
-       <contrib>Contributions all over the place</contrib>
-       <affiliation>
-         <orgname>Intel Corporation</orgname>
-         <address>
-           <email>daniel.vetter@ffwll.ch</email>
-         </address>
-       </affiliation>
-      </author>
-      <author>
-       <firstname>Lukas</firstname>
-       <surname>Wunner</surname>
-       <contrib>vga_switcheroo documentation</contrib>
-       <affiliation>
-         <address>
-           <email>lukas@wunner.de</email>
-         </address>
-       </affiliation>
-      </author>
-    </authorgroup>
-
-    <copyright>
-      <year>2008-2009</year>
-      <year>2013-2014</year>
-      <holder>Intel Corporation</holder>
-    </copyright>
-    <copyright>
-      <year>2012</year>
-      <holder>Laurent Pinchart</holder>
-    </copyright>
-    <copyright>
-      <year>2015</year>
-      <holder>Lukas Wunner</holder>
-    </copyright>
-
-    <legalnotice>
-      <para>
-       The contents of this file may be used under the terms of the GNU
-       General Public License version 2 (the "GPL") as distributed in
-       the kernel source COPYING file.
-      </para>
-    </legalnotice>
-
-    <revhistory>
-      <!-- Put document revisions here, newest first. -->
-      <revision>
-       <revnumber>1.0</revnumber>
-       <date>2012-07-13</date>
-       <authorinitials>LP</authorinitials>
-       <revremark>Added extensive documentation about driver internals.
-       </revremark>
-      </revision>
-      <revision>
-       <revnumber>1.1</revnumber>
-       <date>2015-10-11</date>
-       <authorinitials>LW</authorinitials>
-       <revremark>Added vga_switcheroo documentation.
-       </revremark>
-      </revision>
-    </revhistory>
-  </bookinfo>
-
-<toc></toc>
-
-<part id="drmCore">
-  <title>DRM Core</title>
-  <partintro>
-    <para>
-      This first part of the GPU Driver Developer's Guide documents core DRM
-      code, helper libraries for writing drivers and generic userspace
-      interfaces exposed by DRM drivers.
-    </para>
-  </partintro>
-
-  <chapter id="drmIntroduction">
-    <title>Introduction</title>
-    <para>
-      The Linux DRM layer contains code intended to support the needs
-      of complex graphics devices, usually containing programmable
-      pipelines well suited to 3D graphics acceleration.  Graphics
-      drivers in the kernel may make use of DRM functions to make
-      tasks like memory management, interrupt handling and DMA easier,
-      and provide a uniform interface to applications.
-    </para>
-    <para>
-      A note on versions: this guide covers features found in the DRM
-      tree, including the TTM memory manager, output configuration and
-      mode setting, and the new vblank internals, in addition to all
-      the regular features found in current kernels.
-    </para>
-    <para>
-      [Insert diagram of typical DRM stack here]
-    </para>
-  <sect1>
-    <title>Style Guidelines</title>
-    <para>
-      For consistency this documentation uses American English. Abbreviations
-      are written as all-uppercase, for example: DRM, KMS, IOCTL, CRTC, and so
-      on. To aid in reading, the documentation makes full use of the markup
-      characters kerneldoc provides: @parameter for function parameters, @member
-      for structure members, &amp;structure to reference structures and
-      function() for functions. These all get automatically hyperlinked if
-      kerneldoc for the referenced objects exists. When referencing entries in
-      function vtables please use -&gt;vfunc(). Note that kerneldoc does
-      not support referencing struct members directly, so please add a reference
-      to the vtable struct somewhere in the same paragraph or at least section.
-    </para>
-    <para>
-      Except in special situations (to separate locked from unlocked variants)
-      locking requirements for functions aren't documented in the kerneldoc.
-      Instead, locking should be checked at runtime using e.g.
-      <code>WARN_ON(!mutex_is_locked(...));</code>. Since it's much easier to
-      ignore documentation than runtime noise this provides more value. And on
-      top of that runtime checks do need to be updated when the locking rules
-      change, increasing the chances that they're correct. Within the
-      documentation the locking rules should be explained in the relevant
-      structures: Either in the comment for the lock explaining what it
-      protects, or data fields need a note about which lock protects them, or
-      both.
-    </para>
-    <para>
-      Functions which have a non-<code>void</code> return value should have a
-      section called "Returns" explaining the expected return values in
-      different cases and their meanings. Currently there's no consensus whether
-      that section name should be all upper-case or not, and whether it should
-      end in a colon or not. Go with the file-local style. Other common section
-      names are "Notes" with information for dangerous or tricky corner cases,
-      and "FIXME" where the interface could be cleaned up.
-    </para>
-  </sect1>
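
To make the runtime-check guideline above concrete, a minimal sketch; the
function name and the particular lock asserted are chosen only for
illustration:

    static void foo_update_output_state(struct drm_device *dev)
    {
            /* Assert the locking rule at runtime rather than (only)
             * describing it in kerneldoc, per the guideline above. */
            WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

            /* ... modify state protected by mode_config.mutex ... */
    }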
-  </chapter>
-
-  <!-- Internals -->
-
-  <chapter id="drmInternals">
-    <title>DRM Internals</title>
-    <para>
-      This chapter documents DRM internals relevant to driver authors
-      and developers working to add support for the latest features to
-      existing drivers.
-    </para>
-    <para>
-      First, we go over some typical driver initialization
-      requirements, like setting up command buffers, creating an
-      initial output configuration, and initializing core services.
-      Subsequent sections cover core internals in more detail,
-      providing implementation notes and examples.
-    </para>
-    <para>
-      The DRM layer provides several services to graphics drivers,
-      many of them driven by the application interfaces it provides
-      through libdrm, the library that wraps most of the DRM ioctls.
-      These include vblank event handling, memory
-      management, output management, framebuffer management, command
-      submission &amp; fencing, suspend/resume support, and DMA
-      services.
-    </para>
-
-  <!-- Internals: driver init -->
-
-  <sect1>
-    <title>Driver Initialization</title>
-    <para>
-      At the core of every DRM driver is a <structname>drm_driver</structname>
-      structure. Drivers typically statically initialize a drm_driver structure,
-      and then pass it to <function>drm_dev_alloc()</function> to allocate a
-      device instance. After the device instance is fully initialized it can be
-      registered (which makes it accessible from userspace) using
-      <function>drm_dev_register()</function>.
-    </para>
-    <para>
-      The <structname>drm_driver</structname> structure contains static
-      information that describes the driver and features it supports, and
-      pointers to methods that the DRM core will call to implement the DRM API.
-      We will first go through the <structname>drm_driver</structname> static
-      information fields, and will then describe individual operations in
-      details as they get used in later sections.
-    </para>
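
A minimal sketch of that initialization flow, with placeholder names and
feature flags (not a complete driver); drm_dev_alloc() returns NULL on
failure in this era of the tree:

    static struct drm_driver foo_driver = {
            .driver_features = DRIVER_GEM | DRIVER_MODESET,
            .name  = "foo",
            .desc  = "illustrative DRM driver",
            .date  = "20160729",
            .major = 1,
            .minor = 0,
    };

    static int foo_probe(struct platform_device *pdev)
    {
            struct drm_device *drm;

            drm = drm_dev_alloc(&foo_driver, &pdev->dev);
            if (!drm)
                    return -ENOMEM;
            /* ... hardware and mode-setting initialization ... */
            return drm_dev_register(drm, 0); /* visible to userspace */
    }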
-    <sect2>
-      <title>Driver Information</title>
-      <sect3>
-        <title>Driver Features</title>
-        <para>
-          Drivers inform the DRM core about their requirements and supported
-          features by setting appropriate flags in the
-          <structfield>driver_features</structfield> field. Since those flags
-          influence the DRM core behaviour from registration time onwards,
-          most of them must be set before registering the
-          <structname>drm_driver</structname> instance.
-        </para>
-        <synopsis>u32 driver_features;</synopsis>
-        <variablelist>
-          <title>Driver Feature Flags</title>
-          <varlistentry>
-            <term>DRIVER_USE_AGP</term>
-            <listitem><para>
-              Driver uses AGP interface, the DRM core will manage AGP resources.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_REQUIRE_AGP</term>
-            <listitem><para>
-              Driver needs AGP interface to function. AGP initialization failure
-              will become a fatal error.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_PCI_DMA</term>
-            <listitem><para>
-              Driver is capable of PCI DMA, mapping of PCI DMA buffers to
-              userspace will be enabled. Deprecated.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_SG</term>
-            <listitem><para>
-              Driver can perform scatter/gather DMA, allocation and mapping of
-              scatter/gather buffers will be enabled. Deprecated.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_HAVE_DMA</term>
-            <listitem><para>
-              Driver supports DMA, the userspace DMA API will be supported.
-              Deprecated.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_HAVE_IRQ</term><term>DRIVER_IRQ_SHARED</term>
-            <listitem><para>
-              DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler
-              managed by the DRM Core. The core will support simple IRQ handler
-              installation when the flag is set. The installation process is
-              described in <xref linkend="drm-irq-registration"/>.</para>
-              <para>DRIVER_IRQ_SHARED indicates whether the device &amp; handler
-              support shared IRQs (note that this is required of PCI drivers).
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_GEM</term>
-            <listitem><para>
-              Driver uses the GEM memory manager.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_MODESET</term>
-            <listitem><para>
-              Driver supports mode setting interfaces (KMS).
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_PRIME</term>
-            <listitem><para>
-              Driver implements DRM PRIME buffer sharing.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_RENDER</term>
-            <listitem><para>
-              Driver supports dedicated render nodes.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRIVER_ATOMIC</term>
-            <listitem><para>
-              Driver supports atomic properties.  In this case the driver
-              must implement appropriate obj->atomic_get_property() vfuncs
-              for any modeset objects with driver-specific properties.
-            </para></listitem>
-          </varlistentry>
-        </variablelist>
-      </sect3>
-      <sect3>
-        <title>Major, Minor and Patchlevel</title>
-        <synopsis>int major;
-int minor;
-int patchlevel;</synopsis>
-        <para>
-          The DRM core identifies driver versions by a major, minor and patch
-          level triplet. The information is printed to the kernel log at
-          initialization time and passed to userspace through the
-          DRM_IOCTL_VERSION ioctl.
-        </para>
-        <para>
-          The major and minor numbers are also used to verify the requested driver
-          API version passed to DRM_IOCTL_SET_VERSION. When the driver API changes
-          between minor versions, applications can call DRM_IOCTL_SET_VERSION to
-          select a specific version of the API. If the requested major isn't equal
-          to the driver major, or the requested minor is larger than the driver
-          minor, the DRM_IOCTL_SET_VERSION call will return an error. Otherwise
-          the driver's set_version() method will be called with the requested
-          version.
-        </para>
-      </sect3>
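
From the userspace side, the version triplet can be read back with a short
program; this assumes the libdrm headers are on the include path and that
/dev/dri/card0 exists:

    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    int main(void)
    {
            struct drm_version v = {0};
            int fd = open("/dev/dri/card0", O_RDWR);

            if (fd < 0 || ioctl(fd, DRM_IOCTL_VERSION, &v) < 0)
                    return 1;
            /* name/date/desc require a second ioctl call with buffers
             * sized from the lengths returned here. */
            printf("%d.%d.%d\n", v.version_major, v.version_minor,
                   v.version_patchlevel);
            return 0;
    }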
-      <sect3>
-        <title>Name, Description and Date</title>
-        <synopsis>char *name;
-char *desc;
-char *date;</synopsis>
-        <para>
-          The driver name is printed to the kernel log at initialization time,
-          used for IRQ registration and passed to userspace through
-          DRM_IOCTL_VERSION.
-        </para>
-        <para>
-          The driver description is a purely informative string passed to
-          userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
-          the kernel.
-        </para>
-        <para>
-          The driver date, formatted as YYYYMMDD, is meant to identify the date of
-          the latest modification to the driver. However, as most drivers fail to
-          update it, its value is mostly useless. The DRM core prints it to the
-          kernel log at initialization time and passes it to userspace through the
-          DRM_IOCTL_VERSION ioctl.
-        </para>
-      </sect3>
-    </sect2>
-    <sect2>
-      <title>Device Instance and Driver Handling</title>
-!Pdrivers/gpu/drm/drm_drv.c driver instance overview
-!Edrivers/gpu/drm/drm_drv.c
-    </sect2>
-    <sect2>
-      <title>Driver Load</title>
-      <sect3 id="drm-irq-registration">
-        <title>IRQ Registration</title>
-        <para>
-          The DRM core tries to facilitate IRQ handler registration and
-          unregistration by providing <function>drm_irq_install</function> and
-          <function>drm_irq_uninstall</function> functions. Those functions only
-          support a single interrupt per device, devices that use more than one
-          IRQs need to be handled manually.
-        </para>
-        <sect4>
-          <title>Managed IRQ Registration</title>
-          <para>
-            <function>drm_irq_install</function> starts by calling the
-            <methodname>irq_preinstall</methodname> driver operation. The operation
-            is optional; if implemented, it must make sure that the interrupt
-            will not fire, by clearing all pending interrupt flags or by
-            disabling the interrupt.
-          </para>
-          <para>
-            The passed-in IRQ will then be requested by a call to
-            <function>request_irq</function>. If the DRIVER_IRQ_SHARED driver
-            feature flag is set, a shared (IRQF_SHARED) IRQ handler will be
-            requested.
-          </para>
-          <para>
-            The IRQ handler function must be provided as the mandatory irq_handler
-            driver operation. It will get passed directly to
-            <function>request_irq</function> and thus has the same prototype as all
-            IRQ handlers. It will get called with a pointer to the DRM device as the
-            second argument.
-          </para>
-          <para>
-            Finally the function calls the optional
-            <methodname>irq_postinstall</methodname> driver operation. The operation
-            usually enables interrupts (excluding the vblank interrupt, which is
-            enabled separately), but drivers may choose to enable/disable interrupts
-            at a different time.
-          </para>
-          <para>
-            <function>drm_irq_uninstall</function> is similarly used to uninstall an
-            IRQ handler. It starts by waking up all processes waiting on a vblank
-            interrupt to make sure they don't hang, and then calls the optional
-            <methodname>irq_uninstall</methodname> driver operation. The operation
-            must disable all hardware interrupts. Finally the function frees the IRQ
-            by calling <function>free_irq</function>.
-          </para>
-        </sect4>
-        <sect4>
-          <title>Manual IRQ Registration</title>
-          <para>
-            Drivers that require multiple interrupt handlers can't use the managed
-            IRQ registration functions. In that case IRQs must be registered and
-            unregistered manually (usually with the <function>request_irq</function>
-            and <function>free_irq</function> functions, or their devm_* equivalent).
-          </para>
-          <para>
-            When manually registering IRQs, drivers must not set the DRIVER_HAVE_IRQ
-            driver feature flag, and must not provide the
-           <methodname>irq_handler</methodname> driver operation. They must set the
-           <structname>drm_device</structname> <structfield>irq_enabled</structfield>
-           field to 1 upon registration of the IRQs, and clear it to 0 after
-           unregistering the IRQs.
-          </para>
-        </sect4>
-      </sect3>
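
A hedged sketch of the managed path just described; the foo_* names are
placeholders and the optional pre/postinstall hooks are elided:

    static irqreturn_t foo_irq_handler(int irq, void *arg)
    {
            struct drm_device *dev = arg; /* DRM device, per the text above */

            /* ... read and acknowledge the hardware interrupt status ... */
            return IRQ_HANDLED;
    }

    /* In foo_driver: .irq_handler = foo_irq_handler (mandatory), plus
     * optional .irq_preinstall/.irq_postinstall operations. */

    static int foo_enable_irq(struct drm_device *dev, int irq)
    {
            return drm_irq_install(dev, irq); /* one managed IRQ per device */
    }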
-      <sect3>
-        <title>Memory Manager Initialization</title>
-        <para>
-          Every DRM driver requires a memory manager which must be initialized at
-          load time. DRM currently contains two memory managers, the Translation
-          Table Manager (TTM) and the Graphics Execution Manager (GEM).
-          This document describes the use of the GEM memory manager only. See
-          <xref linkend="drm-memory-management"/> for details.
-        </para>
-      </sect3>
-      <sect3>
-        <title>Miscellaneous Device Configuration</title>
-        <para>
-          Another task that may be necessary for PCI devices during configuration
-          is mapping the video BIOS. On many devices, the VBIOS describes device
-          configuration, LCD panel timings (if any), and contains flags indicating
-          device state. Mapping the BIOS can be done using the pci_map_rom() call,
-          a convenience function that takes care of mapping the actual ROM,
-          whether it has been shadowed into memory (typically at address 0xc0000)
-          or exists on the PCI device in the ROM BAR. Note that after the ROM has
-          been mapped and any necessary information has been extracted, it should
-          be unmapped; on many devices, the ROM address decoder is shared with
-          other BARs, so leaving it mapped could cause undesired behaviour like
-          hangs or memory corruption.
-  <!--!Fdrivers/pci/rom.c pci_map_rom-->
-        </para>
-      </sect3>
-    </sect2>
-    <sect2>
-      <title>Bus-specific Device Registration and PCI Support</title>
-      <para>
-        A number of functions are provided to help with device registration.
-       The functions deal with PCI and platform devices respectively and are
-       only provided for historical reasons. These are all deprecated and
-       shouldn't be used in new drivers. Besides that there are a few
-       helpers for PCI drivers.
-      </para>
-!Edrivers/gpu/drm/drm_pci.c
-!Edrivers/gpu/drm/drm_platform.c
-    </sect2>
-  </sect1>
-
-  <!-- Internals: memory management -->
-
-  <sect1 id="drm-memory-management">
-    <title>Memory management</title>
-    <para>
-      Modern Linux systems require large amounts of graphics memory to store
-      frame buffers, textures, vertices and other graphics-related data. Given
-      the very dynamic nature of much of that data, managing graphics memory
-      efficiently is thus crucial for the graphics stack and plays a central
-      role in the DRM infrastructure.
-    </para>
-    <para>
-      The DRM core includes two memory managers, namely Translation Table Maps
-      (TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
-      manager to be developed and tried to be a one-size-fits-all
-      solution. It provides a single userspace API to accommodate the needs of
-      all hardware, supporting both Unified Memory Architecture (UMA) devices
-      and devices with dedicated video RAM (i.e. most discrete video cards).
-      This resulted in a large, complex piece of code that turned out to be
-      hard to use for driver development.
-    </para>
-    <para>
-      GEM started as an Intel-sponsored project in reaction to TTM's
-      complexity. Its design philosophy is completely different: instead of
-      providing a solution to every graphics memory-related problem, GEM
-      identified common code between drivers and created a support library to
-      share it. GEM has simpler initialization and execution requirements than
-      TTM, but has no video RAM management capabilities and is thus limited to
-      UMA devices.
-    </para>
-    <sect2>
-      <title>The Translation Table Manager (TTM)</title>
-      <para>
-        TTM design background and information belongs here.
-      </para>
-      <sect3>
-        <title>TTM initialization</title>
-        <warning><para>This section is outdated.</para></warning>
-        <para>
-          Drivers wishing to support TTM must fill out a drm_bo_driver
-          structure. The structure contains several fields with function
-          pointers for initializing the TTM, allocating and freeing memory,
-          waiting for command completion and fence synchronization, and memory
-          migration. See the radeon_ttm.c file for an example of usage.
-        </para>
-        <para>
-          The ttm_global_reference structure is made up of several fields:
-        </para>
-        <programlisting>
-          struct ttm_global_reference {
-                  enum ttm_global_types global_type;
-                  size_t size;
-                  void *object;
-                  int (*init) (struct ttm_global_reference *);
-                  void (*release) (struct ttm_global_reference *);
-          };
-        </programlisting>
-        <para>
-          There should be one global reference structure for your memory
-          manager as a whole, and there will be others for each object
-          created by the memory manager at runtime.  Your global TTM should
-          have a type of TTM_GLOBAL_TTM_MEM.  The size field for the global
-          object should be sizeof(struct ttm_mem_global), and the init and
-          release hooks should point at your driver-specific init and
-          release routines, which probably eventually call
-          ttm_mem_global_init and ttm_mem_global_release, respectively.
-        </para>
-        <para>
-          Once your global TTM accounting structure is set up and initialized
-          by calling ttm_global_item_ref() on it,
-          you need to create a buffer object TTM to
-          provide a pool for buffer object allocation by clients and the
-          kernel itself.  The type of this object should be TTM_GLOBAL_TTM_BO,
-          and its size should be sizeof(struct ttm_bo_global).  Again,
-          driver-specific init and release functions may be provided,
-          likely eventually calling ttm_bo_global_init() and
-          ttm_bo_global_release(), respectively.  Also, like the previous
-          object, ttm_global_item_ref() is used to create an initial reference
-          count for the TTM, which will call your initialization function.
-        </para>
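-        <para>
-          As a rough sketch (modeled on radeon_ttm.c, and subject to the same
-          outdated-API caveat as the rest of this section), the global memory
-          accounting object could be set up as follows. The foo_ prefixed
-          names are hypothetical driver code.
-        </para>
-        <programlisting><![CDATA[
-static int foo_ttm_mem_global_init(struct ttm_global_reference *ref)
-{
-       /* Forward to the core TTM accounting initializer. */
-       return ttm_mem_global_init(ref->object);
-}
-
-static void foo_ttm_mem_global_release(struct ttm_global_reference *ref)
-{
-       ttm_mem_global_release(ref->object);
-}
-
-static int foo_ttm_global_init(struct foo_device *fdev)
-{
-       struct ttm_global_reference *ref = &fdev->mem_global_ref;
-
-       ref->global_type = TTM_GLOBAL_TTM_MEM;
-       ref->size = sizeof(struct ttm_mem_global);
-       ref->init = &foo_ttm_mem_global_init;
-       ref->release = &foo_ttm_mem_global_release;
-
-       /* Takes the initial reference, calling the init hook above. The
-        * TTM_GLOBAL_TTM_BO object is set up the same way. */
-       return ttm_global_item_ref(ref);
-}
-]]></programlisting>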
-      </sect3>
-    </sect2>
-    <sect2 id="drm-gem">
-      <title>The Graphics Execution Manager (GEM)</title>
-      <para>
-        The GEM design approach has resulted in a memory manager that doesn't
-        provide full coverage of all (or even all common) use cases in its
-        userspace or kernel API. GEM exposes a set of standard memory-related
-        operations to userspace and a set of helper functions to drivers, and
-        lets
-        drivers implement hardware-specific operations with their own private API.
-      </para>
-      <para>
-        The GEM userspace API is described in the
-        <ulink url="http://lwn.net/Articles/283798/"><citetitle>GEM - the Graphics
-        Execution Manager</citetitle></ulink> article on LWN. While slightly
-        outdated, the document provides a good overview of the GEM API principles.
-        Buffer allocation and read and write operations, described as part of the
-        common GEM API, are currently implemented using driver-specific ioctls.
-      </para>
-      <para>
-        GEM is data-agnostic. It manages abstract buffer objects without knowing
-        what individual buffers contain. APIs that require knowledge of buffer
-        contents or purpose, such as buffer allocation or synchronization
-        primitives, are thus outside of the scope of GEM and must be implemented
-        using driver-specific ioctls.
-      </para>
-      <para>
-        On a fundamental level, GEM involves several operations:
-        <itemizedlist>
-          <listitem>Memory allocation and freeing</listitem>
-          <listitem>Command execution</listitem>
-          <listitem>Aperture management at command execution time</listitem>
-        </itemizedlist>
-        Buffer object allocation is relatively straightforward and largely
-        provided by Linux's shmem layer, which provides memory to back each
-        object.
-      </para>
-      <para>
-        Device-specific operations, such as command execution, pinning, buffer
-        read &amp; write, mapping, and domain ownership transfers are left to
-        driver-specific ioctls.
-      </para>
-      <sect3>
-        <title>GEM Initialization</title>
-        <para>
-          Drivers that use GEM must set the DRIVER_GEM bit in the struct
-          <structname>drm_driver</structname>
-          <structfield>driver_features</structfield> field. The DRM core will
-          then automatically initialize the GEM core before calling the
-          <methodname>load</methodname> operation. Behind the scene, this will
-          create a DRM Memory Manager object which provides an address space
-          pool for object allocation.
-        </para>
-        <para>
-          In a KMS configuration, drivers need to allocate and initialize a
-          command ring buffer following core GEM initialization if required by
-          the hardware. UMA devices usually have what is called a "stolen"
-          memory region, which provides space for the initial framebuffer and
-          large, contiguous memory regions required by the device. This space is
-          typically not managed by GEM, and must be initialized separately into
-          its own DRM MM object.
-        </para>
-      </sect3>
-      <sect3>
-        <title>GEM Objects Creation</title>
-        <para>
-          GEM splits creation of GEM objects and allocation of the memory that
-          backs them into two distinct operations.
-        </para>
-        <para>
-          GEM objects are represented by an instance of struct
-          <structname>drm_gem_object</structname>. Drivers usually need to extend
-          GEM objects with private information and thus create a driver-specific
-          GEM object structure type that embeds an instance of struct
-          <structname>drm_gem_object</structname>.
-        </para>
-        <para>
-          To create a GEM object, a driver allocates memory for an instance of its
-          specific GEM object type and initializes the embedded struct
-          <structname>drm_gem_object</structname> with a call to
-          <function>drm_gem_object_init</function>. The function takes a pointer to
-          the DRM device, a pointer to the GEM object and the buffer object size
-          in bytes.
-        </para>
-        <para>
-          GEM uses shmem to allocate anonymous pageable memory.
-          <function>drm_gem_object_init</function> will create an shmfs file of
-          the requested size and store it into the struct
-          <structname>drm_gem_object</structname> <structfield>filp</structfield>
-          field. The memory is used as either main storage for the object when the
-          graphics hardware uses system memory directly or as a backing store
-          otherwise.
-        </para>
-        <para>
-          Drivers are responsible for the actual physical pages allocation by
-          calling <function>shmem_read_mapping_page_gfp</function> for each page.
-          Note that they can decide to allocate pages when initializing the GEM
-          object, or to delay allocation until the memory is needed (for instance
-          when a page fault occurs as a result of a userspace memory access or
-          when the driver needs to start a DMA transfer involving the memory).
-        </para>
-        <para>
-          Anonymous pageable memory allocation is not always desired, for instance
-          when the hardware requires physically contiguous system memory as is
-          often the case in embedded devices. Drivers can create GEM objects with
-          no shmfs backing (called private GEM objects) by initializing them with
-          a call to <function>drm_gem_private_object_init</function> instead of
-          <function>drm_gem_object_init</function>. Storage for private GEM
-          objects must be managed by drivers.
-        </para>
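-        <para>
-          As an illustration, a driver-specific GEM object wrapping a
-          shmem-backed buffer could be created as follows; the foo_ prefixed
-          names are hypothetical.
-        </para>
-        <programlisting><![CDATA[
-struct foo_gem_object {
-       struct drm_gem_object base;
-       struct page **pages;    /* driver-private, e.g. pinned pages */
-};
-
-static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
-                                            size_t size)
-{
-       struct foo_gem_object *obj;
-       int ret;
-
-       size = roundup(size, PAGE_SIZE);
-
-       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-       if (!obj)
-               return ERR_PTR(-ENOMEM);
-
-       /* Creates the shmfs backing store and initializes obj->base. */
-       ret = drm_gem_object_init(dev, &obj->base, size);
-       if (ret) {
-               kfree(obj);
-               return ERR_PTR(ret);
-       }
-
-       return obj;
-}
-]]></programlisting>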
-      </sect3>
-      <sect3>
-        <title>GEM Objects Lifetime</title>
-        <para>
-          All GEM objects are reference-counted by the GEM core. References can be
-          acquired and released by calling <function>drm_gem_object_reference</function>
-          and <function>drm_gem_object_unreference</function> respectively. The
-          caller must hold the <structname>drm_device</structname>
-          <structfield>struct_mutex</structfield> lock when calling
-          <function>drm_gem_object_reference</function>. As a convenience, GEM
-          provides a <function>drm_gem_object_unreference_unlocked</function>
-          function that can be called without holding the lock.
-        </para>
-        <para>
-          When the last reference to a GEM object is released the GEM core calls
-          the <structname>drm_driver</structname>
-          <methodname>gem_free_object</methodname> operation. That operation is
-          mandatory for GEM-enabled drivers and must free the GEM object and all
-          associated resources.
-        </para>
-        <para>
-          <synopsis>void (*gem_free_object) (struct drm_gem_object *obj);</synopsis>
-          Drivers are responsible for freeing all GEM object resources. This includes
-          the resources created by the GEM core, which need to be released with
-          <function>drm_gem_object_release</function>.
-        </para>
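-        <para>
-          Continuing the hypothetical foo_ example above, a minimal
-          <methodname>gem_free_object</methodname> implementation could look
-          like this:
-        </para>
-        <programlisting><![CDATA[
-static void foo_gem_free_object(struct drm_gem_object *gem_obj)
-{
-       struct foo_gem_object *obj =
-               container_of(gem_obj, struct foo_gem_object, base);
-
-       /* Release driver-private resources (pages, GPU mappings, ...) here,
-        * then release the resources created by the GEM core. */
-       drm_gem_object_release(gem_obj);
-       kfree(obj);
-}
-]]></programlisting>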
-      </sect3>
-      <sect3>
-        <title>GEM Objects Naming</title>
-        <para>
-          Communication between userspace and the kernel refers to GEM objects
-          using local handles, global names or, more recently, file descriptors.
-          All of those are 32-bit integer values; the usual Linux kernel limits
-          apply to the file descriptors.
-        </para>
-        <para>
-          GEM handles are local to a DRM file. Applications get a handle to a GEM
-          object through a driver-specific ioctl, and can use that handle to refer
-          to the GEM object in other standard or driver-specific ioctls. Closing a
-          DRM file handle frees all its GEM handles and dereferences the
-          associated GEM objects.
-        </para>
-        <para>
-          To create a handle for a GEM object drivers call
-          <function>drm_gem_handle_create</function>. The function takes a pointer
-          to the DRM file and the GEM object and returns a locally unique handle.
-          When the handle is no longer needed drivers delete it with a call to
-          <function>drm_gem_handle_delete</function>. Finally the GEM object
-          associated with a handle can be retrieved by a call to
-          <function>drm_gem_object_lookup</function>.
-        </para>
-        <para>
-          Handles don't take ownership of GEM objects, they only take a reference
-          to the object that will be dropped when the handle is destroyed. To
-          avoid leaking GEM objects, drivers must make sure they drop the
-          reference(s) they own (such as the initial reference taken at object
-          creation time) as appropriate, without any special consideration for the
-          handle. For example, in the particular case of combined GEM object and
-          handle creation in the implementation of the
-          <methodname>dumb_create</methodname> operation, drivers must drop the
-          initial reference to the GEM object before returning the handle.
-        </para>
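-        <para>
-          A sketch of combined object and handle creation, as done by a
-          hypothetical <methodname>dumb_create</methodname> implementation
-          reusing the foo_gem_create() helper sketched earlier:
-        </para>
-        <programlisting><![CDATA[
-static int foo_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
-                          struct drm_mode_create_dumb *args)
-{
-       struct foo_gem_object *obj;
-       int ret;
-
-       args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
-       args->size = args->pitch * args->height;
-
-       obj = foo_gem_create(dev, args->size);
-       if (IS_ERR(obj))
-               return PTR_ERR(obj);
-
-       ret = drm_gem_handle_create(file_priv, &obj->base, &args->handle);
-       /* Drop the initial reference: on success the handle now holds the
-        * only reference, on failure this frees the object. */
-       drm_gem_object_unreference_unlocked(&obj->base);
-
-       return ret;
-}
-]]></programlisting>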
-        <para>
-          GEM names are similar in purpose to handles but are not local to DRM
-          files. They can be passed between processes to reference a GEM object
-          globally. Names can't be used directly to refer to objects in the DRM
-          API, applications must convert handles to names and names to handles
-          using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
-          respectively. The conversion is handled by the DRM core without any
-          driver-specific support.
-        </para>
-        <para>
-          GEM also supports buffer sharing with dma-buf file descriptors through
-          PRIME. GEM-based drivers must use the provided helper functions to
-          implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />.
-          Since sharing file descriptors is inherently more secure than the
-          easily guessable and global GEM names, it is the preferred buffer
-          sharing mechanism. Sharing buffers through GEM names is only supported
-          for legacy userspace. Furthermore, PRIME allows cross-device
-        </para>
-      </sect3>
-      <sect3 id="drm-gem-objects-mapping">
-        <title>GEM Objects Mapping</title>
-        <para>
-          Because mapping operations are fairly heavyweight, GEM favours
-          read/write-like access to buffers, implemented through driver-specific
-          ioctls, over mapping buffers to userspace. However, when random access
-          to the buffer is needed (to perform software rendering for instance),
-          direct access to the object can be more efficient.
-        </para>
-        <para>
-          The mmap system call can't be used directly to map GEM objects, as they
-          don't have their own file handle. Two alternative methods currently
-          co-exist to map GEM objects to userspace. The first method uses a
-          driver-specific ioctl to perform the mapping operation, calling
-          <function>do_mmap</function> under the hood. This approach is
-          considered dubious, is discouraged for new GEM-enabled drivers, and
-          will thus not be described here.
-        </para>
-        <para>
-          The second method uses the mmap system call on the DRM file handle.
-          <synopsis>void *mmap(void *addr, size_t length, int prot, int flags, int fd,
-             off_t offset);</synopsis>
-          DRM identifies the GEM object to be mapped by a fake offset passed
-          through the mmap offset argument. Prior to being mapped, a GEM object
-          must thus be associated with a fake offset. To do so, drivers must call
-          <function>drm_gem_create_mmap_offset</function> on the object.
-        </para>
-        <para>
-          Once allocated, the fake offset value
-          must be passed to the application in a driver-specific way and can then
-          be used as the mmap offset argument.
-        </para>
-        <para>
-          The GEM core provides a helper method <function>drm_gem_mmap</function>
-          to handle object mapping. The method can be set directly as the mmap
-          file operation handler. It will look up the GEM object based on the
-          offset value and set the VMA operations to the
-          <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
-          field. Note that <function>drm_gem_mmap</function> doesn't map memory to
-          userspace, but relies on the driver-provided fault handler to map pages
-          individually.
-        </para>
-        <para>
-          To use <function>drm_gem_mmap</function>, drivers must fill the struct
-          <structname>drm_driver</structname> <structfield>gem_vm_ops</structfield>
-          field with a pointer to VM operations.
-        </para>
-        <para>
-          <synopsis>struct vm_operations_struct *gem_vm_ops
-
-  struct vm_operations_struct {
-          void (*open)(struct vm_area_struct * area);
-          void (*close)(struct vm_area_struct * area);
-          int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
-  };</synopsis>
-        </para>
-        <para>
-          The <methodname>open</methodname> and <methodname>close</methodname>
-          operations must update the GEM object reference count. Drivers can use
-          the <function>drm_gem_vm_open</function> and
-          <function>drm_gem_vm_close</function> helper functions directly as open
-          and close handlers.
-        </para>
-        <para>
-          The fault operation handler is responsible for mapping individual pages
-          to userspace when a page fault occurs. Depending on the memory
-          allocation scheme, drivers can allocate pages at fault time, or can
-          decide to allocate memory for the GEM object at the time the object is
-          created.
-        </para>
-        <para>
-          Drivers that want to map the GEM object upfront instead of handling page
-          faults can implement their own mmap file operation handler.
-        </para>
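-        <para>
-          A simplified sketch of the VM operations for a shmem-backed object
-          whose pages were pinned at creation time (for instance with
-          <function>drm_gem_get_pages</function>) into the hypothetical
-          obj-&gt;pages array. Note that the struct vm_fault layout has changed
-          across kernel versions, and real drivers need page reference and
-          locking care beyond what is shown here.
-        </para>
-        <programlisting><![CDATA[
-static int foo_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-       struct drm_gem_object *gem_obj = vma->vm_private_data;
-       struct foo_gem_object *obj =
-               container_of(gem_obj, struct foo_gem_object, base);
-       unsigned long addr = (unsigned long)vmf->virtual_address;
-       pgoff_t pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
-       int ret;
-
-       /* Map the single page backing this offset into the faulting VMA. */
-       ret = vm_insert_page(vma, addr, obj->pages[pgoff]);
-       switch (ret) {
-       case 0:
-       case -EBUSY:    /* Raced with another fault: already mapped. */
-               return VM_FAULT_NOPAGE;
-       case -ENOMEM:
-               return VM_FAULT_OOM;
-       default:
-               return VM_FAULT_SIGBUS;
-       }
-}
-
-static const struct vm_operations_struct foo_gem_vm_ops = {
-       .fault = foo_gem_fault,
-       .open = drm_gem_vm_open,        /* takes a GEM object reference */
-       .close = drm_gem_vm_close,      /* drops it again */
-};
-]]></programlisting>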
-      </sect3>
-      <sect3>
-        <title>Memory Coherency</title>
-        <para>
-          When mapped to the device or used in a command buffer, backing pages
-          for an object are flushed to memory and marked write combined so as to
-          be coherent with the GPU. Likewise, if the CPU accesses an object
-          after the GPU has finished rendering to the object, then the object
-          must be made coherent with the CPU's view of memory, usually involving
-          GPU cache flushing of various kinds. This core CPU&lt;-&gt;GPU
-          coherency management is provided by a device-specific ioctl, which
-          evaluates an object's current domain and performs any necessary
-          flushing or synchronization to put the object into the desired
-          coherency domain (note that the object may be busy, i.e. an active
-          render target; in that case, setting the domain blocks the client and
-          waits for rendering to complete before performing any necessary
-          flushing operations).
-        </para>
-      </sect3>
-      <sect3>
-        <title>Command Execution</title>
-        <para>
-          Perhaps the most important GEM function for GPU devices is providing a
-          command execution interface to clients. Client programs construct
-          command buffers containing references to previously allocated memory
-          objects, and then submit them to GEM. At that point, GEM takes care to
-          bind all the objects into the GTT, execute the buffer, and provide
-          necessary synchronization between clients accessing the same buffers.
-          This often involves evicting some objects from the GTT and re-binding
-          others (a fairly expensive operation), and providing relocation
-          support which hides fixed GTT offsets from clients. Clients must take
-          care not to submit command buffers that reference more objects than
-          can fit in the GTT; otherwise, GEM will reject them and no rendering
-          will occur. Similarly, if several objects in the buffer require fence
-          registers to be allocated for correct rendering (e.g. 2D blits on
-          pre-965 chips), care must be taken not to require more fence registers
-          than are available to the client. Such resource management should be
-          abstracted from the client in libdrm.
-        </para>
-      </sect3>
-    </sect2>
-    <sect2>
-      <title>GEM Function Reference</title>
-!Edrivers/gpu/drm/drm_gem.c
-!Iinclude/drm/drm_gem.h
-    </sect2>
-    <sect2>
-      <title>VMA Offset Manager</title>
-!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
-!Edrivers/gpu/drm/drm_vma_manager.c
-!Iinclude/drm/drm_vma_manager.h
-    </sect2>
-    <sect2 id="drm-prime-support">
-      <title>PRIME Buffer Sharing</title>
-      <para>
-        PRIME is the cross device buffer sharing framework in drm, originally
-        created for the OPTIMUS range of multi-gpu platforms. To userspace
-        PRIME buffers are dma-buf based file descriptors.
-      </para>
-      <sect3>
-        <title>Overview and Driver Interface</title>
-        <para>
-          Similar to GEM global names, PRIME file descriptors are
-          also used to share buffer objects across processes. They offer
-          additional security: as file descriptors must be explicitly sent over
-          UNIX domain sockets to be shared between applications, they can't be
-          guessed like the globally unique GEM names.
-        </para>
-        <para>
-          Drivers that support the PRIME
-          API must set the DRIVER_PRIME bit in the struct
-          <structname>drm_driver</structname>
-          <structfield>driver_features</structfield> field, and implement the
-          <methodname>prime_handle_to_fd</methodname> and
-          <methodname>prime_fd_to_handle</methodname> operations.
-        </para>
-        <para>
-          <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
-                          struct drm_file *file_priv, uint32_t handle,
-                          uint32_t flags, int *prime_fd);
-int (*prime_fd_to_handle)(struct drm_device *dev,
-                          struct drm_file *file_priv, int prime_fd,
-                          uint32_t *handle);</synopsis>
-            Those two operations convert a handle to a PRIME file descriptor and
-            vice versa. Drivers must use the kernel dma-buf buffer sharing framework
-            to manage the PRIME file descriptors. Similar to the mode setting
-            API, PRIME is agnostic to the underlying buffer object manager, as
-            long as handles are 32-bit unsigned integers.
-          </para>
-          <para>
-            While non-GEM drivers must implement the operations themselves, GEM
-            drivers must use the <function>drm_gem_prime_handle_to_fd</function>
-            and <function>drm_gem_prime_fd_to_handle</function> helper functions.
-            Those helpers rely on the driver
-            <methodname>gem_prime_export</methodname> and
-            <methodname>gem_prime_import</methodname> operations to create a dma-buf
-            instance from a GEM object (dma-buf exporter role) and to create a GEM
-            object from a dma-buf instance (dma-buf importer role).
-          </para>
-          <para>
-            <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
-                             struct drm_gem_object *obj,
-                             int flags);
-struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
-                                            struct dma_buf *dma_buf);</synopsis>
-            These two operations are mandatory for GEM drivers that support
-            PRIME.
-          </para>
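-          <para>
-            For GEM drivers without special import or export needs, wiring up
-            PRIME can thus be as simple as pointing the operations at the
-            core-provided defaults (foo_driver is hypothetical):
-          </para>
-          <programlisting><![CDATA[
-static struct drm_driver foo_driver = {
-       .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
-       /* Generic PRIME ioctl implementations from the DRM core. */
-       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       /* Default export/import helpers; drivers with special needs
-        * (e.g. CMA-backed objects) provide their own implementations. */
-       .gem_prime_export = drm_gem_prime_export,
-       .gem_prime_import = drm_gem_prime_import,
-       /* ... */
-};
-]]></programlisting>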
-        </sect3>
-      <sect3>
-        <title>PRIME Helper Functions</title>
-!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
-      </sect3>
-    </sect2>
-    <sect2>
-      <title>PRIME Function References</title>
-!Edrivers/gpu/drm/drm_prime.c
-    </sect2>
-    <sect2>
-      <title>DRM MM Range Allocator</title>
-      <sect3>
-        <title>Overview</title>
-!Pdrivers/gpu/drm/drm_mm.c Overview
-      </sect3>
-      <sect3>
-        <title>LRU Scan/Eviction Support</title>
-!Pdrivers/gpu/drm/drm_mm.c lru scan roaster
-      </sect3>
-      </sect2>
-    <sect2>
-      <title>DRM MM Range Allocator Function References</title>
-!Edrivers/gpu/drm/drm_mm.c
-!Iinclude/drm/drm_mm.h
-    </sect2>
-    <sect2>
-      <title>CMA Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_gem_cma_helper.c cma helpers
-!Edrivers/gpu/drm/drm_gem_cma_helper.c
-!Iinclude/drm/drm_gem_cma_helper.h
-    </sect2>
-  </sect1>
-
-  <!-- Internals: mode setting -->
-
-  <sect1 id="drm-mode-setting">
-    <title>Mode Setting</title>
-    <para>
-      Drivers must initialize the mode setting core by calling
-      <function>drm_mode_config_init</function> on the DRM device. The function
-      initializes the <structname>drm_device</structname>
-      <structfield>mode_config</structfield> field and never fails. Once done,
-      mode configuration must be set up by initializing the following fields
-      (a minimal initialization sketch follows the list).
-    </para>
-    <itemizedlist>
-      <listitem>
-        <synopsis>int min_width, min_height;
-int max_width, max_height;</synopsis>
-        <para>
-         Minimum and maximum width and height of the frame buffers in pixel
-         units.
-       </para>
-      </listitem>
-      <listitem>
-        <synopsis>struct drm_mode_config_funcs *funcs;</synopsis>
-       <para>Mode setting functions.</para>
-      </listitem>
-    </itemizedlist>
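-    <para>
-      A minimal initialization sketch, assuming an atomic driver that uses the
-      CMA and atomic helpers (the foo_ names are hypothetical):
-    </para>
-    <programlisting><![CDATA[
-static const struct drm_mode_config_funcs foo_mode_config_funcs = {
-       .fb_create = drm_fb_cma_create,
-       .atomic_check = drm_atomic_helper_check,
-       .atomic_commit = drm_atomic_helper_commit,
-};
-
-static void foo_mode_config_init(struct drm_device *dev)
-{
-       drm_mode_config_init(dev);
-
-       /* Frame buffer size limits, in pixels. */
-       dev->mode_config.min_width = 0;
-       dev->mode_config.min_height = 0;
-       dev->mode_config.max_width = 4096;
-       dev->mode_config.max_height = 4096;
-
-       dev->mode_config.funcs = &foo_mode_config_funcs;
-}
-]]></programlisting>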
-    <sect2>
-      <title>Display Modes Function Reference</title>
-!Iinclude/drm/drm_modes.h
-!Edrivers/gpu/drm/drm_modes.c
-    </sect2>
-    <sect2>
-      <title>Atomic Mode Setting Function Reference</title>
-!Edrivers/gpu/drm/drm_atomic.c
-!Idrivers/gpu/drm/drm_atomic.c
-    </sect2>
-    <sect2>
-      <title>Frame Buffer Abstraction</title>
-      <para>
-        Frame buffers are abstract memory objects that provide a source of
-        pixels to scanout to a CRTC. Applications explicitly request the
-        creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls and
-        receive an opaque handle that can be passed to the KMS CRTC control,
-        plane configuration and page flip functions.
-      </para>
-      <para>
-        Frame buffers rely on the underlying memory manager for low-level memory
-        operations. When creating a frame buffer applications pass a memory
-        handle (or a list of memory handles for multi-planar formats) through
-       the <parameter>drm_mode_fb_cmd2</parameter> argument. For drivers using
-       GEM as their userspace buffer management interface this would be a GEM
-       handle.  Drivers are however free to use their own backing storage object
-       handles, e.g. vmwgfx directly exposes special TTM handles to userspace
-       and so expects TTM handles in the create ioctl and not GEM handles.
-      </para>
-      <para>
-       The lifetime of a drm framebuffer is controlled with a reference count;
-       drivers can grab additional references with
-       <function>drm_framebuffer_reference</function> and drop them
-       again with <function>drm_framebuffer_unreference</function>. For
-       driver-private framebuffers for which the last reference is never
-       dropped (e.g. for the fbdev framebuffer when the struct
-       <structname>drm_framebuffer</structname> is embedded into the fbdev
-       helper struct) drivers can manually clean up a framebuffer at module
-       unload time with
-       <function>drm_framebuffer_unregister_private</function>.
-      </para>
-    </sect2>
-    <sect2>
-      <title>Dumb Buffer Objects</title>
-      <para>
-       The KMS API doesn't standardize backing storage object creation and
-       leaves it to driver-specific ioctls. Furthermore actually creating a
-       buffer object even for GEM-based drivers is done through a
-       driver-specific ioctl - GEM only has a common userspace interface for
-       sharing and destroying objects. While not an issue for full-fledged
-       graphics stacks that include device-specific userspace components (in
-       libdrm for instance), this limit makes DRM-based early boot graphics
-       unnecessarily complex.
-      </para>
-      <para>
-        Dumb objects partly alleviate the problem by providing a standard
-        API to create dumb buffers suitable for scanout, which can then be used
-        to create KMS frame buffers.
-      </para>
-      <para>
-        To support dumb objects drivers must implement the
-        <methodname>dumb_create</methodname>,
-        <methodname>dumb_destroy</methodname> and
-        <methodname>dumb_map_offset</methodname> operations (a sketch of the
-        latter follows the list below).
-      </para>
-      <itemizedlist>
-        <listitem>
-          <synopsis>int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev,
-                   struct drm_mode_create_dumb *args);</synopsis>
-          <para>
-            The <methodname>dumb_create</methodname> operation creates a driver
-           object (GEM or TTM handle) suitable for scanout based on the
-           width, height and bits per pixel from the struct
-           <structname>drm_mode_create_dumb</structname> argument. It fills the
-           argument's <structfield>handle</structfield>,
-           <structfield>pitch</structfield> and <structfield>size</structfield>
-           fields with a handle for the newly created object and its line
-            pitch and size in bytes.
-          </para>
-        </listitem>
-        <listitem>
-          <synopsis>int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev,
-                    uint32_t handle);</synopsis>
-          <para>
-            The <methodname>dumb_destroy</methodname> operation destroys a dumb
-            object created by <methodname>dumb_create</methodname>.
-          </para>
-        </listitem>
-        <listitem>
-          <synopsis>int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev,
-                       uint32_t handle, uint64_t *offset);</synopsis>
-          <para>
-            The <methodname>dumb_map_offset</methodname> operation associates an
-            mmap fake offset with the object given by the handle and returns
-            it. Drivers must use the
-            <function>drm_gem_create_mmap_offset</function> function to
-            associate the fake offset as described in
-            <xref linkend="drm-gem-objects-mapping"/>.
-          </para>
-        </listitem>
-      </itemizedlist>
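-      <para>
-        A sketch of a <methodname>dumb_map_offset</methodname> implementation
-        for a GEM-based driver. Note that the
-        <function>drm_gem_object_lookup</function> signature has varied across
-        kernel versions (recent kernels drop the device argument).
-      </para>
-      <programlisting><![CDATA[
-static int foo_dumb_map_offset(struct drm_file *file_priv,
-                              struct drm_device *dev,
-                              uint32_t handle, uint64_t *offset)
-{
-       struct drm_gem_object *obj;
-       int ret;
-
-       obj = drm_gem_object_lookup(dev, file_priv, handle);
-       if (!obj)
-               return -ENOENT;
-
-       /* Allocate the fake mmap offset, if not already done. */
-       ret = drm_gem_create_mmap_offset(obj);
-       if (!ret)
-               *offset = drm_vma_node_offset_addr(&obj->vma_node);
-
-       drm_gem_object_unreference_unlocked(obj);
-       return ret;
-}
-]]></programlisting>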
-      <para>
-        Note that dumb objects must not be used for gpu acceleration, as has
-       been attempted on some ARM embedded platforms. Such drivers really need
-       a hardware-specific ioctl to allocate suitable buffer objects.
-      </para>
-    </sect2>
-    <sect2>
-      <title>Output Polling</title>
-      <synopsis>void (*output_poll_changed)(struct drm_device *dev);</synopsis>
-      <para>
-        This operation notifies the driver that the status of one or more
-        connectors has changed. Drivers that use the fb helper can just call the
-        <function>drm_fb_helper_hotplug_event</function> function to handle this
-        operation.
-      </para>
-    </sect2>
-    <sect2>
-      <title>Locking</title>
-      <para>
-        Besides some lookup structures with their own locking (which is hidden
-       behind the interface functions) most of the modeset state is protected
-       by the <code>dev-&gt;mode_config.lock</code> mutex and additionally by
-       per-crtc locks to allow cursor updates, pageflips and similar operations
-       to occur concurrently with background tasks like output detection.
-       Operations which cross domains like a full modeset always grab all
-       locks. Drivers therefore need to protect resources shared between crtcs
-       with additional locking. They also need to be careful to always grab the
-       relevant crtc locks if a modeset function touches crtc state, e.g. for
-       load detection (which only grabs the <code>mode_config.lock</code>
-       to allow concurrent screen updates on live crtcs).
-      </para>
-    </sect2>
-  </sect1>
-
-  <!-- Internals: kms initialization and cleanup -->
-
-  <sect1 id="drm-kms-init">
-    <title>KMS Initialization and Cleanup</title>
-    <para>
-      A KMS device is abstracted and exposed as a set of planes, CRTCs, encoders
-      and connectors. KMS drivers must thus create and initialize all those
-      objects at load time after initializing mode setting.
-    </para>
-    <sect2>
-      <title>CRTCs (struct <structname>drm_crtc</structname>)</title>
-      <para>
-        A CRTC is an abstraction representing a part of the chip that contains a
-       pointer to a scanout buffer. Therefore, the number of CRTCs available
-       determines how many independent scanout buffers can be active at any
-       given time. The CRTC structure contains several fields to support this:
-       a pointer to some video memory (abstracted as a frame buffer object), a
-       display mode, and an (x, y) offset into the video memory to support
-       panning or configurations where one piece of video memory spans multiple
-       CRTCs.
-      </para>
-      <sect3>
-        <title>CRTC Initialization</title>
-        <para>
-          A KMS device must create and register at least one struct
-          <structname>drm_crtc</structname> instance. The instance is allocated
-          and zeroed by the driver, possibly as part of a larger structure, and
-          registered with a call to <function>drm_crtc_init</function> with a
-          pointer to CRTC functions.
-        </para>
-      </sect3>
-    </sect2>
-    <sect2>
-      <title>Planes (struct <structname>drm_plane</structname>)</title>
-      <para>
-        A plane represents an image source that can be blended with or overlaid
-       on top of a CRTC during the scanout process. Planes are associated with
-       a frame buffer to crop a portion of the image memory (source) and
-       optionally scale it to a destination size. The result is then blended
-       with or overlaid on top of a CRTC.
-      </para>
-      <para>
-      The DRM core recognizes three types of planes:
-      <itemizedlist>
-        <listitem>
-        DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC.  Primary
-        planes are the planes operated upon by CRTC modesetting and flipping
-       operations described in the page_flip hook in <structname>drm_crtc_funcs</structname>.
-        </listitem>
-        <listitem>
-        DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC.  Cursor
-        planes are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
-        DRM_IOCTL_MODE_CURSOR2 ioctls.
-        </listitem>
-        <listitem>
-        DRM_PLANE_TYPE_OVERLAY represents all non-primary, non-cursor planes.
-        Some drivers refer to these types of planes as "sprites" internally.
-        </listitem>
-      </itemizedlist>
-      For compatibility with legacy userspace, only overlay planes are made
-      available to userspace by default.  Userspace clients may set the
-      DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that
-      they wish to receive a universal plane list containing all plane types.
-      </para>
-      <sect3>
-        <title>Plane Initialization</title>
-        <para>
-          To create a plane, a KMS driver allocates and
-          zeroes an instance of struct <structname>drm_plane</structname>
-          (possibly as part of a larger structure) and registers it with a call
-          to <function>drm_universal_plane_init</function>. The function takes a bitmask
-          of the CRTCs that can be associated with the plane, a pointer to the
-          plane functions, a list of supported formats, and the type of
-          plane (primary, cursor, or overlay) being initialized.
-        </para>
-        <para>
-          Cursor and overlay planes are optional.  All drivers should provide
-          one primary plane per CRTC (although this requirement may change in
-          the future); drivers that do not wish to provide special handling for
-          primary planes may make use of the helper functions described in
-          <xref linkend="drm-kms-planehelpers"/> to create and register a
-          primary plane with standard capabilities.
-        </para>
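-        <para>
-          A sketch of primary plane registration; foo_plane_funcs is a
-          hypothetical driver vtable, and recent kernels take an additional
-          printf-style name argument (NULL here for the default name):
-        </para>
-        <programlisting><![CDATA[
-static const uint32_t foo_plane_formats[] = {
-       DRM_FORMAT_XRGB8888,
-       DRM_FORMAT_ARGB8888,
-};
-
-       /* Register a primary plane usable on the first CRTC only. */
-       ret = drm_universal_plane_init(dev, &foo->primary, BIT(0),
-                                      &foo_plane_funcs,
-                                      foo_plane_formats,
-                                      ARRAY_SIZE(foo_plane_formats),
-                                      DRM_PLANE_TYPE_PRIMARY, NULL);
-]]></programlisting>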
-      </sect3>
-    </sect2>
-    <sect2>
-      <title>Encoders (struct <structname>drm_encoder</structname>)</title>
-      <para>
-        An encoder takes pixel data from a CRTC and converts it to a format
-       suitable for any attached connectors. On some devices, it may be
-       possible to have a CRTC send data to more than one encoder. In that
-       case, both encoders would receive data from the same scanout buffer,
-       resulting in a "cloned" display configuration across the connectors
-       attached to each encoder.
-      </para>
-      <sect3>
-        <title>Encoder Initialization</title>
-        <para>
-          As for CRTCs, a KMS driver must create, initialize and register at
-          least one struct <structname>drm_encoder</structname> instance. The
-          instance is allocated and zeroed by the driver, possibly as part of a
-          larger structure.
-        </para>
-        <para>
-          Drivers must initialize the struct <structname>drm_encoder</structname>
-          <structfield>possible_crtcs</structfield> and
-          <structfield>possible_clones</structfield> fields before registering the
-          encoder. Both fields are bitmasks of, respectively, the CRTCs that the
-          encoder can be connected to and the sibling encoders that are
-          candidates for cloning.
-        </para>
-        <para>
-          After being initialized, the encoder must be registered with a call to
-          <function>drm_encoder_init</function>. The function takes a pointer to
-          the encoder functions and an encoder type. Supported types are
-          <itemizedlist>
-            <listitem>
-              DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
-              </listitem>
-            <listitem>
-              DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
-            </listitem>
-            <listitem>
-              DRM_MODE_ENCODER_LVDS for display panels
-            </listitem>
-            <listitem>
-              DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video, Component,
-              SCART)
-            </listitem>
-            <listitem>
-              DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
-            </listitem>
-          </itemizedlist>
-        </para>
-        <para>
-          Encoders must be attached to a CRTC to be used. DRM drivers leave
-          encoders unattached at initialization time. Applications (or the fbdev
-          compatibility layer when implemented) are responsible for attaching the
-          encoders they want to use to a CRTC.
-        </para>
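-        <para>
-          A sketch of encoder setup for a hypothetical HDMI encoder; recent
-          kernels take an additional printf-style name argument (NULL here):
-        </para>
-        <programlisting><![CDATA[
-       foo->encoder.possible_crtcs = BIT(0);   /* first CRTC only */
-       foo->encoder.possible_clones = 0;       /* no cloning support */
-
-       ret = drm_encoder_init(dev, &foo->encoder, &foo_encoder_funcs,
-                              DRM_MODE_ENCODER_TMDS, NULL);
-]]></programlisting>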
-      </sect3>
-    </sect2>
-    <sect2>
-      <title>Connectors (struct <structname>drm_connector</structname>)</title>
-      <para>
-        A connector is the final destination for pixel data on a device, and
-       usually connects directly to an external display device like a monitor
-       or laptop panel. A connector can only be attached to one encoder at a
-       time. The connector is also the structure where information about the
-       attached display is kept, so it contains fields for display data, EDID
-       data, DPMS &amp; connection status, and information about modes
-       supported on the attached displays.
-      </para>
-      <sect3>
-        <title>Connector Initialization</title>
-        <para>
-          Finally a KMS driver must create, initialize, register and attach at
-          least one struct <structname>drm_connector</structname> instance. The
-          instance is created like other KMS objects, and is initialized by setting the
-          following fields.
-        </para>
-        <variablelist>
-          <varlistentry>
-            <term><structfield>interlace_allowed</structfield></term>
-            <listitem><para>
-              Whether the connector can handle interlaced modes.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term><structfield>doublescan_allowed</structfield></term>
-            <listitem><para>
-              Whether the connector can handle doublescan.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term><structfield>display_info
-            </structfield></term>
-            <listitem><para>
-              Display information is filled from EDID information when a display
-              is detected. For non-hot-pluggable displays such as flat panels in
-              embedded systems, the driver should initialize the
-              <structfield>display_info</structfield>.<structfield>width_mm</structfield>
-              and
-              <structfield>display_info</structfield>.<structfield>height_mm</structfield>
-              fields with the physical size of the display.
-            </para></listitem>
-          </varlistentry>
-          <varlistentry>
-            <term id="drm-kms-connector-polled"><structfield>polled</structfield></term>
-            <listitem><para>
-              Connector polling mode, a combination of
-              <variablelist>
-                <varlistentry>
-                  <term>DRM_CONNECTOR_POLL_HPD</term>
-                  <listitem><para>
-                    The connector generates hotplug events and doesn't need to be
-                    periodically polled. The CONNECT and DISCONNECT flags must not
-                    be set together with the HPD flag.
-                  </para></listitem>
-                </varlistentry>
-                <varlistentry>
-                  <term>DRM_CONNECTOR_POLL_CONNECT</term>
-                  <listitem><para>
-                    Periodically poll the connector for connection.
-                  </para></listitem>
-                </varlistentry>
-                <varlistentry>
-                  <term>DRM_CONNECTOR_POLL_DISCONNECT</term>
-                  <listitem><para>
-                    Periodically poll the connector for disconnection.
-                  </para></listitem>
-                </varlistentry>
-              </variablelist>
-              Set to 0 for connectors that don't support connection status
-              discovery.
-            </para></listitem>
-          </varlistentry>
-        </variablelist>
-        <para>
-          The connector is then registered with a call to
-          <function>drm_connector_init</function> with a pointer to the connector
-          functions and a connector type, and exposed through sysfs with a call to
-          <function>drm_connector_register</function>.
-        </para>
-        <para>
-          Supported connector types are
-          <itemizedlist>
-            <listitem>DRM_MODE_CONNECTOR_VGA</listitem>
-            <listitem>DRM_MODE_CONNECTOR_DVII</listitem>
-            <listitem>DRM_MODE_CONNECTOR_DVID</listitem>
-            <listitem>DRM_MODE_CONNECTOR_DVIA</listitem>
-            <listitem>DRM_MODE_CONNECTOR_Composite</listitem>
-            <listitem>DRM_MODE_CONNECTOR_SVIDEO</listitem>
-            <listitem>DRM_MODE_CONNECTOR_LVDS</listitem>
-            <listitem>DRM_MODE_CONNECTOR_Component</listitem>
-            <listitem>DRM_MODE_CONNECTOR_9PinDIN</listitem>
-            <listitem>DRM_MODE_CONNECTOR_DisplayPort</listitem>
-            <listitem>DRM_MODE_CONNECTOR_HDMIA</listitem>
-            <listitem>DRM_MODE_CONNECTOR_HDMIB</listitem>
-            <listitem>DRM_MODE_CONNECTOR_TV</listitem>
-            <listitem>DRM_MODE_CONNECTOR_eDP</listitem>
-            <listitem>DRM_MODE_CONNECTOR_VIRTUAL</listitem>
-          </itemizedlist>
-        </para>
-        <para>
-          Connectors must be attached to an encoder to be used. For devices that
-          map connectors to encoders 1:1, the connector should be attached at
-          initialization time with a call to
-          <function>drm_mode_connector_attach_encoder</function>. The driver must
-          also set the <structname>drm_connector</structname>
-          <structfield>encoder</structfield> field to point to the attached
-          encoder.
-        </para>
-        <para>
-          Finally, drivers must initialize connector state change detection
-          with a call to <function>drm_kms_helper_poll_init</function>. If at
-          least one connector is pollable but can't generate hotplug interrupts
-          (indicated by the DRM_CONNECTOR_POLL_CONNECT and
-          DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
-          automatically be queued to periodically poll for changes. Connectors
-          that can generate hotplug interrupts must be marked with the
-          DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
-          call <function>drm_helper_hpd_irq_event</function>. The function will
-          queue a delayed work to check the state of all connectors, but no
-          periodic polling will be done.
-        </para>
-      </sect3>
-      <sect3>
-        <title>Connector Operations</title>
-        <note><para>
-          Unless otherwise stated, all operations are mandatory.
-        </para></note>
-        <sect4>
-          <title>DPMS</title>
-          <synopsis>void (*dpms)(struct drm_connector *connector, int mode);</synopsis>
-          <para>
-            The DPMS operation sets the power state of a connector. The mode
-            argument is one of
-            <itemizedlist>
-              <listitem><para>DRM_MODE_DPMS_ON</para></listitem>
-              <listitem><para>DRM_MODE_DPMS_STANDBY</para></listitem>
-              <listitem><para>DRM_MODE_DPMS_SUSPEND</para></listitem>
-              <listitem><para>DRM_MODE_DPMS_OFF</para></listitem>
-            </itemizedlist>
-          </para>
-          <para>
-            In all but DPMS_ON mode the encoder to which the connector is attached
-            should put the display in low-power mode by driving its signals
-            appropriately. If more than one connector is attached to the encoder
-            care should be taken not to change the power state of other displays as
-            a side effect. Low-power mode should be propagated to the encoders and
-            CRTCs when all related connectors are put in low-power mode.
-          </para>
-        </sect4>
-        <sect4>
-          <title>Modes</title>
-          <synopsis>int (*fill_modes)(struct drm_connector *connector, uint32_t max_width,
-                      uint32_t max_height);</synopsis>
-          <para>
-            Fill the mode list with all supported modes for the connector. If the
-            <parameter>max_width</parameter> and <parameter>max_height</parameter>
-            arguments are non-zero, the implementation must ignore all modes wider
-            than <parameter>max_width</parameter> or higher than
-            <parameter>max_height</parameter>.
-          </para>
-          <para>
-            In this operation the connector must also fill its
-            <structfield>display_info</structfield>
-            <structfield>width_mm</structfield> and
-            <structfield>height_mm</structfield> fields with the physical size of
-            the connected display in millimeters. The fields should be set to 0
-            if the value isn't known or is not applicable (for instance for
-            projector devices).
-          </para>
-        </sect4>
-        <sect4>
-          <title>Connection Status</title>
-          <para>
-            The connection status is updated through polling or hotplug events when
-            supported (see <xref linkend="drm-kms-connector-polled"/>). The status
-            value is reported to userspace through ioctls and must not be used
-            inside the driver, as it only gets initialized by a call to
-            <function>drm_mode_getconnector</function> from userspace.
-          </para>
-          <synopsis>enum drm_connector_status (*detect)(struct drm_connector *connector,
-                                        bool force);</synopsis>
-          <para>
-            Check to see if anything is attached to the connector. The
-            <parameter>force</parameter> parameter is set to false whilst polling or
-            to true when checking the connector due to user request.
-            <parameter>force</parameter> can be used by the driver to avoid
-            expensive, destructive operations during automated probing.
-          </para>
-          <para>
-            Return connector_status_connected if something is connected to the
-            connector, connector_status_disconnected if nothing is connected and
-            connector_status_unknown if the connection state isn't known.
-          </para>
-          <para>
-            Drivers should only return connector_status_connected if the connection
-            status has really been probed as connected. Connectors that can't detect
-            the connection status, or failed connection status probes, should return
-            connector_status_unknown.
-          </para>
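-          <para>
-            A sketch of a detect implementation based on a hypothetical
-            hotplug-detect GPIO, together with the connector functions it
-            plugs into (foo_connector_destroy is sketched in the Cleanup
-            section below):
-          </para>
-          <programlisting><![CDATA[
-static enum drm_connector_status
-foo_connector_detect(struct drm_connector *connector, bool force)
-{
-       struct foo_connector *foo = to_foo_connector(connector);
-
-       /* Cheap probe; expensive or destructive checks should be gated on
-        * the force parameter. */
-       if (gpiod_get_value_cansleep(foo->hpd_gpio))
-               return connector_status_connected;
-
-       return connector_status_disconnected;
-}
-
-static const struct drm_connector_funcs foo_connector_funcs = {
-       .detect = foo_connector_detect,
-       .fill_modes = drm_helper_probe_single_connector_modes,
-       .destroy = foo_connector_destroy,
-};
-]]></programlisting>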
-        </sect4>
-      </sect3>
-    </sect2>
-    <sect2>
-      <title>Cleanup</title>
-      <para>
-        The DRM core manages its objects' lifetime. When an object is not needed
-       anymore the core calls its destroy function, which must clean up and
-       free every resource allocated for the object. Every
-       <function>drm_*_init</function> call must be matched with a
-       corresponding <function>drm_*_cleanup</function> call to cleanup CRTCs
-       (<function>drm_crtc_cleanup</function>), planes
-       (<function>drm_plane_cleanup</function>), encoders
-       (<function>drm_encoder_cleanup</function>) and connectors
-       (<function>drm_connector_cleanup</function>). Furthermore, connectors
-       that have been added to sysfs must be removed by a call to
-       <function>drm_connector_unregister</function> before calling
-       <function>drm_connector_cleanup</function>.
-      </para>
-      <para>
-        Connector state change detection must be cleaned up with a call to
-       <function>drm_kms_helper_poll_fini</function>.
-      </para>
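-      <para>
-        A sketch of a connector destroy callback following the rules above;
-        to_foo_connector is a hypothetical container_of wrapper:
-      </para>
-      <programlisting><![CDATA[
-static void foo_connector_destroy(struct drm_connector *connector)
-{
-       /* Remove from sysfs before cleaning up. */
-       drm_connector_unregister(connector);
-       drm_connector_cleanup(connector);
-       kfree(to_foo_connector(connector));
-}
-]]></programlisting>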
-    </sect2>
-    <sect2>
-      <title>Output discovery and initialization example</title>
-      <programlisting><![CDATA[
-void intel_crt_init(struct drm_device *dev)
-{
-       struct drm_connector *connector;
-       struct intel_output *intel_output;
-
-       intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
-       if (!intel_output)
-               return;
-
-       connector = &intel_output->base;
-       drm_connector_init(dev, &intel_output->base,
-                          &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
-
-       drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
-                        DRM_MODE_ENCODER_DAC);
-
-       drm_mode_connector_attach_encoder(&intel_output->base,
-                                         &intel_output->enc);
-
-       /* Set up the DDC bus. */
-       intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
-       if (!intel_output->ddc_bus) {
-               dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
-                          "failed.\n");
-               return;
-       }
-
-       intel_output->type = INTEL_OUTPUT_ANALOG;
-       connector->interlace_allowed = 0;
-       connector->doublescan_allowed = 0;
-
-       drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
-       drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
-
-       drm_connector_register(connector);
-}]]></programlisting>
-      <para>
-        In the example above (taken from the i915 driver), a connector and
-        encoder combination is created. A device-specific i2c bus is also
-        created for fetching EDID data and performing monitor detection. Once
-        the process is complete, the new connector is registered with sysfs to
-        make its properties available to applications.
-      </para>
-    </sect2>
-    <sect2>
-      <title>KMS API Functions</title>
-!Edrivers/gpu/drm/drm_crtc.c
-    </sect2>
-    <sect2>
-      <title>KMS Data Structures</title>
-!Iinclude/drm/drm_crtc.h
-    </sect2>
-    <sect2>
-      <title>KMS Locking</title>
-!Pdrivers/gpu/drm/drm_modeset_lock.c kms locking
-!Iinclude/drm/drm_modeset_lock.h
-!Edrivers/gpu/drm/drm_modeset_lock.c
-    </sect2>
-  </sect1>
-
-  <!-- Internals: kms helper functions -->
-
-  <sect1>
-    <title>Mode Setting Helper Functions</title>
-    <para>
-      The plane, CRTC, encoder and connector functions provided by the drivers
-      implement the DRM API. They're called by the DRM core and ioctl handlers
-      to handle device state changes and configuration requests. As implementing
-      those functions often requires logic not specific to drivers, mid-layer
-      helper functions are available to avoid duplicating boilerplate code.
-    </para>
-    <para>
-      The DRM core contains one mid-layer implementation. The mid-layer provides
-      implementations of several plane, CRTC, encoder and connector functions
-      (called from the top of the mid-layer) that pre-process requests and call
-      lower-level functions provided by the driver (at the bottom of the
-      mid-layer). For instance, the
-      <function>drm_crtc_helper_set_config</function> function can be used to
-      fill the struct <structname>drm_crtc_funcs</structname>
-      <structfield>set_config</structfield> field. When called, it will split
-      the <methodname>set_config</methodname> operation into smaller, simpler
-      operations and call the driver to handle them.
-    </para>
-    <para>
-      To use the mid-layer, drivers call <function>drm_crtc_helper_add</function>,
-      <function>drm_encoder_helper_add</function> and
-      <function>drm_connector_helper_add</function> functions to install their
-      mid-layer bottom operations handlers, and fill the
-      <structname>drm_crtc_funcs</structname>,
-      <structname>drm_encoder_funcs</structname> and
-      <structname>drm_connector_funcs</structname> structures with pointers to
-      the mid-layer top API functions. Installing the mid-layer bottom operation
-      handlers is best done right after registering the corresponding KMS object.
-    </para>
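-    <para>
-      A sketch of wiring a CRTC into the legacy mid-layer; the foo_ names are
-      hypothetical:
-    </para>
-    <programlisting><![CDATA[
-static const struct drm_crtc_funcs foo_crtc_funcs = {
-       /* Mid-layer top: splits set_config into simpler operations and
-        * calls back into foo_crtc_helper_funcs. */
-       .set_config = drm_crtc_helper_set_config,
-       /* The crtc is embedded and freed with its containing structure. */
-       .destroy = drm_crtc_cleanup,
-};
-
-       drm_crtc_init(dev, &foo->crtc, &foo_crtc_funcs);
-       /* Mid-layer bottom: driver-specific implementations. */
-       drm_crtc_helper_add(&foo->crtc, &foo_crtc_helper_funcs);
-]]></programlisting>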
-    <para>
-      The mid-layer is not split between CRTC, encoder and connector operations.
-      To use it, a driver must provide bottom functions for all of the three KMS
-      entities.
-    </para>
-    <sect2>
-      <title>Atomic Modeset Helper Functions Reference</title>
-      <sect3>
-       <title>Overview</title>
-!Pdrivers/gpu/drm/drm_atomic_helper.c overview
-      </sect3>
-      <sect3>
-       <title>Implementing Asynchronous Atomic Commit</title>
-!Pdrivers/gpu/drm/drm_atomic_helper.c implementing async commit
-      </sect3>
-      <sect3>
-       <title>Atomic State Reset and Initialization</title>
-!Pdrivers/gpu/drm/drm_atomic_helper.c atomic state reset and initialization
-      </sect3>
-!Iinclude/drm/drm_atomic_helper.h
-!Edrivers/gpu/drm/drm_atomic_helper.c
-    </sect2>
-    <sect2>
-      <title>Modeset Helper Reference for Common Vtables</title>
-!Iinclude/drm/drm_modeset_helper_vtables.h
-!Pinclude/drm/drm_modeset_helper_vtables.h overview
-    </sect2>
-    <sect2>
-      <title>Legacy CRTC/Modeset Helper Functions Reference</title>
-!Edrivers/gpu/drm/drm_crtc_helper.c
-!Pdrivers/gpu/drm/drm_crtc_helper.c overview
-    </sect2>
-    <sect2>
-      <title>Output Probing Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_probe_helper.c output probing helper overview
-!Edrivers/gpu/drm/drm_probe_helper.c
-    </sect2>
-    <sect2>
-      <title>fbdev Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
-!Edrivers/gpu/drm/drm_fb_helper.c
-!Iinclude/drm/drm_fb_helper.h
-    </sect2>
-    <sect2>
-      <title>Framebuffer CMA Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_fb_cma_helper.c framebuffer cma helper functions
-!Edrivers/gpu/drm/drm_fb_cma_helper.c
-    </sect2>
-    <sect2>
-      <title>Display Port Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
-!Iinclude/drm/drm_dp_helper.h
-!Edrivers/gpu/drm/drm_dp_helper.c
-    </sect2>
-    <sect2>
-      <title>Display Port Dual Mode Adaptor Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_dp_dual_mode_helper.c dp dual mode helpers
-!Iinclude/drm/drm_dp_dual_mode_helper.h
-!Edrivers/gpu/drm/drm_dp_dual_mode_helper.c
-    </sect2>
-    <sect2>
-      <title>Display Port MST Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
-!Iinclude/drm/drm_dp_mst_helper.h
-!Edrivers/gpu/drm/drm_dp_mst_topology.c
-    </sect2>
-    <sect2>
-      <title>MIPI DSI Helper Functions Reference</title>
-!Pdrivers/gpu/drm/drm_mipi_dsi.c dsi helpers
-!Iinclude/drm/drm_mipi_dsi.h
-!Edrivers/gpu/drm/drm_mipi_dsi.c
-    </sect2>
-    <sect2>
-      <title>EDID Helper Functions Reference</title>
-!Edrivers/gpu/drm/drm_edid.c
-    </sect2>
-    <sect2>
-      <title>Rectangle Utilities Reference</title>
-!Pinclude/drm/drm_rect.h rect utils
-!Iinclude/drm/drm_rect.h
-!Edrivers/gpu/drm/drm_rect.c
-    </sect2>
-    <sect2>
-      <title>Flip-work Helper Reference</title>
-!Pinclude/drm/drm_flip_work.h flip utils
-!Iinclude/drm/drm_flip_work.h
-!Edrivers/gpu/drm/drm_flip_work.c
-    </sect2>
-    <sect2>
-      <title>HDMI Infoframes Helper Reference</title>
-      <para>
-       Strictly speaking this is not a DRM helper library, but it is generally
-       usable by any driver interfacing with HDMI outputs, such as v4l or alsa
-       drivers. It nicely fits into the overall topic of mode setting helper
-       libraries and hence is also included here.
-      </para>
-!Iinclude/linux/hdmi.h
-!Edrivers/video/hdmi.c
-    </sect2>
-    <sect2>
-      <title id="drm-kms-planehelpers">Plane Helper Reference</title>
-!Edrivers/gpu/drm/drm_plane_helper.c
-!Pdrivers/gpu/drm/drm_plane_helper.c overview
-    </sect2>
-    <sect2>
-         <title>Tile group</title>
-!Pdrivers/gpu/drm/drm_crtc.c Tile group
-    </sect2>
-    <sect2>
-      <title>Bridges</title>
-      <sect3>
-        <title>Overview</title>
-!Pdrivers/gpu/drm/drm_bridge.c overview
-      </sect3>
-      <sect3>
-        <title>Default bridge callback sequence</title>
-!Pdrivers/gpu/drm/drm_bridge.c bridge callbacks
-      </sect3>
-!Edrivers/gpu/drm/drm_bridge.c
-    </sect2>
-    <sect2>
-      <title>Panel Helper Reference</title>
-!Iinclude/drm/drm_panel.h
-!Edrivers/gpu/drm/drm_panel.c
-!Pdrivers/gpu/drm/drm_panel.c drm panel
-    </sect2>
-  </sect1>
-
-  <!-- Internals: kms properties -->
-
-  <sect1 id="drm-kms-properties">
-    <title>KMS Properties</title>
-    <para>
-      Drivers may need to expose parameters to applications beyond
-      those described in the previous sections. KMS supports attaching
-      properties to CRTCs, connectors and planes and offers a userspace API to
-      list, get and set the property values.
-    </para>
-    <para>
-      Properties are identified by a name that uniquely defines the property
-      purpose, and store an associated value. For all property types except blob
-      properties the value is a 64-bit unsigned integer.
-    </para>
-    <para>
-      KMS differentiates between properties and property instances. Drivers
-      first create properties and then create and associate individual instances
-      of those properties to objects. A property can be instantiated multiple
-      times and associated with different objects. Values are stored in property
-      instances, while all other property information is stored in the property
-      and shared between all instances of the property.
-    </para>
-    <para>
-      Every property is created with a type that influences how the KMS core
-      handles the property. Supported property types are
-      <variablelist>
-        <varlistentry>
-          <term>DRM_MODE_PROP_RANGE</term>
-          <listitem><para>Range properties report their minimum and maximum
-            admissible values. The KMS core verifies that values set by
-            applications fit in that range.</para></listitem>
-        </varlistentry>
-        <varlistentry>
-          <term>DRM_MODE_PROP_ENUM</term>
-          <listitem><para>Enumerated properties take a numerical value that
-            ranges from 0 to the number of enumerated values defined by the
-            property minus one, and associate a free-formed string name to each
-            value. Applications can retrieve the list of defined value-name pairs
-            and use the numerical value to get and set property instance values.
-            </para></listitem>
-        </varlistentry>
-        <varlistentry>
-          <term>DRM_MODE_PROP_BITMASK</term>
-          <listitem><para>Bitmask properties are enumeration properties that
-            additionally restrict all enumerated values to the 0..63 range.
-            Bitmask property instance values combine one or more of the
-            enumerated bits defined by the property.</para></listitem>
-        </varlistentry>
-        <varlistentry>
-          <term>DRM_MODE_PROP_BLOB</term>
-          <listitem><para>Blob properties store a binary blob without any format
-            restriction. The binary blobs are created as KMS standalone objects,
-            and blob property instance values store the ID of their associated
-            blob object.</para>
-           <para>Blob properties are only used for the connector EDID property
-           and cannot be created by drivers.</para></listitem>
-        </varlistentry>
-      </variablelist>
-    </para>
-    <para>
-      To create a property, drivers call one of the following functions,
-      depending on the property type. All property creation functions take
-      property flags and a name, as well as type-specific arguments; a short
-      sketch follows the list.
-      <itemizedlist>
-        <listitem>
-          <synopsis>struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
-                                               const char *name,
-                                               uint64_t min, uint64_t max);</synopsis>
-          <para>Create a range property with the given minimum and maximum
-            values.</para>
-        </listitem>
-        <listitem>
-          <synopsis>struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
-                                              const char *name,
-                                              const struct drm_prop_enum_list *props,
-                                              int num_values);</synopsis>
-          <para>Create an enumerated property. The <parameter>props</parameter>
-            argument points to an array of <parameter>num_values</parameter>
-            value-name pairs.</para>
-        </listitem>
-        <listitem>
-          <synopsis>struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
-                                                 int flags, const char *name,
-                                                 const struct drm_prop_enum_list *props,
-                                                 int num_values);</synopsis>
-          <para>Create a bitmask property. The <parameter>props</parameter>
-            argument points to an array of <parameter>num_values</parameter>
-            value-name pairs.</para>
-        </listitem>
-      </itemizedlist>
-    </para>
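-    <para>
-      As a minimal sketch, assuming a hypothetical "foo mode" property with
-      two values, an enumerated property could be created from a static
-      value-name array as follows.
-      <programlisting>static const struct drm_prop_enum_list foo_mode_list[] = {
-        { 0, "normal" },
-        { 1, "enhanced" },
-};
-
-struct drm_property *prop;
-
-/* Create the property once, typically at driver initialization time. */
-prop = drm_property_create_enum(dev, 0, "foo mode", foo_mode_list,
-                                ARRAY_SIZE(foo_mode_list));
-if (!prop)
-        return -ENOMEM;</programlisting>
-    </para>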
-    <para>
-      Properties can additionally be created as immutable, in which case they
-      will be read-only for applications but can be modified by the driver. To
-      create an immutable property drivers must set the DRM_MODE_PROP_IMMUTABLE
-      flag at property creation time.
-    </para>
-    <para>
-      When no array of value-name pairs is readily available at property
-      creation time for enumerated or range properties, drivers can create
-      the property using the <function>drm_property_create</function> function
-      and manually add enumeration value-name pairs by calling the
-      <function>drm_property_add_enum</function> function. Care must be taken to
-      properly specify the property type through the <parameter>flags</parameter>
-      argument.
-    </para>
-    <para>
-      After creating properties drivers can attach property instances to CRTC,
-      connector and plane objects by calling the
-      <function>drm_object_attach_property</function>. The function takes a
-      pointer to the target object, a pointer to the previously created property
-      and an initial instance value.
-    </para>
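-    <para>
-      Continuing the hypothetical example above, attaching an instance of the
-      property to a connector, with value 0 ("normal") as the initial instance
-      value, could look like this.
-      <programlisting>drm_object_attach_property(&amp;connector->base, prop, 0);</programlisting>
-    </para>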
-    <sect2>
-       <title>Existing KMS Properties</title>
-       <para>
-       The following table describes the DRM properties exposed by various
-       modules/drivers.
-       </para>
-       <table border="1" cellpadding="0" cellspacing="0">
-       <tbody>
-       <tr style="font-weight: bold;">
-       <td valign="top" >Owner Module/Drivers</td>
-       <td valign="top" >Group</td>
-       <td valign="top" >Property Name</td>
-       <td valign="top" >Type</td>
-       <td valign="top" >Property Values</td>
-       <td valign="top" >Object attached</td>
-       <td valign="top" >Description/Restrictions</td>
-       </tr>
-       <tr>
-       <td rowspan="42" valign="top" >DRM</td>
-       <td rowspan="2" valign="top" >Generic</td>
-       <td valign="top" >“rotation”</td>
-       <td valign="top" >BITMASK</td>
-       <td valign="top" >{ 0, "rotate-0" },
-       { 1, "rotate-90" },
-       { 2, "rotate-180" },
-       { 3, "rotate-270" },
-       { 4, "reflect-x" },
-       { 5, "reflect-y" }</td>
-       <td valign="top" >CRTC, Plane</td>
-       <td valign="top" >rotate-(degrees) rotates the image by the specified amount in degrees
-       in counter clockwise direction. reflect-x and reflect-y reflects the
-       image along the specified axis prior to rotation</td>
-       </tr>
-       <tr>
-       <td valign="top" >“scaling mode”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "None", "Full", "Center", "Full aspect" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >Supported by: amdgpu, gma500, i915, nouveau and radeon.</td>
-       </tr>
-       <tr>
-       <td rowspan="5" valign="top" >Connector</td>
-       <td valign="top" >“EDID”</td>
-       <td valign="top" >BLOB | IMMUTABLE</td>
-       <td valign="top" >0</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >Contains id of edid blob ptr object.</td>
-       </tr>
-       <tr>
-       <td valign="top" >“DPMS”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ “On”, “Standby”, “Suspend”, “Off” }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >Contains DPMS operation mode value.</td>
-       </tr>
-       <tr>
-       <td valign="top" >“PATH”</td>
-       <td valign="top" >BLOB | IMMUTABLE</td>
-       <td valign="top" >0</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >Contains topology path to a connector.</td>
-       </tr>
-       <tr>
-       <td valign="top" >“TILE”</td>
-       <td valign="top" >BLOB | IMMUTABLE</td>
-       <td valign="top" >0</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >Contains tiling information for a connector.</td>
-       </tr>
-       <tr>
-       <td valign="top" >“CRTC_ID”</td>
-       <td valign="top" >OBJECT</td>
-       <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >CRTC that connector is attached to (atomic)</td>
-       </tr>
-       <tr>
-       <td rowspan="11" valign="top" >Plane</td>
-       <td valign="top" >“type”</td>
-       <td valign="top" >ENUM | IMMUTABLE</td>
-       <td valign="top" >{ "Overlay", "Primary", "Cursor" }</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >Plane type</td>
-       </tr>
-       <tr>
-       <td valign="top" >“SRC_X”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=UINT_MAX</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >Scanout source x coordinate in 16.16 fixed point (atomic)</td>
-       </tr>
-       <tr>
-       <td valign="top" >“SRC_Y”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=UINT_MAX</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >Scanout source y coordinate in 16.16 fixed point (atomic)</td>
-       </tr>
-       <tr>
-       <td valign="top" >“SRC_W”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=UINT_MAX</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >Scanout source width in 16.16 fixed point (atomic)</td>
-       </tr>
-       <tr>
-       <td valign="top" >“SRC_H”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=UINT_MAX</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >Scanout source height in 16.16 fixed point (atomic)</td>
-       </tr>
-       <tr>
-       <td valign="top" >“CRTC_X”</td>
-       <td valign="top" >SIGNED_RANGE</td>
-       <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >Scanout CRTC (destination) x coordinate (atomic)</td>
-       </tr>
-       <tr>
-       <td valign="top" >“CRTC_Y”</td>
-       <td valign="top" >SIGNED_RANGE</td>
-       <td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >Scanout CRTC (destination) y coordinate (atomic)</td>
-       </tr>
-       <tr>
-       <td valign="top" >“CRTC_W”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=UINT_MAX</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >Scanout CRTC (destination) width (atomic)</td>
-       </tr>
-       <tr>
-       <td valign="top" >“CRTC_H”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=UINT_MAX</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >Scanout CRTC (destination) height (atomic)</td>
-       </tr>
-       <tr>
-       <td valign="top" >“FB_ID”</td>
-       <td valign="top" >OBJECT</td>
-       <td valign="top" >DRM_MODE_OBJECT_FB</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >Scanout framebuffer (atomic)</td>
-       </tr>
-       <tr>
-       <td valign="top" >“CRTC_ID”</td>
-       <td valign="top" >OBJECT</td>
-       <td valign="top" >DRM_MODE_OBJECT_CRTC</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >CRTC that plane is attached to (atomic)</td>
-       </tr>
-       <tr>
-       <td rowspan="2" valign="top" >DVI-I</td>
-       <td valign="top" >“subconnector”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ “Unknown”, “DVI-D”, “DVI-A” }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“select subconnector”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ “Automatic”, “DVI-D”, “DVI-A” }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="13" valign="top" >TV</td>
-       <td valign="top" >“subconnector”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "Unknown", "Composite", "SVIDEO", "Component", "SCART" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“select subconnector”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "Automatic", "Composite", "SVIDEO", "Component", "SCART" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“mode”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“left margin”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“right margin”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“top margin”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“bottom margin”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“brightness”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“contrast”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“flicker reduction”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“overscan”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“saturation”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“hue”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="2" valign="top" >Virtual GPU</td>
-       <td valign="top" >“suggested X”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0xffffffff</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >property to suggest an X offset for a connector</td>
-       </tr>
-       <tr>
-       <td valign="top" >“suggested Y”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0xffffffff</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >property to suggest an Y offset for a connector</td>
-       </tr>
-       <tr>
-       <td rowspan="7" valign="top" >Optional</td>
-       <td valign="top" >"aspect ratio"</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "None", "4:3", "16:9" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TDB</td>
-       </tr>
-       <tr>
-       <td valign="top" >“dirty”</td>
-       <td valign="top" >ENUM | IMMUTABLE</td>
-       <td valign="top" >{ "Off", "On", "Annotate" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“DEGAMMA_LUT”</td>
-       <td valign="top" >BLOB</td>
-       <td valign="top" >0</td>
-       <td valign="top" >CRTC</td>
-       <td valign="top" >DRM property to set the degamma lookup table
-               (LUT) mapping pixel data from the framebuffer before it is
-               given to the transformation matrix. The data is an interpreted
-               as an array of struct drm_color_lut elements. Hardware might
-               choose not to use the full precision of the LUT elements nor
-               use all the elements of the LUT (for example the hardware
-               might choose to interpolate between LUT[0] and LUT[4]). </td>
-       </tr>
-       <tr>
-       <td valign="top" >“DEGAMMA_LUT_SIZE”</td>
-       <td valign="top" >RANGE | IMMUTABLE</td>
-       <td valign="top" >Min=0, Max=UINT_MAX</td>
-       <td valign="top" >CRTC</td>
-       <td valign="top" >DRM property to gives the size of the lookup
-               table to be set on the DEGAMMA_LUT property (the size depends
-               on the underlying hardware).</td>
-       </tr>
-       <tr>
-       <td valign="top" >“CTM”</td>
-       <td valign="top" >BLOB</td>
-       <td valign="top" >0</td>
-       <td valign="top" >CRTC</td>
-       <td valign="top" >DRM property to set the current
-               transformation matrix (CTM) apply to pixel data after the
-               lookup through the degamma LUT and before the lookup through
-               the gamma LUT. The data is an interpreted as a struct
-               drm_color_ctm.</td>
-       </tr>
-       <tr>
-       <td valign="top" >“GAMMA_LUT”</td>
-       <td valign="top" >BLOB</td>
-       <td valign="top" >0</td>
-       <td valign="top" >CRTC</td>
-       <td valign="top" >DRM property to set the gamma lookup table
-               (LUT) mapping pixel data after to the transformation matrix to
-               data sent to the connector. The data is an interpreted as an
-               array of struct drm_color_lut elements. Hardware might choose
-               not to use the full precision of the LUT elements nor use all
-               the elements of the LUT (for example the hardware might choose
-               to interpolate between LUT[0] and LUT[4]).</td>
-       </tr>
-       <tr>
-       <td valign="top" >“GAMMA_LUT_SIZE”</td>
-       <td valign="top" >RANGE | IMMUTABLE</td>
-       <td valign="top" >Min=0, Max=UINT_MAX</td>
-       <td valign="top" >CRTC</td>
-       <td valign="top" >DRM property to gives the size of the lookup
-               table to be set on the GAMMA_LUT property (the size depends on
-               the underlying hardware).</td>
-       </tr>
-       <tr>
-       <td rowspan="20" valign="top" >i915</td>
-       <td rowspan="2" valign="top" >Generic</td>
-       <td valign="top" >"Broadcast RGB"</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >When this property is set to Limited 16:235
-               and CTM is set, the hardware will be programmed with the
-               result of the multiplication of CTM by the limited range
-               matrix to ensure the pixels normaly in the range 0..1.0 are
-               remapped to the range 16/255..235/255.</td>
-       </tr>
-       <tr>
-       <td valign="top" >“audio”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "force-dvi", "off", "auto", "on" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="17" valign="top" >SDVO-TV</td>
-       <td valign="top" >“mode”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"left_margin"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"right_margin"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"top_margin"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"bottom_margin"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“hpos”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“vpos”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“contrast”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“saturation”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“hue”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“sharpness”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“flicker_filter”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“flicker_filter_adaptive”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“flicker_filter_2d”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“tv_chroma_filter”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“tv_luma_filter”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“dot_crawl”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=1</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >SDVO-TV/LVDS</td>
-       <td valign="top" >“brightness”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="2" valign="top" >CDV gma-500</td>
-       <td rowspan="2" valign="top" >Generic</td>
-       <td valign="top" >"Broadcast RGB"</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ “Full”, “Limited 16:235” }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"Broadcast RGB"</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ “off”, “auto”, “on” }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="19" valign="top" >Poulsbo</td>
-       <td rowspan="1" valign="top" >Generic</td>
-       <td valign="top" >“backlight”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=100</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="17" valign="top" >SDVO-TV</td>
-       <td valign="top" >“mode”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "NTSC_M", "NTSC_J", "NTSC_443", "PAL_B" } etc.</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"left_margin"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"right_margin"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"top_margin"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"bottom_margin"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“hpos”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“vpos”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“contrast”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“saturation”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“hue”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“sharpness”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“flicker_filter”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“flicker_filter_adaptive”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“flicker_filter_2d”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“tv_chroma_filter”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“tv_luma_filter”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“dot_crawl”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=1</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >SDVO-TV/LVDS</td>
-       <td valign="top" >“brightness”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max= SDVO dependent</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="11" valign="top" >armada</td>
-       <td rowspan="2" valign="top" >CRTC</td>
-       <td valign="top" >"CSC_YUV"</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "Auto" , "CCIR601", "CCIR709" }</td>
-       <td valign="top" >CRTC</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"CSC_RGB"</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "Auto", "Computer system", "Studio" }</td>
-       <td valign="top" >CRTC</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="9" valign="top" >Overlay</td>
-       <td valign="top" >"colorkey"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0xffffff</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"colorkey_min"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0xffffff</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"colorkey_max"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0xffffff</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"colorkey_val"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0xffffff</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"colorkey_alpha"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0xffffff</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"colorkey_mode"</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "disabled", "Y component", "U component"
-       , "V component", "RGB", “R component", "G component", "B component" }</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"brightness"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=256 + 255</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"contrast"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0x7fff</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"saturation"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0x7fff</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="2" valign="top" >exynos</td>
-       <td valign="top" >CRTC</td>
-       <td valign="top" >“mode”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "normal", "blank" }</td>
-       <td valign="top" >CRTC</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >Overlay</td>
-       <td valign="top" >“zpos”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=MAX_PLANE-1</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="2" valign="top" >i2c/ch7006_drv</td>
-       <td valign="top" >Generic</td>
-       <td valign="top" >“scale”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=2</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="1" valign="top" >TV</td>
-       <td valign="top" >“mode”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "PAL", "PAL-M","PAL-N"}, ”PAL-Nc"
-       , "PAL-60", "NTSC-M", "NTSC-J" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="15" valign="top" >nouveau</td>
-       <td rowspan="6" valign="top" >NV10 Overlay</td>
-       <td valign="top" >"colorkey"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0x01ffffff</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“contrast”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=8192-1</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“brightness”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=1024</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“hue”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=359</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“saturation”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=8192-1</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“iturbt_709”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=1</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="2" valign="top" >Nv04 Overlay</td>
-       <td valign="top" >“colorkey”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0x01ffffff</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“brightness”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=1024</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="7" valign="top" >Display</td>
-       <td valign="top" >“dithering mode”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "auto", "off", "on" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“dithering depth”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "auto", "off", "on", "static 2x2", "dynamic 2x2", "temporal" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“underscan”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "auto", "6 bpc", "8 bpc" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“underscan hborder”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=128</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“underscan vborder”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=128</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“vibrant hue”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=180</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >“color vibrance”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=200</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >omap</td>
-       <td valign="top" >Generic</td>
-       <td valign="top" >“zorder”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=3</td>
-       <td valign="top" >CRTC, Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >qxl</td>
-       <td valign="top" >Generic</td>
-       <td valign="top" >“hotplug_mode_update"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=1</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="9" valign="top" >radeon</td>
-       <td valign="top" >DVI-I</td>
-       <td valign="top" >“coherent”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=1</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >DAC enable load detect</td>
-       <td valign="top" >“load detection”</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=1</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >TV Standard</td>
-       <td valign="top" >"tv standard"</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "ntsc", "pal", "pal-m", "pal-60", "ntsc-j"
-       , "scart-pal", "pal-cn", "secam" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >legacy TMDS PLL detect</td>
-       <td valign="top" >"tmds_pll"</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "driver", "bios" }</td>
-       <td valign="top" >-</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="3" valign="top" >Underscan</td>
-       <td valign="top" >"underscan"</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "off", "on", "auto" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"underscan hborder"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=128</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"underscan vborder"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=128</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >Audio</td>
-       <td valign="top" >“audio”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "off", "on", "auto" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >FMT Dithering</td>
-       <td valign="top" >“dither”</td>
-       <td valign="top" >ENUM</td>
-       <td valign="top" >{ "off", "on" }</td>
-       <td valign="top" >Connector</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td rowspan="3" valign="top" >rcar-du</td>
-       <td rowspan="3" valign="top" >Generic</td>
-       <td valign="top" >"alpha"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=255</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"colorkey"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=0, Max=0x01ffffff</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       <tr>
-       <td valign="top" >"zpos"</td>
-       <td valign="top" >RANGE</td>
-       <td valign="top" >Min=1, Max=7</td>
-       <td valign="top" >Plane</td>
-       <td valign="top" >TBD</td>
-       </tr>
-       </tbody>
-       </table>
-    </sect2>
-  </sect1>
-
-  <!-- Internals: vertical blanking -->
-
-  <sect1 id="drm-vertical-blank">
-    <title>Vertical Blanking</title>
-    <para>
-      Vertical blanking plays a major role in graphics rendering. To achieve
-      tear-free display, users must synchronize page flips and/or rendering to
-      vertical blanking. The DRM API offers ioctls to perform page flips
-      synchronized to vertical blanking and to wait for vertical blanking.
-    </para>
-    <para>
-      The DRM core handles most of the vertical blanking management logic, which
-      involves filtering out spurious interrupts, keeping race-free blanking
-      counters, coping with counter wrap-around and resets and keeping use
-      counts. It relies on the driver to generate vertical blanking interrupts
-      and optionally provide a hardware vertical blanking counter. Drivers must
-      implement the following operations.
-    </para>
-    <itemizedlist>
-      <listitem>
-        <synopsis>int (*enable_vblank) (struct drm_device *dev, int crtc);
-void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis>
-        <para>
-         Enable or disable vertical blanking interrupts for the given CRTC.
-       </para>
-      </listitem>
-      <listitem>
-        <synopsis>u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);</synopsis>
-        <para>
-         Retrieve the value of the vertical blanking counter for the given
-         CRTC. If the hardware maintains a vertical blanking counter its value
-         should be returned. Otherwise drivers can use the
-         <function>drm_vblank_count</function> helper function to handle this
-         operation, as shown in the sketch after this list.
-       </para>
-      </listitem>
-    </itemizedlist>
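-    <para>
-      For hardware without a usable counter, a minimal sketch of the operation
-      (the foo_ prefix is hypothetical) simply defers to the software counter
-      maintained by the DRM core, as suggested above.
-      <programlisting>static u32 foo_get_vblank_counter(struct drm_device *dev, int crtc)
-{
-        /* No hardware counter available, return the core's software count. */
-        return drm_vblank_count(dev, crtc);
-}</programlisting>
-    </para>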
-    <para>
-      Drivers must initialize the vertical blanking handling core with a call to
-      <function>drm_vblank_init</function> in their
-      <methodname>load</methodname> operation. The function will set the struct
-      <structname>drm_device</structname>
-      <structfield>vblank_disable_allowed</structfield> field to 0. This will
-      keep vertical blanking interrupts enabled permanently until the first mode
-      set operation, where <structfield>vblank_disable_allowed</structfield> is
-      set to 1. The reason behind this is not clear. Drivers can set the field
-      to 1 after calling <function>drm_vblank_init</function> to make vertical
-      blanking interrupts dynamically managed from the beginning.
-    </para>
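-    <para>
-      A minimal sketch of a <methodname>load</methodname> implementation for a
-      hypothetical two-CRTC device could thus look as follows; error handling
-      and all other initialization are omitted, and the foo_ name is made up.
-      <programlisting>static int foo_load(struct drm_device *dev, unsigned long flags)
-{
-        int ret;
-
-        /* Initialize vertical blanking handling for two CRTCs. */
-        ret = drm_vblank_init(dev, 2);
-        if (ret)
-                return ret;
-
-        /* Optionally make vblank interrupts dynamically managed right away,
-         * as described above. */
-        dev->vblank_disable_allowed = 1;
-
-        return 0;
-}</programlisting>
-    </para>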
-    <para>
-      Vertical blanking interrupts can be enabled by the DRM core or by drivers
-      themselves (for instance to handle page flipping operations). The DRM core
-      maintains a vertical blanking use count to ensure that the interrupts are
-      not disabled while a user still needs them. To increment the use count,
-      drivers call <function>drm_vblank_get</function>. Upon return vertical
-      blanking interrupts are guaranteed to be enabled.
-    </para>
-    <para>
-      To decrement the use count drivers call
-      <function>drm_vblank_put</function>. Only when the use count drops to zero
-      will the DRM core disable the vertical blanking interrupts after a delay
-      by scheduling a timer. The delay is accessible through the vblankoffdelay
-      module parameter or the <varname>drm_vblank_offdelay</varname> global
-      variable and expressed in milliseconds. Its default value is 5000 ms.
-      Zero means never disable, and a negative value means disable immediately.
-      Drivers may override the behaviour by setting the
-      <structname>drm_device</structname>
-      <structfield>vblank_disable_immediate</structfield> flag, which when set
-      causes vblank interrupts to be disabled immediately regardless of the
-      drm_vblank_offdelay value. The flag should only be set if there's a
-      properly working hardware vblank counter present.
-    </para>
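-    <para>
-      As an illustration, a driver queuing a page flip could keep vertical
-      blanking interrupts enabled until the flip completes. This is only a
-      sketch; the foo_ helpers are hypothetical.
-      <programlisting>static int foo_queue_flip(struct drm_crtc *crtc)
-{
-        struct drm_device *dev = crtc->dev;
-        unsigned int pipe = drm_crtc_index(crtc);
-        int ret;
-
-        /* Hold a vblank reference while the flip is pending. */
-        ret = drm_vblank_get(dev, pipe);
-        if (ret)
-                return ret;
-
-        foo_hw_queue_flip(crtc); /* hypothetical hardware hook */
-        return 0;
-}
-
-static void foo_flip_complete(struct drm_crtc *crtc)
-{
-        /* Drop the reference taken in foo_queue_flip(). */
-        drm_vblank_put(crtc->dev, drm_crtc_index(crtc));
-}</programlisting>
-    </para>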
-    <para>
-      When a vertical blanking interrupt occurs, drivers only need to call the
-      <function>drm_handle_vblank</function> function to account for the
-      interrupt.
-    </para>
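-    <para>
-      A sketch of an interrupt handler, assuming a hypothetical status
-      register helper and a single CRTC, is shown below.
-      <programlisting>static irqreturn_t foo_irq_handler(int irq, void *arg)
-{
-        struct drm_device *dev = arg;
-        u32 status = foo_read_and_clear_irq_status(dev); /* hypothetical */
-
-        if (status &amp; FOO_IRQ_VBLANK) /* FOO_IRQ_VBLANK is made up */
-                drm_handle_vblank(dev, 0); /* vblank on CRTC 0 */
-
-        return IRQ_HANDLED;
-}</programlisting>
-    </para>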
-    <para>
-      Resources allocated by <function>drm_vblank_init</function> must be freed
-      with a call to <function>drm_vblank_cleanup</function> in the driver
-      <methodname>unload</methodname> operation handler.
-    </para>
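-    <para>
-      A matching <methodname>unload</methodname> handler for the sketch above
-      would therefore be as simple as the following.
-      <programlisting>static int foo_unload(struct drm_device *dev)
-{
-        /* Free the resources allocated by drm_vblank_init(). */
-        drm_vblank_cleanup(dev);
-        return 0;
-}</programlisting>
-    </para>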
-    <sect2>
-      <title>Vertical Blanking and Interrupt Handling Functions Reference</title>
-!Edrivers/gpu/drm/drm_irq.c
-!Finclude/drm/drmP.h drm_crtc_vblank_waitqueue
-    </sect2>
-  </sect1>
-
-  <!-- Internals: open/close, file operations and ioctls -->
-
-  <sect1>
-    <title>Open/Close, File Operations and IOCTLs</title>
-    <sect2>
-      <title>Open and Close</title>
-      <synopsis>int (*firstopen) (struct drm_device *);
-void (*lastclose) (struct drm_device *);
-int (*open) (struct drm_device *, struct drm_file *);
-void (*preclose) (struct drm_device *, struct drm_file *);
-void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
-      <abstract>Open and close handlers. None of those methods are mandatory.
-      </abstract>
-      <para>
-        The <methodname>firstopen</methodname> method is called by the DRM core
-       for legacy UMS (User Mode Setting) drivers only when an application
-       opens a device that has no other opened file handle. UMS drivers can
-       implement it to acquire device resources. KMS drivers can't use the
-       method and must acquire resources in the <methodname>load</methodname>
-       method instead.
-      </para>
-      <para>
-       Similarly the <methodname>lastclose</methodname> method is called when
-       the last application holding a file handle opened on the device closes
-       it, for both UMS and KMS drivers. Additionally, the method is also
-       called at module unload time or, for hot-pluggable devices, when the
-       device is unplugged. The <methodname>firstopen</methodname> and
-       <methodname>lastclose</methodname> calls can thus be unbalanced.
-      </para>
-      <para>
-        The <methodname>open</methodname> method is called every time the device
-       is opened by an application. Drivers can allocate per-file private data
-       in this method and store it in the struct
-       <structname>drm_file</structname> <structfield>driver_priv</structfield>
-       field. Note that the <methodname>open</methodname> method is called
-       before <methodname>firstopen</methodname>.
-      </para>
-      <para>
-        The close operation is split into <methodname>preclose</methodname> and
-       <methodname>postclose</methodname> methods. Drivers must stop and clean
-       up all per-file operations in the <methodname>preclose</methodname>
-       method. For instance pending vertical blanking and page flip events must
-       be cancelled. No per-file operation is allowed on the file handle after
-       returning from the <methodname>preclose</methodname> method.
-      </para>
-      <para>
-        Finally the <methodname>postclose</methodname> method is called as the
-       last step of the close operation, right before calling the
-       <methodname>lastclose</methodname> method if no other open file handle
-       exists for the device. Drivers that have allocated per-file private data
-       in the <methodname>open</methodname> method should free it here.
-      </para>
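-      <para>
-        A minimal sketch of paired <methodname>open</methodname> and
-       <methodname>postclose</methodname> methods managing per-file private
-       data could look as follows; struct foo_file and the foo_ names are
-       hypothetical.
-       <programlisting>static int foo_open(struct drm_device *dev, struct drm_file *file)
-{
-        struct foo_file *priv;
-
-        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-        if (!priv)
-                return -ENOMEM;
-
-        file->driver_priv = priv;
-        return 0;
-}
-
-static void foo_postclose(struct drm_device *dev, struct drm_file *file)
-{
-        /* Free the per-file data allocated in foo_open(). */
-        kfree(file->driver_priv);
-}</programlisting>
-      </para>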
-      <para>
-        The <methodname>lastclose</methodname> method should restore CRTC and
-       plane properties to their default values, so that a subsequent open of
-       the device will not inherit state from the previous user. It can also be
-       used to execute delayed power switching state changes, e.g. in
-       conjunction with the vga_switcheroo infrastructure (see
-       <xref linkend="vga_switcheroo"/>). Beyond that KMS drivers should not
-       do any further cleanup. Only legacy UMS drivers might need to clean up
-       device state so that the vga console or an independent fbdev driver
-       could take over.
-      </para>
-    </sect2>
-    <sect2>
-      <title>File Operations</title>
-!Pdrivers/gpu/drm/drm_fops.c file operations
-!Edrivers/gpu/drm/drm_fops.c
-    </sect2>
-    <sect2>
-      <title>IOCTLs</title>
-      <synopsis>struct drm_ioctl_desc *ioctls;
-int num_ioctls;</synopsis>
-      <abstract>Driver-specific ioctl descriptor table.</abstract>
-      <para>
-        Driver-specific ioctl numbers start at DRM_COMMAND_BASE. The ioctl
-       descriptor table is indexed by the ioctl number offset from the base
-       value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize the
-       table entries; a sketch follows the parameter descriptions below.
-      </para>
-      <para>
-        <programlisting>DRM_IOCTL_DEF_DRV(ioctl, func, flags)</programlisting>
-       <para>
-         <parameter>ioctl</parameter> is the ioctl name. Drivers must define
-         the DRM_##ioctl and DRM_IOCTL_##ioctl macros to the ioctl number
-         offset from DRM_COMMAND_BASE and the ioctl number respectively. The
-         first macro is private to the device while the second must be exposed
-         to userspace in a public header.
-       </para>
-       <para>
-         <parameter>func</parameter> is a pointer to the ioctl handler function
-         compatible with the <type>drm_ioctl_t</type> type.
-         <programlisting>typedef int drm_ioctl_t(struct drm_device *dev, void *data,
-               struct drm_file *file_priv);</programlisting>
-       </para>
-       <para>
-         <parameter>flags</parameter> is a bitmask combination of the following
-         values. It restricts how the ioctl is allowed to be called.
-         <itemizedlist>
-           <listitem><para>
-             DRM_AUTH - Only authenticated callers allowed
-           </para></listitem>
-           <listitem><para>
-             DRM_MASTER - The ioctl can only be called on the master file
-             handle
-           </para></listitem>
-            <listitem><para>
-             DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
-           </para></listitem>
-            <listitem><para>
-             DRM_CONTROL_ALLOW - The ioctl can only be called on a control
-             device
-           </para></listitem>
-            <listitem><para>
-             DRM_UNLOCKED - The ioctl handler will be called without locking
-             the DRM global mutex. This is the enforced default for kms drivers
-             (i.e. using the DRIVER_MODESET flag) and hence shouldn't be used
-             any more for new drivers.
-           </para></listitem>
-         </itemizedlist>
-       </para>
-      </para>
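-      <para>
-        Putting this together, a hypothetical driver-specific ioctl could be
-       wired up as sketched below; all FOO_* names and struct
-       drm_foo_get_param are made up for illustration.
-       <programlisting>/* In the userspace-visible header: */
-#define DRM_FOO_GET_PARAM       0x00
-#define DRM_IOCTL_FOO_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_GET_PARAM, \
-                                         struct drm_foo_get_param)
-
-/* In the driver: */
-static const struct drm_ioctl_desc foo_ioctls[] = {
-        DRM_IOCTL_DEF_DRV(FOO_GET_PARAM, foo_get_param_ioctl, DRM_AUTH),
-};
-
-/* Referenced from the driver structure: */
-/*      .ioctls = foo_ioctls,                  */
-/*      .num_ioctls = ARRAY_SIZE(foo_ioctls),  */</programlisting>
-      </para>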
-!Edrivers/gpu/drm/drm_ioctl.c
-    </sect2>
-  </sect1>
-  <sect1>
-    <title>Legacy Support Code</title>
-    <para>
-      This section very briefly covers some of the old legacy support code,
-      which is only used by old DRM drivers that have done a so-called
-      shadow-attach to the underlying device instead of registering as a real
-      driver. It also includes some of the old generic buffer management and
-      command submission code. Do not use any of this in new and modern drivers.
-    </para>
-
-    <sect2>
-      <title>Legacy Suspend/Resume</title>
-      <para>
-       The DRM core provides some suspend/resume code, but drivers wanting full
-       suspend/resume support should provide save() and restore() functions.
-       These are called at suspend, hibernate, or resume time, and should perform
-       any state save or restore required by your device across suspend or
-       hibernate states.
-      </para>
-      <synopsis>int (*suspend) (struct drm_device *, pm_message_t state);
-  int (*resume) (struct drm_device *);</synopsis>
-      <para>
-       Those are legacy suspend and resume methods which
-       <emphasis>only</emphasis> work with the legacy shadow-attach driver
-       registration functions. New drivers should use the power management
-       interface provided by their bus type (usually through
-       the struct <structname>device_driver</structname> dev_pm_ops) and set
-       these methods to NULL.
-      </para>
-    </sect2>
-
-    <sect2>
-      <title>Legacy DMA Services</title>
-      <para>
-       This should cover how DMA mapping etc. is supported by the core.
-       These functions are deprecated and should not be used.
-      </para>
-    </sect2>
-  </sect1>
-  </chapter>
-
-<!-- TODO
-
-- Add a glossary
-- Document the struct_mutex catch-all lock
-- Document connector properties
-
-- Why is the load method optional?
-- What are drivers supposed to set the initial display state to, and how?
-  Connector's DPMS states are not initialized and are thus equal to
-  DRM_MODE_DPMS_ON. The fbcon compatibility layer calls
-  drm_helper_disable_unused_functions(), which disables unused encoders and
-  CRTCs, but doesn't touch the connectors' DPMS state, and
-  drm_helper_connector_dpms() in reaction to fbdev blanking events. Do drivers
-  that don't implement (or just don't use) fbcon compatibility need to call
-  those functions themselves?
-- KMS drivers must call drm_vblank_pre_modeset() and drm_vblank_post_modeset()
-  around mode setting. Should this be done in the DRM core?
-- vblank_disable_allowed is set to 1 in the first drm_vblank_post_modeset()
-  call and never set back to 0. It seems to be safe to permanently set it to 1
-  in drm_vblank_init() for KMS driver, and it might be safe for UMS drivers as
-  well. This should be investigated.
-- crtc and connector .save and .restore operations are only used internally in
-  drivers, should they be removed from the core?
-- encoder mid-layer .save and .restore operations are only used internally in
-  drivers, should they be removed from the core?
-- encoder mid-layer .detect operation is only used internally in drivers,
-  should it be removed from the core?
--->
-
-  <!-- External interfaces -->
-
-  <chapter id="drmExternals">
-    <title>Userland interfaces</title>
-    <para>
-      The DRM core exports several interfaces to applications,
-      generally intended to be used through corresponding libdrm
-      wrapper functions.  In addition, drivers export device-specific
-      interfaces for use by userspace drivers &amp; device-aware
-      applications through ioctls and sysfs files.
-    </para>
-    <para>
-      External interfaces include: memory mapping, context management,
-      DMA operations, AGP management, vblank control, fence
-      management, memory management, and output management.
-    </para>
-    <para>
-      Cover generic ioctls and sysfs layout here.  We only need high-level
-      info, since man pages should cover the rest.
-    </para>
-
-  <!-- External: render nodes -->
-
-    <sect1>
-      <title>Render nodes</title>
-      <para>
-        DRM core provides multiple character-devices for user-space to use.
-        Depending on which device is opened, user-space can perform a different
-        set of operations (mainly ioctls). The primary node is always created
-        and called card&lt;num&gt;. Additionally, a currently
-        unused control node, called controlD&lt;num&gt;, is also
-        created. The primary node provides all legacy operations and
-        historically was the only interface used by userspace. With KMS, the
-        control node was introduced. However, the planned KMS control interface
-        has never been written and so the control node stays unused to date.
-      </para>
-      <para>
-        With the increased use of offscreen renderers and GPGPU applications,
-        clients no longer require running compositors or graphics servers to
-        make use of a GPU. But the DRM API required unprivileged clients to
-        authenticate to a DRM-Master prior to getting GPU access. To avoid this
-        step and to grant clients GPU access without authenticating, render
-        nodes were introduced. Render nodes solely serve render clients, that
-        is, no modesetting or privileged ioctls can be issued on render nodes.
-        Only non-global rendering commands are allowed. If a driver supports
-        render nodes, it must advertise it via the DRIVER_RENDER
-        DRM driver capability. If not supported, the primary node must be used
-        for render clients together with the legacy drmAuth authentication
-        procedure.
-      </para>
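-      <para>
-       For reference, a minimal sketch of the legacy authentication dance
-       using the libdrm wrappers (error handling and the IPC transport are
-       omitted, and master_fd is assumed to be the graphics server's
-       primary-node file descriptor): the client fetches a magic token and
-       hands it to the server, which authenticates it.
-      </para>
-      <programlisting>
-#include &lt;fcntl.h&gt;
-#include &lt;xf86drm.h&gt;
-
-/* client side */
-drm_magic_t magic;
-int fd = open("/dev/dri/card0", O_RDWR);
-drmGetMagic(fd, &amp;magic);
-/* ... send magic to the graphics server over IPC ... */
-
-/* server (DRM-Master) side */
-drmAuthMagic(master_fd, magic);  /* grants the client GPU access */
-      </programlisting>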
-      <para>
-        If a driver advertises render node support, DRM core will create a
-        separate render node called renderD&lt;num&gt;. There will
-        be one render node per device. No ioctls except PRIME-related ioctls
-        will be allowed on this node. In particular, GEM_OPEN is
-        explicitly prohibited. Render nodes are designed to avoid the
-        buffer leaks that occur if clients guess the flink names or mmap
-        offsets on the legacy interface. In addition to this basic interface,
-        drivers must mark their driver-dependent render-only ioctls as
-        DRM_RENDER_ALLOW so render clients can use them. Driver
-        authors must be careful not to allow any privileged ioctls on render
-        nodes.
-      </para>
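-      <para>
-       As a hedged illustration only, the following sketch shows how a
-       hypothetical driver (the FOO/foo_* names are placeholders; the ioctl
-       number and handler are assumed to be defined elsewhere) would
-       advertise render-node support and mark a render-only ioctl:
-      </para>
-      <programlisting>
-#include &lt;drm/drmP.h&gt;
-
-static const struct drm_ioctl_desc foo_ioctls[] = {
-	/* DRM_RENDER_ALLOW lets render clients call this ioctl */
-	DRM_IOCTL_DEF_DRV(FOO_SUBMIT, foo_submit_ioctl,
-			  DRM_AUTH | DRM_RENDER_ALLOW),
-};
-
-static struct drm_driver foo_driver = {
-	/* DRIVER_RENDER makes the core create renderD&lt;num&gt; */
-	.driver_features = DRIVER_GEM | DRIVER_RENDER,
-	.ioctls = foo_ioctls,
-	.num_ioctls = ARRAY_SIZE(foo_ioctls),
-};
-      </programlisting>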
-      <para>
-        With render nodes, user-space can now control access to the render node
-        via basic file-system access-modes. A running graphics server which
-        authenticates clients on the privileged primary/legacy node is no longer
-        required. Instead, a client can open the render node and is immediately
-        granted GPU access. Communication between clients (or servers) is done
-        via PRIME. FLINK from render node to legacy node is not supported. New
-        clients must not use the insecure FLINK interface.
-      </para>
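-      <para>
-       A non-authoritative sketch of PRIME-based sharing between two clients
-       using the libdrm wrappers (the GEM handle is assumed to come from a
-       driver-specific allocation ioctl, and the socket code is omitted):
-       the exporter turns its handle into a dma-buf file descriptor, passes
-       it over a UNIX socket, and the importer turns it back into a local
-       handle.
-      </para>
-      <programlisting>
-#include &lt;xf86drm.h&gt;
-
-int prime_fd;
-uint32_t handle2;
-
-/* exporter: handle was returned by a driver-specific alloc ioctl */
-drmPrimeHandleToFD(export_fd, handle, DRM_CLOEXEC, &amp;prime_fd);
-/* ... pass prime_fd to the other client via SCM_RIGHTS ... */
-
-/* importer */
-drmPrimeFDToHandle(import_fd, prime_fd, &amp;handle2);
-      </programlisting>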
-      <para>
-        Besides dropping all modeset/global ioctls, render nodes also drop the
-        DRM-Master concept. There is no reason to associate render clients with
-        a DRM-Master as they are independent of any graphics server. Besides,
-        they must work without any running master anyway.
-        Drivers must be able to run without a master object if they support
-        render nodes. If, on the other hand, a driver requires shared state
-        between clients which is visible to user-space and accessible beyond
-        open-file boundaries, it cannot support render nodes.
-      </para>
-    </sect1>
-
-  <!-- External: vblank handling -->
-
-    <sect1>
-      <title>VBlank event handling</title>
-      <para>
-        The DRM core exposes two vertical blank related ioctls:
-        <variablelist>
-          <varlistentry>
-            <term>DRM_IOCTL_WAIT_VBLANK</term>
-            <listitem>
-              <para>
-                This takes a struct drm_wait_vblank as its argument,
-                and it is used to block or request a signal when a specified
-                vblank event occurs.
-              </para>
-            </listitem>
-          </varlistentry>
-          <varlistentry>
-            <term>DRM_IOCTL_MODESET_CTL</term>
-            <listitem>
-              <para>
-               This was only used for user-mode-setting drivers around
-               modesetting changes to allow the kernel to update the vblank
-               interrupt after mode setting, since on many devices the vertical
-               blank counter is reset to 0 at some point during modeset. Modern
-               drivers should not call this any more since with kernel mode
-               setting it is a no-op.
-              </para>
-            </listitem>
-          </varlistentry>
-        </variablelist>
-      </para>
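-      <para>
-        As a non-authoritative example, waiting for the next vblank from
-        user-space through the corresponding libdrm wrapper looks roughly
-        like this (fd is assumed to be an open DRM device file descriptor):
-      </para>
-      <programlisting>
-#include &lt;string.h&gt;
-#include &lt;xf86drm.h&gt;
-
-drmVBlank vbl;
-
-memset(&amp;vbl, 0, sizeof(vbl));
-vbl.request.type = DRM_VBLANK_RELATIVE;
-vbl.request.sequence = 1;  /* wait for the next vblank */
-drmWaitVBlank(fd, &amp;vbl);
-/* on return, vbl.reply.sequence and vbl.reply.tval_sec/tval_usec
-   describe when the vblank occurred */
-      </programlisting>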
-    </sect1>
-
-  </chapter>
-</part>
-<part id="drmDrivers">
-  <title>DRM Drivers</title>
-
-  <partintro>
-    <para>
-      This second part of the GPU Driver Developer's Guide documents driver
-      code, implementation details and also all the driver-specific userspace
-      interfaces. Since all hardware-acceleration interfaces to
-      userspace are driver-specific for efficiency and other reasons, these
-      interfaces can be rather substantial. Hence every driver has its own
-      chapter.
-    </para>
-  </partintro>
-
-  <chapter id="drmI915">
-    <title>drm/i915 Intel GFX Driver</title>
-    <para>
-      The drm/i915 driver supports all (with the exception of some very early
-      models) integrated GFX chipsets with both Intel display and rendering
-      blocks. This excludes a set of SoC platforms with an SGX rendering unit;
-      those have basic support through the gma500 drm driver.
-    </para>
-    <sect1>
-      <title>Core Driver Infrastructure</title>
-      <para>
-       This section covers core driver infrastructure used by both the display
-       and the GEM parts of the driver.
-      </para>
-      <sect2>
-        <title>Runtime Power Management</title>
-!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
-!Idrivers/gpu/drm/i915/intel_runtime_pm.c
-!Idrivers/gpu/drm/i915/intel_uncore.c
-      </sect2>
-      <sect2>
-        <title>Interrupt Handling</title>
-!Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
-!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
-!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
-!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
-      </sect2>
-      <sect2>
-        <title>Intel GVT-g Guest Support(vGPU)</title>
-!Pdrivers/gpu/drm/i915/i915_vgpu.c Intel GVT-g guest support
-!Idrivers/gpu/drm/i915/i915_vgpu.c
-      </sect2>
-    </sect1>
-    <sect1>
-      <title>Display Hardware Handling</title>
-      <para>
-        This section covers everything related to the display hardware,
-        including the mode setting infrastructure, plane, sprite and cursor
-        handling, display output probing, and related topics.
-      </para>
-      <sect2>
-        <title>Mode Setting Infrastructure</title>
-        <para>
-          The i915 driver is thus far the only DRM driver which doesn't use the
-          common DRM helper code to implement mode setting sequences. It
-          therefore has its own tailor-made infrastructure for executing a
-          display configuration change.
-        </para>
-      </sect2>
-      <sect2>
-        <title>Frontbuffer Tracking</title>
-!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
-!Idrivers/gpu/drm/i915/intel_frontbuffer.c
-!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
-      </sect2>
-      <sect2>
-        <title>Display FIFO Underrun Reporting</title>
-!Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling
-!Idrivers/gpu/drm/i915/intel_fifo_underrun.c
-      </sect2>
-      <sect2>
-        <title>Plane Configuration</title>
-        <para>
-         This section covers plane configuration and composition with the
-         primary plane, sprites, cursors and overlays. This includes the
-         infrastructure to do atomic vsync'ed updates of all this state and
-         also tightly coupled topics like watermark setup and computation,
-         framebuffer compression and panel self refresh.
-        </para>
-      </sect2>
-      <sect2>
-        <title>Atomic Plane Helpers</title>
-!Pdrivers/gpu/drm/i915/intel_atomic_plane.c atomic plane helpers
-!Idrivers/gpu/drm/i915/intel_atomic_plane.c
-      </sect2>
-      <sect2>
-        <title>Output Probing</title>
-        <para>
-         This section covers output probing and related infrastructure like the
-         hotplug interrupt storm detection and mitigation code. Note that the
-         i915 driver still uses most of the common DRM helper code for output
-         probing, so those sections fully apply.
-        </para>
-      </sect2>
-      <sect2>
-        <title>Hotplug</title>
-!Pdrivers/gpu/drm/i915/intel_hotplug.c Hotplug
-!Idrivers/gpu/drm/i915/intel_hotplug.c
-      </sect2>
-      <sect2>
-       <title>High Definition Audio</title>
-!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
-!Idrivers/gpu/drm/i915/intel_audio.c
-!Iinclude/drm/i915_component.h
-      </sect2>
-      <sect2>
-       <title>Panel Self Refresh PSR (PSR/SRD)</title>
-!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
-!Idrivers/gpu/drm/i915/intel_psr.c
-      </sect2>
-      <sect2>
-       <title>Frame Buffer Compression (FBC)</title>
-!Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
-!Idrivers/gpu/drm/i915/intel_fbc.c
-      </sect2>
-      <sect2>
-        <title>Display Refresh Rate Switching (DRRS)</title>
-!Pdrivers/gpu/drm/i915/intel_dp.c Display Refresh Rate Switching (DRRS)
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_set_drrs_state
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_enable
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_disable
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_invalidate
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_edp_drrs_flush
-!Fdrivers/gpu/drm/i915/intel_dp.c intel_dp_drrs_init
-
-      </sect2>
-      <sect2>
-        <title>DPIO</title>
-!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
-      </sect2>
-
-      <sect2>
-       <title>CSR firmware support for DMC</title>
-!Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc
-!Idrivers/gpu/drm/i915/intel_csr.c
-      </sect2>
-      <sect2>
-       <title>Video BIOS Table (VBT)</title>
-!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
-!Idrivers/gpu/drm/i915/intel_bios.c
-!Idrivers/gpu/drm/i915/intel_vbt_defs.h
-      </sect2>
-    </sect1>
-
-    <sect1>
-      <title>Memory Management and Command Submission</title>
-      <para>
-       This section covers all things related to the GEM implementation in the
-       i915 driver.
-      </para>
-      <sect2>
-        <title>Batchbuffer Parsing</title>
-!Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
-!Idrivers/gpu/drm/i915/i915_cmd_parser.c
-      </sect2>
-      <sect2>
-        <title>Batchbuffer Pools</title>
-!Pdrivers/gpu/drm/i915/i915_gem_batch_pool.c batch pool
-!Idrivers/gpu/drm/i915/i915_gem_batch_pool.c
-      </sect2>
-      <sect2>
-        <title>Logical Rings, Logical Ring Contexts and Execlists</title>
-!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
-!Idrivers/gpu/drm/i915/intel_lrc.c
-      </sect2>
-      <sect2>
-        <title>Global GTT views</title>
-!Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
-!Idrivers/gpu/drm/i915/i915_gem_gtt.c
-      </sect2>
-      <sect2>
-        <title>GTT Fences and Swizzling</title>
-!Idrivers/gpu/drm/i915/i915_gem_fence.c
-        <sect3>
-          <title>Global GTT Fence Handling</title>
-!Pdrivers/gpu/drm/i915/i915_gem_fence.c fence register handling
-        </sect3>
-        <sect3>
-          <title>Hardware Tiling and Swizzling Details</title>
-!Pdrivers/gpu/drm/i915/i915_gem_fence.c tiling swizzling details
-        </sect3>
-      </sect2>
-      <sect2>
-        <title>Object Tiling IOCTLs</title>
-!Idrivers/gpu/drm/i915/i915_gem_tiling.c
-!Pdrivers/gpu/drm/i915/i915_gem_tiling.c buffer object tiling
-      </sect2>
-      <sect2>
-        <title>Buffer Object Eviction</title>
-       <para>
-         This section documents the interface functions for evicting buffer
-         objects to make space available in the virtual gpu address spaces.
-         Note that this is mostly orthogonal to shrinking buffer object
-         caches, whose goal is to make main memory (shared with the gpu
-         through the unified memory architecture) available.
-       </para>
-!Idrivers/gpu/drm/i915/i915_gem_evict.c
-      </sect2>
-      <sect2>
-        <title>Buffer Object Memory Shrinking</title>
-       <para>
-         This section documents the interface function for shrinking memory
-         usage of buffer object caches. Shrinking is used to make main memory
-         available. Note that this is mostly orthogonal to evicting buffer
-         objects, whose goal is to make space available in gpu virtual
-         address spaces.
-       </para>
-!Idrivers/gpu/drm/i915/i915_gem_shrinker.c
-      </sect2>
-    </sect1>
-    <sect1>
-      <title>GuC</title>
-      <sect2>
-        <title>GuC-specific firmware loader</title>
-!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
-!Idrivers/gpu/drm/i915/intel_guc_loader.c
-      </sect2>
-      <sect2>
-        <title>GuC-based command submission</title>
-!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submission
-!Idrivers/gpu/drm/i915/i915_guc_submission.c
-      </sect2>
-      <sect2>
-        <title>GuC Firmware Layout</title>
-!Pdrivers/gpu/drm/i915/intel_guc_fwif.h GuC Firmware Layout
-      </sect2>
-    </sect1>
-
-    <sect1>
-      <title>Tracing</title>
-      <para>
-       This section covers all things related to the tracepoints implemented
-       in the i915 driver.
-      </para>
-      <sect2>
-        <title>i915_ppgtt_create and i915_ppgtt_release</title>
-!Pdrivers/gpu/drm/i915/i915_trace.h i915_ppgtt_create and i915_ppgtt_release tracepoints
-      </sect2>
-      <sect2>
-        <title>i915_context_create and i915_context_free</title>
-!Pdrivers/gpu/drm/i915/i915_trace.h i915_context_create and i915_context_free tracepoints
-      </sect2>
-      <sect2>
-        <title>switch_mm</title>
-!Pdrivers/gpu/drm/i915/i915_trace.h switch_mm tracepoint
-      </sect2>
-    </sect1>
-
-  </chapter>
-!Cdrivers/gpu/drm/i915/i915_irq.c
-</part>
-
-<part id="vga_switcheroo">
-  <title>vga_switcheroo</title>
-  <partintro>
-!Pdrivers/gpu/vga/vga_switcheroo.c Overview
-  </partintro>
-
-  <chapter id="modes_of_use">
-    <title>Modes of Use</title>
-    <sect1>
-      <title>Manual switching and manual power control</title>
-!Pdrivers/gpu/vga/vga_switcheroo.c Manual switching and manual power control
-    </sect1>
-    <sect1>
-      <title>Driver power control</title>
-!Pdrivers/gpu/vga/vga_switcheroo.c Driver power control
-    </sect1>
-  </chapter>
-
-  <chapter id="api">
-    <title>API</title>
-    <sect1>
-      <title>Public functions</title>
-!Edrivers/gpu/vga/vga_switcheroo.c
-    </sect1>
-    <sect1>
-      <title>Public structures</title>
-!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler
-!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_ops
-    </sect1>
-    <sect1>
-      <title>Public constants</title>
-!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler_flags_t
-!Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id
-!Finclude/linux/vga_switcheroo.h vga_switcheroo_state
-    </sect1>
-    <sect1>
-      <title>Private structures</title>
-!Fdrivers/gpu/vga/vga_switcheroo.c vgasr_priv
-!Fdrivers/gpu/vga/vga_switcheroo.c vga_switcheroo_client
-    </sect1>
-  </chapter>
-
-  <chapter id="handlers">
-    <title>Handlers</title>
-    <sect1>
-      <title>apple-gmux Handler</title>
-!Pdrivers/platform/x86/apple-gmux.c Overview
-!Pdrivers/platform/x86/apple-gmux.c Interrupt
-      <sect2>
-        <title>Graphics mux</title>
-!Pdrivers/platform/x86/apple-gmux.c Graphics mux
-      </sect2>
-      <sect2>
-        <title>Power control</title>
-!Pdrivers/platform/x86/apple-gmux.c Power control
-      </sect2>
-      <sect2>
-        <title>Backlight control</title>
-!Pdrivers/platform/x86/apple-gmux.c Backlight control
-      </sect2>
-      <sect2>
-        <title>Public functions</title>
-!Iinclude/linux/apple-gmux.h
-      </sect2>
-    </sect1>
-  </chapter>
-
-!Cdrivers/gpu/vga/vga_switcheroo.c
-!Cinclude/linux/vga_switcheroo.h
-!Cdrivers/platform/x86/apple-gmux.c
-</part>
-
-</book>
diff --git a/Documentation/Makefile.sphinx b/Documentation/Makefile.sphinx
new file mode 100644 (file)
index 0000000..addf323
--- /dev/null
@@ -0,0 +1,63 @@
+# -*- makefile -*-
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXBUILD   = sphinx-build
+SPHINXOPTS    =
+PAPER         =
+BUILDDIR      = $(obj)/output
+
+# User-friendly check for sphinx-build
+HAVE_SPHINX := $(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi)
+
+ifeq ($(HAVE_SPHINX),0)
+
+.DEFAULT:
+       $(warning The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed and in PATH, or set the SPHINXBUILD make variable to point to the full path of the '$(SPHINXBUILD)' executable.)
+       @echo "  SKIP    Sphinx $@ target."
+
+else # HAVE_SPHINX
+
+# User-friendly check for rst2pdf
+HAVE_RST2PDF := $(shell if python -c "import rst2pdf" >/dev/null 2>&1; then echo 1; else echo 0; fi)
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+KERNELDOC       = $(srctree)/scripts/kernel-doc
+KERNELDOC_CONF  = -D kerneldoc_srctree=$(srctree) -D kerneldoc_bin=$(KERNELDOC)
+ALLSPHINXOPTS   = -D version=$(KERNELVERSION) -D release=$(KERNELRELEASE) -d $(BUILDDIR)/.doctrees $(KERNELDOC_CONF) $(PAPEROPT_$(PAPER)) -c $(srctree)/$(src) $(SPHINXOPTS) $(srctree)/$(src)
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+quiet_cmd_sphinx = SPHINX  $@
+      cmd_sphinx = $(SPHINXBUILD) -b $2 $(ALLSPHINXOPTS) $(BUILDDIR)/$2
+
+htmldocs:
+       $(call cmd,sphinx,html)
+
+pdfdocs:
+ifeq ($(HAVE_RST2PDF),0)
+       $(warning The Python 'rst2pdf' module was not found. Make sure you have the module installed to produce PDF output.)
+       @echo "  SKIP    Sphinx $@ target."
+else # HAVE_RST2PDF
+       $(call cmd,sphinx,pdf)
+endif # HAVE_RST2PDF
+
+epubdocs:
+       $(call cmd,sphinx,epub)
+
+xmldocs:
+       $(call cmd,sphinx,xml)
+
+# no-ops for the Sphinx toolchain
+sgmldocs:
+psdocs:
+mandocs:
+installmandocs:
+
+cleandocs:
+       $(Q)rm -rf $(BUILDDIR)
+
+endif # HAVE_SPHINX
index c6938e50e71f9bc653f1886ff0f93d6daddd4804..4da60b463995464a71c04245e7f90e7ced4f51d2 100644 (file)
@@ -56,6 +56,7 @@ stable kernels.
 | ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |
+| Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144    |
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154    |
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456    |
 | Cavium         | ThunderX SMMUv2 | #27704          | N/A                     |
diff --git a/Documentation/conf.py b/Documentation/conf.py
new file mode 100644 (file)
index 0000000..6cc41a0
--- /dev/null
@@ -0,0 +1,414 @@
+# -*- coding: utf-8 -*-
+#
+# The Linux Kernel documentation build configuration file, created by
+# sphinx-quickstart on Fri Feb 12 13:51:46 2016.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, as shown here.
+sys.path.insert(0, os.path.abspath('sphinx'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = ['kernel-doc']
+
+# Gracefully handle missing rst2pdf.
+try:
+    import rst2pdf
+    extensions += ['rst2pdf.pdfbuilder']
+except ImportError:
+    pass
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'The Linux Kernel'
+copyright = '2016, The kernel development community'
+author = 'The kernel development community'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# In a normal build, version and release are set to KERNELVERSION and
+# KERNELRELEASE, respectively, from the Makefile via Sphinx command line
+# arguments.
+#
+# The following code tries to extract the information by reading the Makefile,
+# when Sphinx is run directly (e.g. by Read the Docs).
+try:
+    makefile_version = None
+    makefile_patchlevel = None
+    for line in open('../Makefile'):
+        key, val = [x.strip() for x in line.split('=', 1)]
+        if key == 'VERSION':
+            makefile_version = val
+        elif key == 'PATCHLEVEL':
+            makefile_patchlevel = val
+        if makefile_version and makefile_patchlevel:
+            break
+except:
+    pass
+finally:
+    if makefile_version and makefile_patchlevel:
+        version = release = makefile_version + '.' + makefile_patchlevel
+    else:
+        sys.stderr.write('Warning: Could not extract kernel version\n')
+        version = release = "unknown version"
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['output']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+primary_domain = 'C'
+highlight_language = 'C'
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+
+# The Read the Docs theme is available from
+# - https://github.com/snide/sphinx_rtd_theme
+# - https://pypi.python.org/pypi/sphinx_rtd_theme
+# - python-sphinx-rtd-theme package (on Debian)
+try:
+    import sphinx_rtd_theme
+    html_theme = 'sphinx_rtd_theme'
+    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+except ImportError:
+    sys.stderr.write('Warning: The Sphinx \'sphinx_rtd_theme\' HTML theme was not found. Make sure you have the theme installed to produce pretty HTML output. Falling back to the default theme.\n')
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+#html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'TheLinuxKerneldoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'TheLinuxKernel.tex', 'The Linux Kernel Documentation',
+     'The kernel development community', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    (master_doc, 'thelinuxkernel', 'The Linux Kernel Documentation',
+     [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation',
+     author, 'TheLinuxKernel', 'One line description of project.',
+     'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# -- Options for Epub output ----------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+epub_author = author
+epub_publisher = author
+epub_copyright = copyright
+
+# The basename for the epub file. It defaults to the project name.
+#epub_basename = project
+
+# The HTML theme for the epub output. Since the default themes are not
+# optimized for small screen space, using the same theme for HTML and epub
+# output is usually not wise. This defaults to 'epub', a theme designed to save
+# visual space.
+#epub_theme = 'epub'
+
+# The language of the text. It defaults to the language option
+# or 'en' if the language is not set.
+#epub_language = ''
+
+# The scheme of the identifier. Typical schemes are ISBN or URL.
+#epub_scheme = ''
+
+# The unique identifier of the text. This can be an ISBN number
+# or the project homepage.
+#epub_identifier = ''
+
+# A unique identification for the text.
+#epub_uid = ''
+
+# A tuple containing the cover image and cover page html template filenames.
+#epub_cover = ()
+
+# A sequence of (type, uri, title) tuples for the guide element of content.opf.
+#epub_guide = ()
+
+# HTML files that should be inserted before the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_pre_files = []
+
+# HTML files that should be inserted after the pages created by sphinx.
+# The format is a list of tuples containing the path and title.
+#epub_post_files = []
+
+# A list of files that should not be packed into the epub file.
+epub_exclude_files = ['search.html']
+
+# The depth of the table of contents in toc.ncx.
+#epub_tocdepth = 3
+
+# Allow duplicate toc entries.
+#epub_tocdup = True
+
+# Choose between 'default' and 'includehidden'.
+#epub_tocscope = 'default'
+
+# Fix unsupported image types using Pillow.
+#epub_fix_images = False
+
+# Scale large images.
+#epub_max_image_width = 0
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#epub_show_urls = 'inline'
+
+# If false, no index is generated.
+#epub_use_index = True
+
+#=======
+# rst2pdf
+#
+# Grouping the document tree into PDF files. List of tuples
+# (source start file, target name, title, author, options).
+#
+# See the Sphinx chapter of http://ralsina.me/static/manual.pdf
+#
+# FIXME: Do not add the index file here; the result will be too big. Adding
+# multiple PDF files here actually tries to get the cross-referencing right
+# *between* PDF files.
+pdf_documents = [
+    ('index', u'Kernel', u'Kernel', u'J. Random Bozo'),
+]
+
+# kernel-doc extension configuration for running Sphinx directly (e.g. by Read
+# the Docs). In a normal build, these are supplied from the Makefile via command
+# line arguments.
+kerneldoc_bin = '../scripts/kernel-doc'
+kerneldoc_srctree = '..'
diff --git a/Documentation/devicetree/bindings/display/arm,malidp.txt b/Documentation/devicetree/bindings/display/arm,malidp.txt
new file mode 100644 (file)
index 0000000..2f78709
--- /dev/null
@@ -0,0 +1,65 @@
+ARM Mali-DP
+
+The following bindings apply to a family of Display Processors sold as
+licensable IP by ARM Ltd. The bindings describe the Mali DP500, DP550 and
+DP650 processors, which offer multiple composition layers and support for
+rotation and scaling of the output.
+
+Required properties:
+  - compatible: should be one of
+       "arm,mali-dp500"
+       "arm,mali-dp550"
+       "arm,mali-dp650"
+    depending on the particular implementation present in the hardware
+  - reg: Physical base address and size of the block of registers used by
+    the processor.
+  - interrupts: Interrupt list, as defined for interrupt client nodes in
+    ../interrupt-controller/interrupts.txt.
+  - interrupt-names: name of the engine inside the processor that will
+    use the corresponding interrupt. Should be one of "DE" or "SE".
+  - clocks: A list of phandle + clock-specifier pairs, one for each entry
+    in 'clock-names'
+  - clock-names: A list of clock names. It should contain:
+      - "pclk": for the APB interface clock
+      - "aclk": for the AXI interface clock
+      - "mclk": for the main processor clock
+      - "pxlclk": for the pixel clock feeding the output PLL of the processor.
+  - arm,malidp-output-port-lines: Array of u8 values describing the number
+    of output lines per channel (R, G and B).
+
+Required sub-nodes:
+  - port: The Mali DP connection to an encoder input port. The connection
+    is modelled using the OF graph bindings specified in
+    Documentation/devicetree/bindings/graph.txt
+
+Optional properties:
+  - memory-region: phandle to a node describing memory (see
+    Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt)
+    to be used for the framebuffer; if not present, the framebuffer may
+    be located anywhere in memory.
+
+
+Example:
+
+/ {
+       ...
+
+       dp0: malidp@6f200000 {
+               compatible = "arm,mali-dp650";
+               reg = <0 0x6f200000 0 0x20000>;
+               memory-region = <&display_reserved>;
+               interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>,
+                            <0 168 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "DE", "SE";
+               clocks = <&oscclk2>, <&fpgaosc0>, <&fpgaosc1>, <&fpgaosc1>;
+               clock-names = "pxlclk", "mclk", "aclk", "pclk";
+               arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
+               port {
+                       dp0_output: endpoint {
+                               remote-endpoint = <&tda998x_2_input>;
+                       };
+               };
+       };
+
+       ...
+};
index 96c25ee01501fdad9b5d402e78b30de0d54f3cf0..6532a59c9b43f3c16d0e01a2a0bb5b9d35b8ecb9 100644 (file)
@@ -1,13 +1,19 @@
-Analog Device ADV7511(W)/13 HDMI Encoders
+Analog Devices ADV7511(W)/13/33 HDMI Encoders
 ---------------------------------------------
 
-The ADV7511, ADV7511W and ADV7513 are HDMI audio and video transmitters
+The ADV7511, ADV7511W, ADV7513 and ADV7533 are HDMI audio and video transmitters
 compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
-S/PDIF, CEC and HDCP.
+S/PDIF, CEC and HDCP. The ADV7533 supports the DSI interface for input pixels,
+while the others support an RGB interface.
 
 Required properties:
 
-- compatible: Should be one of "adi,adv7511", "adi,adv7511w" or "adi,adv7513"
+- compatible: Should be one of:
+               "adi,adv7511"
+               "adi,adv7511w"
+               "adi,adv7513"
+               "adi,adv7533"
+
 - reg: I2C slave address
 
 The ADV7511 supports a large number of input data formats that differ by their
@@ -32,6 +38,11 @@ The following input format properties are required except in "rgb 1x" and
 - adi,input-justification: The input bit justification ("left", "evenly",
   "right").
 
+The following properties are required for ADV7533:
+
+- adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should
+  be one of 1, 2, 3 or 4.
+
 Optional properties:
 
 - interrupts: Specifier for the ADV7511 interrupt
@@ -42,13 +53,18 @@ Optional properties:
 - adi,embedded-sync: The input uses synchronization signals embedded in the
   data stream (similar to BT.656). Defaults to separate H/V synchronization
   signals.
+- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
+  generator. The chip will rely on the sync signals in the DSI data lanes,
+  rather than generate its own timings for HDMI output.
 
 Required nodes:
 
 The ADV7511 has two video ports. Their connections are modelled using the OF
 graph bindings specified in Documentation/devicetree/bindings/graph.txt.
 
-- Video port 0 for the RGB or YUV input
+- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533, the
+  remote endpoint phandle should be a reference to a valid mipi_dsi_host device
+  node.
 - Video port 1 for the HDMI output
 
 
index 4f2ba8c13d9259c528322070790f92da84b3bb58..4a0f4f7682ad4a742f32e66125a424e5dc6837f7 100644 (file)
@@ -5,6 +5,7 @@ Required properties for dp-controller:
                platform specific such as:
                 * "samsung,exynos5-dp"
                 * "rockchip,rk3288-dp"
+                * "rockchip,rk3399-edp"
        -reg:
                physical base address of the controller and length
                of memory mapped region.
diff --git a/Documentation/devicetree/bindings/display/bridge/sii902x.txt b/Documentation/devicetree/bindings/display/bridge/sii902x.txt
new file mode 100644 (file)
index 0000000..56a3e68
--- /dev/null
@@ -0,0 +1,35 @@
+sii902x HDMI bridge bindings
+
+Required properties:
+       - compatible: "sil,sii9022"
+       - reg: i2c address of the bridge
+
+Optional properties:
+       - interrupts-extended or interrupt-parent + interrupts: describe
+         the interrupt line used to inform the host about hotplug events.
+       - reset-gpios: OF device-tree gpio specification for RST_N pin.
+
+Optional subnodes:
+       - video input: this subnode can contain a video input port node
+         to connect the bridge to a display controller output (see the
+         video interface bindings in [1]).
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+       hdmi-bridge@39 {
+               compatible = "sil,sii9022";
+               reg = <0x39>;
+               reset-gpios = <&pioA 1 0>;
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               bridge_in: endpoint {
+                                       remote-endpoint = <&dc_out>;
+                               };
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358767.txt
new file mode 100644 (file)
index 0000000..e3f6aa6
--- /dev/null
@@ -0,0 +1,53 @@
+Toshiba TC358767 eDP bridge bindings
+
+Required properties:
+ - compatible: "toshiba,tc358767"
+ - reg: i2c address of the bridge, 0x68 or 0x0f, depending on bootstrap pins
+ - clock-names: should be "ref"
+ - clocks: OF device-tree clock specification for refclk input. The reference
+   clock rate must be 13 MHz, 19.2 MHz, 26 MHz, or 38.4 MHz.
+
+Optional properties:
+ - shutdown-gpios: OF device-tree gpio specification for SD pin
+                   (active high shutdown input)
+ - reset-gpios: OF device-tree gpio specification for RSTX pin
+                (active low system reset)
+ - ports: the ports node can contain video interface port nodes to connect
+   to a DPI/DSI source and to an eDP/DP sink according to [1][2]:
+    - port@0: DSI input port
+    - port@1: DPI input port
+    - port@2: eDP/DP output port
+
+[1]: Documentation/devicetree/bindings/graph.txt
+[2]: Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Example:
+       edp-bridge@68 {
+               compatible = "toshiba,tc358767";
+               reg = <0x68>;
+               shutdown-gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
+               clock-names = "ref";
+               clocks = <&edp_refclk>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@1 {
+                               reg = <1>;
+
+                               bridge_in: endpoint {
+                                       remote-endpoint = <&dpi_out>;
+                               };
+                       };
+
+                       port@2 {
+                               reg = <2>;
+
+                               bridge_out: endpoint {
+                                       remote-endpoint = <&panel_in>;
+                               };
+                       };
+               };
+       };
index acd5668b1ce1d7832de2e52f57644df2d69a9c6d..508aee461e0dd5723e9b11fe8f765ba5ee0d9de3 100644 (file)
@@ -8,6 +8,7 @@ Required properties:
 Optional properties:
 - label: a symbolic name for the connector
 - hpd-gpios: HPD GPIO number
+- ddc-i2c-bus: phandle link to the I2C controller used for DDC EDID probing
 
 Required nodes:
 - Video port for HDMI input
index ae55cde1b69e44f1ecb186b956cae75e743cb47c..63ec2a624aa9455a3d4c41ff8eca165e4587c96d 100644 (file)
@@ -12,7 +12,7 @@ Required properties:
 - clock-names:         Should be "dcu" and "pix"
                        See ../clocks/clock-bindings.txt for details.
 - big-endian           Boolean property, LS1021A DCU registers are big-endian.
-- fsl,panel:           The phandle to panel node.
+- port:                Video port for the panel output
 
 Optional properties:
 - fsl,tcon:            The phandle to the timing controller node.
@@ -24,6 +24,11 @@ dcu: dcu@2ce0000 {
        clocks = <&platform_clk 0>, <&platform_clk 0>;
        clock-names = "dcu", "pix";
        big-endian;
-       fsl,panel = <&panel>;
        fsl,tcon = <&tcon>;
+
+       port {
+               dcu_out: endpoint {
+                       remote-endpoint = <&panel_out>;
+               };
+       };
 };
index 0a175d991b52b26a5c81d9166329558713e45406..a407462c885e43f31eefcbf22d209f785a8d9ebe 100644 (file)
@@ -62,6 +62,7 @@ Required properties:
    display-timings are used instead.
 
 Optional properties (required if display-timings are used):
+ - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
  - display-timings : A node that describes the display timings as defined in
    Documentation/devicetree/bindings/display/display-timing.txt.
  - fsl,data-mapping : should be "spwg" or "jeida"
diff --git a/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt b/Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt
new file mode 100644 (file)
index 0000000..7b12424
--- /dev/null
@@ -0,0 +1,148 @@
+Mediatek HDMI Encoder
+=====================
+
+The Mediatek HDMI encoder can generate HDMI 1.4a or MHL 2.0 signals from
+its parallel input.
+
+Required properties:
+- compatible: Should be "mediatek,<chip>-hdmi".
+- reg: Physical base address and length of the controller's registers
+- interrupts: The interrupt signal from the function block.
+- clocks: device clocks
+  See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
+- clock-names: must contain "pixel", "pll", "bclk", and "spdif".
+- phys: phandle link to the HDMI PHY node.
+  See Documentation/devicetree/bindings/phy/phy-bindings.txt for details.
+- phy-names: must contain "hdmi"
+- mediatek,syscon-hdmi: phandle link and register offset to the system
+  configuration registers. For mt8173 this must be offset 0x900 into the
+  MMSYS_CONFIG region: <&mmsys 0x900>.
+- ports: A node containing input and output port nodes with endpoint
+  definitions as documented in Documentation/devicetree/bindings/graph.txt.
+- port@0: The input port in the ports node should be connected to a DPI output
+  port.
+- port@1: The output port in the ports node should be connected to the input
+  port of a connector node that contains a ddc-i2c-bus property, or to the
+  input port of an attached bridge chip, such as a SlimPort transmitter.
+
+HDMI CEC
+========
+
+The HDMI CEC controller handles hotplug detection and CEC communication.
+
+Required properties:
+- compatible: Should be "mediatek,<chip>-cec"
+- reg: Physical base address and length of the controller's registers
+- interrupts: The interrupt signal from the function block.
+- clocks: device clock
+
+HDMI DDC
+========
+
+The HDMI DDC i2c controller is used to interface with the HDMI DDC pins.
+The Mediatek I2C controller is used to interface with I2C devices.
+
+Required properties:
+- compatible: Should be "mediatek,<chip>-hdmi-ddc"
+- reg: Physical base address and length of the controller's registers
+- clocks: device clock
+- clock-names: Should be "ddc-i2c".
+
+HDMI PHY
+========
+
+The HDMI PHY serializes the HDMI encoder's three-channel 10-bit parallel
+output and drives the HDMI pads.
+
+Required properties:
+- compatible: "mediatek,<chip>-hdmi-phy"
+- reg: Physical base address and length of the module's registers
+- clocks: PLL reference clock
+- clock-names: must contain "pll_ref"
+- clock-output-names: must be "hdmitx_dig_cts" on mt8173
+- #phy-cells: must be <0>
+- #clock-cells: must be <0>
+
+Optional properties:
+- mediatek,ibias: TX DRV bias current for <1.65Gbps, defaults to 0xa
+- mediatek,ibias_up: TX DRV bias current for >1.65Gbps, defaults to 0x1c
+
+Example:
+
+cec: cec@10013000 {
+       compatible = "mediatek,mt8173-cec";
+       reg = <0 0x10013000 0 0xbc>;
+       interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_LOW>;
+       clocks = <&infracfg CLK_INFRA_CEC>;
+};
+
+hdmi_phy: hdmi-phy@10209100 {
+       compatible = "mediatek,mt8173-hdmi-phy";
+       reg = <0 0x10209100 0 0x24>;
+       clocks = <&apmixedsys CLK_APMIXED_HDMI_REF>;
+       clock-names = "pll_ref";
+       clock-output-names = "hdmitx_dig_cts";
+       mediatek,ibias = <0xa>;
+       mediatek,ibias_up = <0x1c>;
+       #clock-cells = <0>;
+       #phy-cells = <0>;
+};
+
+hdmi_ddc0: i2c@11012000 {
+       compatible = "mediatek,mt8173-hdmi-ddc";
+       reg = <0 0x11012000 0 0x1c>;
+       interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_LOW>;
+       clocks = <&pericfg CLK_PERI_I2C5>;
+       clock-names = "ddc-i2c";
+};
+
+hdmi0: hdmi@14025000 {
+       compatible = "mediatek,mt8173-hdmi";
+       reg = <0 0x14025000 0 0x400>;
+       interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_LOW>;
+       clocks = <&mmsys CLK_MM_HDMI_PIXEL>,
+                <&mmsys CLK_MM_HDMI_PLLCK>,
+                <&mmsys CLK_MM_HDMI_AUDIO>,
+                <&mmsys CLK_MM_HDMI_SPDIF>;
+       clock-names = "pixel", "pll", "bclk", "spdif";
+       pinctrl-names = "default";
+       pinctrl-0 = <&hdmi_pin>;
+       phys = <&hdmi_phy>;
+       phy-names = "hdmi";
+       mediatek,syscon-hdmi = <&mmsys 0x900>;
+       assigned-clocks = <&topckgen CLK_TOP_HDMI_SEL>;
+       assigned-clock-parents = <&hdmi_phy>;
+
+       ports {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               port@0 {
+                       reg = <0>;
+
+                       hdmi0_in: endpoint {
+                               remote-endpoint = <&dpi0_out>;
+                       };
+               };
+
+               port@1 {
+                       reg = <1>;
+
+                       hdmi0_out: endpoint {
+                               remote-endpoint = <&hdmi_con_in>;
+                       };
+               };
+       };
+};
+
+connector {
+       compatible = "hdmi-connector";
+       type = "a";
+       ddc-i2c-bus = <&hdmi_ddc0>;
+
+       port {
+               hdmi_con_in: endpoint {
+                       remote-endpoint = <&hdmi0_out>;
+               };
+       };
+};
index f5948c48b9a23582e63a7573a3e394ed9b37360c..6b1cab17f52d0549ea8a3e3cbef460e6c3367cbf 100644 (file)
@@ -11,8 +11,7 @@ Required properties:
   be 0 or 1, since we have 2 DSI controllers at most for now.
 - interrupts: The interrupt signal from the DSI block.
 - power-domains: Should be <&mmcc MDSS_GDSC>.
-- clocks: device clocks
-  See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
+- clocks: Phandles to device clocks.
 - clock-names: the following clocks are required:
   * "mdp_core_clk"
   * "iface_clk"
@@ -23,16 +22,21 @@ Required properties:
   * "core_clk"
   For DSIv2, we need an additional clock:
    * "src_clk"
+- assigned-clocks: Parents of "byte_clk" and "pixel_clk" for the given platform.
+- assigned-clock-parents: The byte clock and pixel clock PLL outputs provided
+  by a DSI PHY block. See [1] for details on clock bindings.
 - vdd-supply: phandle to vdd regulator device node
 - vddio-supply: phandle to vdd-io regulator device node
 - vdda-supply: phandle to vdda regulator device node
-- qcom,dsi-phy: phandle to DSI PHY device node
+- phys: phandle to DSI PHY device node
+- phy-names: the name of the corresponding PHY device
 - syscon-sfpb: A phandle to mmss_sfpb syscon node (only for DSIv2)
+- ports: Contains 2 DSI controller ports as child nodes. Each port contains
+  an endpoint subnode as defined in [2] and [3].
 
 Optional properties:
 - panel@0: Node of panel connected to this DSI controller.
-  See files in Documentation/devicetree/bindings/display/panel/ for each supported
-  panel.
+  See files in [4] for each supported panel.
 - qcom,dual-dsi-mode: Boolean value indicating if the DSI controller is
   driving a panel which needs 2 DSI links.
 - qcom,master-dsi: Boolean value indicating if the DSI controller is driving
@@ -44,34 +48,38 @@ Optional properties:
 - pinctrl-names: the pin control state names; should contain "default"
 - pinctrl-0: the default pinctrl state (active)
 - pinctrl-n: the "sleep" pinctrl state
-- port: DSI controller output port, containing one endpoint subnode.
+- ports: contains DSI controller input and output ports as children, each
+  containing one endpoint subnode.
 
   DSI Endpoint properties:
-  - remote-endpoint: set to phandle of the connected panel's endpoint.
-    See Documentation/devicetree/bindings/graph.txt for device graph info.
-  - qcom,data-lane-map: this describes how the logical DSI lanes are mapped
-    to the physical lanes on the given platform. The value contained in
-    index n describes what logical data lane is mapped to the physical data
-    lane n (DATAn, where n lies between 0 and 3).
+  - remote-endpoint: For port@0, set to phandle of the connected panel/bridge's
+    input endpoint. For port@1, set to the MDP interface output. See [2] for
+    device graph info.
+
+  - data-lanes: this describes how the physical DSI data lanes are mapped
+    to the logical lanes on the given platform. The value contained in
+    index n describes what physical lane is mapped to the logical lane n
+    (DATAn, where n lies between 0 and 3). The clock lane position is fixed
+    and can't be changed. Hence, it isn't a part of the DT bindings. See
+    [3] for more info on the data-lanes property.
 
     For example:
 
-    qcom,data-lane-map = <3 0 1 2>;
+    data-lanes = <3 0 1 2>;
 
-    The above mapping describes that the logical data lane DATA3 is mapped to
-    the physical data lane DATA0, logical DATA0 to physical DATA1, logic DATA1
-    to phys DATA2 and logic DATA2 to phys DATA3.
+    The above mapping describes that logical data lane DATA0 is mapped to
+    physical data lane DATA3, logical DATA1 to physical DATA0, logical DATA2
+    to physical DATA1 and logical DATA3 to physical DATA2.
 
     There are only a limited number of physical to logical mappings possible:
-
-    "0123": Logic 0->Phys 0; Logic 1->Phys 1; Logic 2->Phys 2; Logic 3->Phys 3;
-    "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
-    "2301": Logic 2->Phys 0; Logic 3->Phys 1; Logic 0->Phys 2; Logic 1->Phys 3;
-    "1230": Logic 1->Phys 0; Logic 2->Phys 1; Logic 3->Phys 2; Logic 0->Phys 3;
-    "0321": Logic 0->Phys 0; Logic 3->Phys 1; Logic 2->Phys 2; Logic 1->Phys 3;
-    "1032": Logic 1->Phys 0; Logic 0->Phys 1; Logic 3->Phys 2; Logic 2->Phys 3;
-    "2103": Logic 2->Phys 0; Logic 1->Phys 1; Logic 0->Phys 2; Logic 3->Phys 3;
-    "3210": Logic 3->Phys 0; Logic 2->Phys 1; Logic 1->Phys 2; Logic 0->Phys 3;
+    <0 1 2 3>
+    <1 2 3 0>
+    <2 3 0 1>
+    <3 0 1 2>
+    <0 3 2 1>
+    <1 0 3 2>
+    <2 1 0 3>
+    <3 2 1 0>
 
 DSI PHY:
 Required properties:
@@ -86,11 +94,12 @@ Required properties:
   * "dsi_pll"
   * "dsi_phy"
   * "dsi_phy_regulator"
+- #clock-cells: Must be 1. The DSI PHY block acts as a clock provider, creating
+  2 clocks: a byte clock (index 0) and a pixel clock (index 1).
 - qcom,dsi-phy-index: The ID of DSI PHY hardware instance. This should
   be 0 or 1, since we have 2 DSI PHYs at most for now.
 - power-domains: Should be <&mmcc MDSS_GDSC>.
-- clocks: device clocks
-  See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
+- clocks: Phandles to device clocks. See [1] for details on clock bindings.
 - clock-names: the following clocks are required:
   * "iface_clk"
 - vddio-supply: phandle to vdd-io regulator device node
@@ -99,11 +108,16 @@ Optional properties:
 - qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
   regulator is wanted.
 
+[1] Documentation/devicetree/bindings/clocks/clock-bindings.txt
+[2] Documentation/devicetree/bindings/graph.txt
+[3] Documentation/devicetree/bindings/media/video-interfaces.txt
+[4] Documentation/devicetree/bindings/display/panel/
+
 Example:
-       mdss_dsi0: qcom,mdss_dsi@fd922800 {
+       dsi0: dsi@fd922800 {
                compatible = "qcom,mdss-dsi-ctrl";
                qcom,dsi-host-index = <0>;
-               interrupt-parent = <&mdss_mdp>;
+               interrupt-parent = <&mdp>;
                interrupts = <4 0>;
                reg-names = "dsi_ctrl";
                reg = <0xfd922800 0x200>;
@@ -124,19 +138,48 @@ Example:
                        <&mmcc MDSS_AHB_CLK>,
                        <&mmcc MDSS_MDP_CLK>,
                        <&mmcc MDSS_PCLK0_CLK>;
+
+               assigned-clocks =
+                                <&mmcc BYTE0_CLK_SRC>,
+                                <&mmcc PCLK0_CLK_SRC>;
+               assigned-clock-parents =
+                                <&dsi_phy0 0>,
+                                <&dsi_phy0 1>;
+
                vdda-supply = <&pma8084_l2>;
                vdd-supply = <&pma8084_l22>;
                vddio-supply = <&pma8084_l12>;
 
-               qcom,dsi-phy = <&mdss_dsi_phy0>;
+               phys = <&dsi_phy0>;
+               phy-names ="dsi-phy";
 
                qcom,dual-dsi-mode;
                qcom,master-dsi;
                qcom,sync-dual-dsi;
 
                pinctrl-names = "default", "sleep";
-               pinctrl-0 = <&mdss_dsi_active>;
-               pinctrl-1 = <&mdss_dsi_suspend>;
+               pinctrl-0 = <&dsi_active>;
+               pinctrl-1 = <&dsi_suspend>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               dsi0_in: endpoint {
+                                       remote-endpoint = <&mdp_intf1_out>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               dsi0_out: endpoint {
+                                       remote-endpoint = <&panel_in>;
+                                       data-lanes = <0 1 2 3>;
+                               };
+                       };
+               };
 
                panel: panel@0 {
                        compatible = "sharp,lq101r1sx01";
@@ -152,16 +195,9 @@ Example:
                                };
                        };
                };
-
-               port {
-                       dsi0_out: endpoint {
-                               remote-endpoint = <&panel_in>;
-                               lanes = <0 1 2 3>;
-                       };
-               };
        };
 
-       mdss_dsi_phy0: qcom,mdss_dsi_phy@fd922a00 {
+       dsi_phy0: dsi-phy@fd922a00 {
                compatible = "qcom,dsi-phy-28nm-hpm";
                qcom,dsi-phy-index = <0>;
                reg-names =
@@ -173,6 +209,7 @@ Example:
                        <0xfd922d80 0x7b>;
                clock-names = "iface_clk";
                clocks = <&mmcc MDSS_AHB_CLK>;
+               #clock-cells = <1>;
                vddio-supply = <&pma8084_l12>;
 
                qcom,dsi-phy-regulator-ldo-mode;
diff --git a/Documentation/devicetree/bindings/display/msm/mdp.txt b/Documentation/devicetree/bindings/display/msm/mdp.txt
deleted file mode 100644 (file)
index a214f6c..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-Qualcomm adreno/snapdragon display controller
-
-Required properties:
-- compatible:
-  * "qcom,mdp4" - mdp4
-  * "qcom,mdp5" - mdp5
-- reg: Physical base address and length of the controller's registers.
-- interrupts: The interrupt signal from the display controller.
-- connectors: array of phandles for output device(s)
-- clocks: device clocks
-  See ../clocks/clock-bindings.txt for details.
-- clock-names: the following clocks are required.
-  For MDP4:
-   * "core_clk"
-   * "iface_clk"
-   * "lut_clk"
-   * "src_clk"
-   * "hdmi_clk"
-   * "mdp_clk"
-  For MDP5:
-   * "bus_clk"
-   * "iface_clk"
-   * "core_clk_src"
-   * "core_clk"
-   * "lut_clk" (some MDP5 versions may not need this)
-   * "vsync_clk"
-
-Optional properties:
-- gpus: phandle for gpu device
-- clock-names: the following clocks are optional:
-  * "lut_clk"
-
-Example:
-
-/ {
-       ...
-
-       mdp: qcom,mdp@5100000 {
-               compatible = "qcom,mdp4";
-               reg = <0x05100000 0xf0000>;
-               interrupts = <GIC_SPI 75 0>;
-               connectors = <&hdmi>;
-               gpus = <&gpu>;
-               clock-names =
-                   "core_clk",
-                   "iface_clk",
-                   "lut_clk",
-                   "src_clk",
-                   "hdmi_clk",
-                   "mdp_clk";
-               clocks =
-                   <&mmcc MDP_SRC>,
-                   <&mmcc MDP_AHB_CLK>,
-                   <&mmcc MDP_LUT_CLK>,
-                   <&mmcc TV_SRC>,
-                   <&mmcc HDMI_TV_CLK>,
-                   <&mmcc MDP_TV_CLK>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/msm/mdp4.txt b/Documentation/devicetree/bindings/display/msm/mdp4.txt
new file mode 100644 (file)
index 0000000..3c341a1
--- /dev/null
@@ -0,0 +1,112 @@
+Qualcomm adreno/snapdragon MDP4 display controller
+
+Description:
+
+This is the bindings documentation for the MDP4 display controller found in
+SoCs like MSM8960, APQ8064 and MSM8660.
+
+Required properties:
+- compatible:
+  * "qcom,mdp4" - mdp4
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt signal from the display controller.
+- clocks: device clocks
+  See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required.
+  * "core_clk"
+  * "iface_clk"
+  * "bus_clk"
+  * "lut_clk"
+  * "hdmi_clk"
+  * "tv_clk"
+- ports: contains the list of output ports from MDP. These connect to interfaces
+  that are external to the MDP hardware, such as HDMI, DSI, eDP, etc. (LVDS is a
+  special case since it is a part of the MDP block itself).
+
+  Each output port contains an endpoint that describes how it is connected to an
+  external interface. These are described by the standard properties documented
+  here:
+       Documentation/devicetree/bindings/graph.txt
+       Documentation/devicetree/bindings/media/video-interfaces.txt
+
+  The output port mappings are:
+       Port 0 -> LCDC/LVDS
+       Port 1 -> DSI1 Cmd/Video
+       Port 2 -> DSI2 Cmd/Video
+       Port 3 -> DTV
+
+Optional properties:
+- clock-names: the following clocks are optional:
+  * "lut_clk"
+
+Example:
+
+/ {
+       ...
+
+       hdmi: hdmi@4a00000 {
+               ...
+               ports {
+                       ...
+                       port@0 {
+                               reg = <0>;
+                               hdmi_in: endpoint {
+                                       remote-endpoint = <&mdp_dtv_out>;
+                               };
+                       };
+                       ...
+               };
+               ...
+       };
+
+       ...
+
+       mdp: mdp@5100000 {
+               compatible = "qcom,mdp4";
+               reg = <0x05100000 0xf0000>;
+               interrupts = <GIC_SPI 75 0>;
+               clock-names =
+                   "core_clk",
+                   "iface_clk",
+                   "lut_clk",
+                   "hdmi_clk",
+                   "tv_clk";
+               clocks =
+                   <&mmcc MDP_CLK>,
+                   <&mmcc MDP_AHB_CLK>,
+                   <&mmcc MDP_AXI_CLK>,
+                   <&mmcc MDP_LUT_CLK>,
+                   <&mmcc HDMI_TV_CLK>,
+                   <&mmcc MDP_TV_CLK>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               mdp_lvds_out: endpoint {
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               mdp_dsi1_out: endpoint {
+                               };
+                       };
+
+                       port@2 {
+                               reg = <2>;
+                               mdp_dsi2_out: endpoint {
+                               };
+                       };
+
+                       port@3 {
+                               reg = <3>;
+                               mdp_dtv_out: endpoint {
+                                       remote-endpoint = <&hdmi_in>;
+                               };
+                       };
+               };
+       };
+};
diff --git a/Documentation/devicetree/bindings/display/msm/mdp5.txt b/Documentation/devicetree/bindings/display/msm/mdp5.txt
new file mode 100644 (file)
index 0000000..30c11ea
--- /dev/null
@@ -0,0 +1,160 @@
+Qualcomm adreno/snapdragon MDP5 display controller
+
+Description:
+
+This is the bindings documentation for the Mobile Display Subsystem (MDSS) that
+encapsulates sub-blocks like MDP5, DSI, HDMI, eDP, etc., and the MDP5 display
+controller found in SoCs like MSM8974, APQ8084, MSM8916, MSM8994 and MSM8996.
+
+MDSS:
+Required properties:
+- compatible:
+  * "qcom,mdss" - MDSS
+- reg: Physical base address and length of the controller's registers.
+- reg-names: The names of register regions. The following regions are required:
+  * "mdss_phys"
+  * "vbif_phys"
+- interrupts: The interrupt signal from MDSS.
+- interrupt-controller: identifies the node as an interrupt controller.
+- #interrupt-cells: specifies the number of cells needed to encode an interrupt
+  source, should be 1.
+- power-domains: a power domain consumer specifier according to
+  Documentation/devicetree/bindings/power/power_domain.txt
+- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required.
+  * "iface_clk"
+  * "bus_clk"
+  * "vsync_clk"
+- #address-cells: number of address cells for the MDSS children. Should be 1.
+- #size-cells: Should be 1.
+- ranges: parent bus address space is the same as the child bus address space.
+
+Optional properties:
+- clock-names: the following clocks are optional:
+  * "lut_clk"
+
+MDP5:
+Required properties:
+- compatible:
+  * "qcom,mdp5" - MDP5
+- reg: Physical base address and length of the controller's registers.
+- reg-names: The names of register regions. The following regions are required:
+  * "mdp_phys"
+- interrupts: Interrupt line from MDP5 to MDSS interrupt controller.
+- interrupt-parent: phandle to the MDSS block, which acts as the
+  interrupt controller for the MDP block
+- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
+- clock-names: the following clocks are required.
+  * "bus_clk"
+  * "iface_clk"
+  * "core_clk"
+  * "vsync_clk"
+- ports: contains the list of output ports from MDP. These connect to interfaces
+  that are external to the MDP hardware, such as HDMI, DSI, eDP, etc. (LVDS is a
+  special case since it is a part of the MDP block itself).
+
+  Each output port contains an endpoint that describes how it is connected to an
+  external interface. These are described by the standard properties documented
+  here:
+       Documentation/devicetree/bindings/graph.txt
+       Documentation/devicetree/bindings/media/video-interfaces.txt
+
+  The availability of output ports can vary across SoC revisions:
+
+  For MSM8974 and APQ8084:
+        Port 0 -> MDP_INTF0 (eDP)
+        Port 1 -> MDP_INTF1 (DSI1)
+        Port 2 -> MDP_INTF2 (DSI2)
+        Port 3 -> MDP_INTF3 (HDMI)
+
+  For MSM8916:
+        Port 0 -> MDP_INTF1 (DSI1)
+
+  For MSM8994 and MSM8996:
+        Port 0 -> MDP_INTF1 (DSI1)
+        Port 1 -> MDP_INTF2 (DSI2)
+        Port 2 -> MDP_INTF3 (HDMI)
+
+Optional properties:
+- clock-names: the following clocks are optional:
+  * "lut_clk"
+
+Example:
+
+/ {
+       ...
+
+       mdss: mdss@1a00000 {
+               compatible = "qcom,mdss";
+               reg = <0x1a00000 0x1000>,
+                     <0x1ac8000 0x3000>;
+               reg-names = "mdss_phys", "vbif_phys";
+
+               power-domains = <&gcc MDSS_GDSC>;
+
+               clocks = <&gcc GCC_MDSS_AHB_CLK>,
+                        <&gcc GCC_MDSS_AXI_CLK>,
+                        <&gcc GCC_MDSS_VSYNC_CLK>;
+               clock-names = "iface_clk",
+                             "bus_clk",
+                             "vsync_clk";
+
+               interrupts = <0 72 0>;
+
+               interrupt-controller;
+               #interrupt-cells = <1>;
+
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               mdp: mdp@1a01000 {
+                       compatible = "qcom,mdp5";
+                       reg = <0x1a01000 0x90000>;
+                       reg-names = "mdp_phys";
+
+                       interrupt-parent = <&mdss>;
+                       interrupts = <0 0>;
+
+                       clocks = <&gcc GCC_MDSS_AHB_CLK>,
+                                <&gcc GCC_MDSS_AXI_CLK>,
+                                <&gcc GCC_MDSS_MDP_CLK>,
+                                <&gcc GCC_MDSS_VSYNC_CLK>;
+                       clock-names = "iface_clk",
+                                     "bus_clk",
+                                     "core_clk",
+                                     "vsync_clk";
+
+                       ports {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               port@0 {
+                                       reg = <0>;
+                                       mdp5_intf1_out: endpoint {
+                                               remote-endpoint = <&dsi0_in>;
+                                       };
+                               };
+                       };
+               };
+
+               dsi0: dsi@1a98000 {
+                       ...
+                       ports {
+                               ...
+                               port@0 {
+                                       reg = <0>;
+                                       dsi0_in: endpoint {
+                                               remote-endpoint = <&mdp5_intf1_out>;
+                                       };
+                               };
+                               ...
+                       };
+                       ...
+               };
+
+               dsi_phy0: dsi-phy@1a98300 {
+                       ...
+               };
+       };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt b/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt
new file mode 100644 (file)
index 0000000..b9877ac
--- /dev/null
@@ -0,0 +1,7 @@
+LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "lg,lp079qx1-sp0v"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt b/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt
new file mode 100644 (file)
index 0000000..4214151
--- /dev/null
@@ -0,0 +1,7 @@
+LG 9.7" (2048x1536 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "lg,lp097qx1-spa1"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
index 216c894d4f99514d606151aabe152ec5014837b4..b52ac52757df3daf4776249980db74338174a721 100644 (file)
@@ -7,6 +7,8 @@ Required properties:
 Optional properties:
 - label: a symbolic name for the panel
 - enable-gpios: panel enable gpio
+- reset-gpios: GPIO to control the RESET pin
+- vcc-supply: phandle of the regulator used to enable power to the display
 
 Required nodes:
 - "panel-timing" containing video timings
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt b/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt
new file mode 100644 (file)
index 0000000..dba298b
--- /dev/null
@@ -0,0 +1,7 @@
+Samsung 12.2" (2560x1600 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "samsung,lsn122dl01-c01"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt
new file mode 100644 (file)
index 0000000..4aff25b
--- /dev/null
@@ -0,0 +1,7 @@
+Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq101k1ly04"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt
new file mode 100644 (file)
index 0000000..bcb0e8a
--- /dev/null
@@ -0,0 +1,7 @@
+Sharp 12.3" (2400x1600 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "sharp,lq123p1jx31"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt b/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt
new file mode 100644 (file)
index 0000000..1e87fe6
--- /dev/null
@@ -0,0 +1,7 @@
+Starry 12.2" (1920x1200 pixels) TFT LCD panel
+
+Required properties:
+- compatible: should be "starry,kr122ea0sra"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
index e832ff98fd61c1e527ac40a92cf80f1df0e20594..01cced1c2a180cfe4307cc5cfb0f70b8616f3d81 100644 (file)
@@ -2,7 +2,8 @@ Rockchip RK3288 specific extensions to the Analogix Display Port
 ================================
 
 Required properties:
-- compatible: "rockchip,rk3288-edp";
+- compatible: "rockchip,rk3288-dp",
+             "rockchip,rk3399-edp";
 
 - reg: physical base address of the controller and length
 
@@ -27,6 +28,12 @@ Required properties:
     Port 0: contained 2 endpoints, connecting to the output of vop.
     Port 1: contained 1 endpoint, connecting to the input of panel.
 
+Optional properties for different chips:
+- clocks: from common clock binding: handle to grf_vio clock.
+
+- clock-names: from common clock binding:
+              Required elements: "grf"
+
 For the below properties, please refer to Analogix DP binding document:
  * Documentation/devicetree/bindings/drm/bridge/analogix_dp.txt
 - phys (required)
index a3bd8c050c4ef2f84fcdc29f9325006af14ff01b..0fad7ed2ea191201af1a136d1fee19bd94e67ff2 100644 (file)
@@ -208,6 +208,7 @@ of the following host1x client modules:
     See ../clocks/clock-bindings.txt for details.
   - clock-names: Must include the following entries:
     - sor: clock input for the SOR hardware
+    - source: source clock for the SOR clock
     - parent: input for the pixel clock
     - dp: reference clock for the SOR clock
     - safe: safe reference for the SOR clock during power up
@@ -226,9 +227,9 @@ of the following host1x client modules:
   - nvidia,dpaux: phandle to a DisplayPort AUX interface
 
 - dpaux: DisplayPort AUX interface
-  - compatible: For Tegra124, must contain "nvidia,tegra124-dpaux".  Otherwise,
-    must contain '"nvidia,<chip>-dpaux", "nvidia,tegra124-dpaux"', where
-    <chip> is tegra132.
+  - compatible : Should contain one of the following:
+    - "nvidia,tegra124-dpaux": for Tegra124 and Tegra132
+    - "nvidia,tegra210-dpaux": for Tegra210
   - reg: Physical base address and length of the controller's registers.
   - interrupts: The interrupt outputs from the controller.
   - clocks: Must contain an entry for each entry in clock-names.
@@ -241,6 +242,12 @@ of the following host1x client modules:
   - reset-names: Must include the following entries:
     - dpaux
   - vdd-supply: phandle of a supply that powers the DisplayPort link
+  - i2c-bus: Subnode where I2C slave devices are listed. This subnode
+    must always be present. If there are no I2C slave devices, an empty
+    node should be added. See ../../i2c/i2c.txt for more information.
+
+  See ../pinctrl/nvidia,tegra124-dpaux-padctl.txt for information
+  regarding the DPAUX pad controller bindings.
 
 Example:
 
index 9bcd5e87830d23882fa930c3b5214a94252f7957..02af0d94e9217d19e8bbec945ec10e338b6eeafd 100644 (file)
@@ -7,6 +7,7 @@ Required properties:
        - "ti,ina220" for ina220
        - "ti,ina226" for ina226
        - "ti,ina230" for ina230
+       - "ti,ina231" for ina231
 - reg: I2C address
 
 Optional properties:
index bfeabb843941e0c74f509f25a96e15f3daffa711..71191ff0e7814c958502fbc7ee79c6d0f71c9b02 100644 (file)
@@ -44,8 +44,8 @@ Required properties:
 - our-claim-gpio: The GPIO that we use to claim the bus.
 - their-claim-gpios: The GPIOs that the other sides use to claim the bus.
   Note that some implementations may only support a single other master.
-- Standard I2C mux properties. See mux.txt in this directory.
-- Single I2C child bus node at reg 0. See mux.txt in this directory.
+- Standard I2C mux properties. See i2c-mux.txt in this directory.
+- Single I2C child bus node at reg 0. See i2c-mux.txt in this directory.
 
 Optional properties:
 - slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us.
index 6078aefe7ed481bb9474623ac2dc67d8a0fda7f8..7ce23ac6130808da6a7f621f1991b13f17f185d6 100644 (file)
@@ -27,7 +27,8 @@ Required properties:
 - i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
                parents.
 
-Furthermore, I2C mux properties and child nodes. See mux.txt in this directory.
+Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this
+directory.
 
 Example:
 
index 66709a8255413417231608568ed76b7966e25e0b..21da3ecbb3700c06bc80cb3685ee31e56bc1471b 100644 (file)
@@ -22,8 +22,8 @@ Required properties:
 - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
   port is connected to.
 - mux-gpios: list of gpios used to control the muxer
-* Standard I2C mux properties. See mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
 
 Optional properties:
 - idle-state: value to set the muxer to when idle. When no value is
@@ -33,7 +33,7 @@ For each i2c child node, an I2C child bus will be created. They will
 be numbered based on their order in the device tree.
 
 Whenever an access is made to a device on a child bus, the value set
-in the revelant node's reg property will be output using the list of
+in the relevant node's reg property will be output using the list of
 GPIOs, the first in the list holding the least-significant value.
 
 If an idle state is defined, using the idle-state (optional) property,
index ae8af1694e950e9df436bee3143a6c0ca207c3b9..33119a98e1447681f5dbb04eb55df352ede95d6d 100644 (file)
@@ -28,9 +28,9 @@ Also required are:
 * Standard pinctrl properties that specify the pin mux state for each child
   bus. See ../pinctrl/pinctrl-bindings.txt.
 
-* Standard I2C mux properties. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
 
-* I2C child bus nodes. See mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
 
 For each named state defined in the pinctrl-names property, an I2C child bus
 will be created. I2C child bus numbers are assigned based on the index into
index 688783fbe696bebfc7daa9796ef299bcd108aa9c..de00d7fc450b005333e874b4b831a5585594d50b 100644 (file)
@@ -7,8 +7,8 @@ Required properties:
 - compatible: i2c-mux-reg
 - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
   port is connected to.
-* Standard I2C mux properties. See mux.txt in this directory.
-* I2C child bus nodes. See mux.txt in this directory.
+* Standard I2C mux properties. See i2c-mux.txt in this directory.
+* I2C child bus nodes. See i2c-mux.txt in this directory.
 
 Optional properties:
 - reg: this pair of <offset size> specifies the register to control the mux.
@@ -24,7 +24,7 @@ Optional properties:
   given, it defaults to the last value used.
 
 Whenever an access is made to a device on a child bus, the value set
-in the revelant node's reg property will be output to the register.
+in the relevant node's reg property will be output to the register.
 
 If an idle state is defined, using the idle-state (optional) property,
 whenever an access is not being made to a device on a child bus, the
index 14aa6cf58201e4b92b191d0af535bc0d60aa66c1..6a9a63cb054304aa2a367b6381c116aadf0979f9 100644 (file)
@@ -13,10 +13,10 @@ Optional properties:
                      initialization. This is an array of 28 values(u8).
 
   - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip.
-                       firmware will use the pin to wakeup host system.
+                       firmware will use the pin to wake up the host system (u16).
   - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host
                      platform. The value will be configured to firmware. This
-                     is needed to work chip's sleep feature as expected.
+                     is needed for the chip's sleep feature to work as expected (u16).
   - interrupt-parent: phandle of the parent interrupt controller
   - interrupts : interrupt pin number to the cpu. Driver will request an irq based
                 on this interrupt number. During system suspend, the irq will be
@@ -50,7 +50,7 @@ calibration data is also available in below example.
                        0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02
                        0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00
                        0x00 0x00 0xf0 0x00>;
-               marvell,wakeup-pin = <0x0d>;
-               marvell,wakeup-gap-ms = <0x64>;
+               marvell,wakeup-pin = /bits/ 16 <0x0d>;
+               marvell,wakeup-gap-ms = /bits/ 16 <0x64>;
        };
 };
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra124-dpaux-padctl.txt
new file mode 100644 (file)
index 0000000..f2abdae
--- /dev/null
@@ -0,0 +1,60 @@
+Device tree binding for NVIDIA Tegra DPAUX pad controller
+========================================================
+
+The Tegra Display Port Auxiliary (DPAUX) pad controller manages two pins
+which can be assigned to either the DPAUX channel or to an I2C
+controller.
+
+This document defines the device-specific binding for the DPAUX pad
+controller. Refer to pinctrl-bindings.txt in this directory for generic
+information about pin controller device tree bindings. Please refer to
+the binding document ../display/tegra/nvidia,tegra20-host1x.txt for more
+details on the DPAUX binding.
+
+Pin muxing:
+-----------
+
+Child nodes contain the pinmux configurations following the conventions
+from the pinctrl-bindings.txt document.
+
+Since only three configurations are possible, only three child nodes are
+needed to describe the pin muxing options for the DPAUX pads.
+Furthermore, given that the pad functions are only applicable to a
+single set of pads, the child nodes only need to describe the pad group
+the functions are being applied to rather than the individual pads.
+
+Required properties:
+- groups: Must be "dpaux-io"
+- function: Must be either "aux", "i2c" or "off".
+
+Example:
+--------
+
+       dpaux@545c0000 {
+               ...
+
+               state_dpaux_aux: pinmux-aux {
+                       groups = "dpaux-io";
+                       function = "aux";
+               };
+
+               state_dpaux_i2c: pinmux-i2c {
+                       groups = "dpaux-io";
+                       function = "i2c";
+               };
+
+               state_dpaux_off: pinmux-off {
+                       groups = "dpaux-io";
+                       function = "off";
+               };
+       };
+
+       ...
+
+       i2c@7000d100 {
+               ...
+               pinctrl-0 = <&state_dpaux_i2c>;
+               pinctrl-1 = <&state_dpaux_off>;
+               pinctrl-names = "default", "idle";
+               status = "disabled";
+       };
index a7440bcd67ffc9ac43afe39564cb434ce8e58bc6..44722769209f13c8457fd9adbd09eec7696643c1 100644 (file)
@@ -247,6 +247,7 @@ sony        Sony Corporation
 spansion       Spansion Inc.
 sprd   Spreadtrum Communications Inc.
 st     STMicroelectronics
+starry Starry Electronic Technology (ShenZhen) Co., LTD
 startek        Startek
 ste    ST-Ericsson
 stericsson     ST-Ericsson
@@ -255,6 +256,7 @@ synology    Synology, Inc.
 SUNW   Sun Microsystems, Inc
 tbs    TBS Technologies
 tcl    Toby Churchill Ltd.
+technexion     TechNexion
 technologic    Technologic Systems
 thine  THine Electronics, Inc.
 ti     Texas Instruments
@@ -269,6 +271,7 @@ tronsmart   Tronsmart
 truly  Truly Semiconductors Limited
 tyan   Tyan Computer Corporation
 upisemi        uPI Semiconductor Corp.
+uniwest        United Western Technologies Corp (UniWest)
 urt    United Radiant Technology Corporation
 usi    Universal Scientific Industrial Co., Ltd.
 v3     V3 Semiconductor
index 122b7f4876bb637bf4023826703a12936bd326c7..91ce82d5f0c4f6bb951c4b406792144a7fc374ca 100644 (file)
@@ -323,7 +323,7 @@ supported.
    * device_resume
      - Resumes a transfer on the channel
      - This command should operate synchronously on the channel,
-       pausing right away the work of the given channel
+       resuming right away the work of the given channel
 
    * device_terminate_all
      - Aborts all the pending and ongoing transfers on the channel
index 30d2fcb32f72aa54a6d7d256cbad1c10b96d9897..9f94fe276dea9cc6e8ec2983d43b6adb72d9f43f 100644 (file)
+Each mount of the devpts filesystem is now distinct such that ptys
+and their indices allocated in one mount are independent from ptys
+and their indices in all other mounts.
 
-To support containers, we now allow multiple instances of devpts filesystem,
-such that indices of ptys allocated in one instance are independent of indices
-allocated in other instances of devpts.
+All mounts of the devpts filesystem now create a /dev/pts/ptmx node
+with permissions 0000.
 
-To preserve backward compatibility, this support for multiple instances is
-enabled only if:
+To retain backwards compatibility, a ptmx device node (i.e. any node
+created with "mknod name c 5 2"), when opened, will look for an instance
+of devpts under the name "pts" in the same directory as the ptmx device
+node.
 
-       - CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and
-       - '-o newinstance' mount option is specified while mounting devpts
-
-IOW, devpts now supports both single-instance and multi-instance semantics.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and
-this referred to as the "legacy" mode. In this mode, the new mount options
-(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message
-on console.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the
-'newinstance' option (as in current start-up scripts) the new mount binds
-to the initial kernel mount of devpts. This mode is referred to as the
-'single-instance' mode and the current, single-instance semantics are
-preserved, i.e PTYs are common across the system.
-
-The only difference between this single-instance mode and the legacy mode
-is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which
-can safely be ignored.
-
-If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified,
-the mount is considered to be in the multi-instance mode and a new instance
-of the devpts fs is created. Any ptys created in this instance are independent
-of ptys in other instances of devpts. Like in the single-instance mode, the
-/dev/pts/ptmx node is present. To effectively use the multi-instance mode,
-open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or
-bind-mount.
-
-Eg: A container startup script could do the following:
-
-       $ chmod 0666 /dev/pts/ptmx
-       $ rm /dev/ptmx
-       $ ln -s pts/ptmx /dev/ptmx
-       $ ns_exec -cm /bin/bash
-
-       # We are now in new container
-
-       $ umount /dev/pts
-       $ mount -t devpts -o newinstance lxcpts /dev/pts
-       $ sshd -p 1234
-
-where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs
-/bin/bash in the child process.  A pty created by the sshd is not visible in
-the original mount of /dev/pts.
+Instead of placing a /dev/ptmx device node at /dev/ptmx, it is
+possible to place a symlink to /dev/pts/ptmx at /dev/ptmx, or to
+bind mount /dev/pts/ptmx to /dev/ptmx.  If you opt for using the
+devpts filesystem in this manner, devpts should be mounted with
+ptmxmode=0666, or "chmod 0666 /dev/pts/ptmx" should be called.
 
 Total count of pty pairs in all instances is limited by sysctls:
 kernel.pty.max = 4096          - global limit
-kernel.pty.reserve = 1024      - reserve for initial instance
+kernel.pty.reserve = 1024      - reserved for filesystems mounted from the initial mount namespace
 kernel.pty.nr                  - current count of ptys
 
 Per-instance limit could be set by adding mount option "max=<count>".
 This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve.
 In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit.
-
-User-space changes
-------------------
-
-In multi-instance mode (i.e '-o newinstance' mount option is specified at least
-once), following user-space issues should be noted.
-
-1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored
-   and no change is needed to system-startup scripts.
-
-2. To effectively use multi-instance mode (i.e -o newinstance is specified)
-   administrators or startup scripts should "redirect" open of /dev/ptmx to
-   /dev/pts/ptmx using either a bind mount or symlink.
-
-       $ mount -t devpts -o newinstance devpts /dev/pts
-
-   followed by either
-
-       $ rm /dev/ptmx
-       $ ln -s pts/ptmx /dev/ptmx
-       $ chmod 666 /dev/pts/ptmx
-   or
-       $ mount -o bind /dev/pts/ptmx /dev/ptmx
-
-3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it
-   enables better error-reporting and treats both single-instance and
-   multi-instance mounts similarly.
-
-   But this method requires that system-startup scripts set the mode of
-   /dev/pts/ptmx correctly (default mode is 0000). The scripts can set the
-   mode by, either
-
-       - adding ptmxmode mount option to devpts entry in /etc/fstab, or
-       - using 'chmod 0666 /dev/pts/ptmx'
-
-4. If multi-instance mode mount is needed for containers, but the system
-   startup scripts have not yet been updated, container-startup scripts
-   should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single-
-   instance mounts.
-
-   Or, in general, container-startup scripts should use:
-
-       mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts
-       if [ ! -L /dev/ptmx ]; then
-               mount -o bind /dev/pts/ptmx /dev/ptmx
-       fi
-
-   When all devpts mounts are multi-instance, /dev/ptmx can permanently be
-   a symlink to pts/ptmx and the bind mount can be ignored.
-
-5. A multi-instance mount that is not accompanied by the /dev/ptmx to
-   /dev/pts/ptmx redirection would result in an unusable/unreachable pty.
-
-       mount -t devpts -o newinstance lxcpts /dev/pts
-
-   immediately followed by:
-
-       open("/dev/ptmx")
-
-    would create a pty, say /dev/pts/7, in the initial kernel mount.
-    But /dev/pts/7 would be invisible in the new mount.
-
-6. The permissions for /dev/pts/ptmx node should be specified when mounting
-   /dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000).
-
-       mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts
-
-   The permissions can be later be changed as usual with 'chmod'.
-
-       chmod 666 /dev/pts/ptmx
-
-7. A mount of devpts without the 'newinstance' option results in binding to
-   initial kernel mount.  This behavior while preserving legacy semantics,
-   does not provide strict isolation in a container environment. i.e by
-   mounting devpts without the 'newinstance' option, a container could
-   get visibility into the 'host' or root container's devpts.
-   
-   To workaround this and have strict isolation, all mounts of devpts,
-   including the mount in the root container, should use the newinstance
-   option.
index 4ab7d43d07544f7a22b35e49f9eb691eb3e9204d..7050ce8794b9a4b3dd93b76dd9e2a6d708b468ee 100644 (file)
@@ -139,27 +139,6 @@ Examples of using the Linux-provided gdb helpers
       start_comm = "swapper/2\000\000\000\000\000\000"
     }
 
- o Dig into a radix tree data structure, such as the IRQ descriptors:
-    (gdb) print (struct irq_desc)$lx_radix_tree_lookup(irq_desc_tree, 18)
-    $6 = {
-      irq_common_data = {
-        state_use_accessors = 67584,
-        handler_data = 0x0 <__vectors_start>,
-        msi_desc = 0x0 <__vectors_start>,
-        affinity = {{
-            bits = {65535}
-          }}
-      },
-      irq_data = {
-        mask = 0,
-        irq = 18,
-        hwirq = 27,
-        common = 0xee803d80,
-        chip = 0xc0eb0854 <gic_data>,
-        domain = 0xee808000,
-        parent_data = 0x0 <__vectors_start>,
-        chip_data = 0xc0eb0854 <gic_data>
-      } <... trimmed ...>
 
 List of commands and functions
 ------------------------------
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
new file mode 100644 (file)
index 0000000..3bb2613
--- /dev/null
@@ -0,0 +1,381 @@
+=============
+DRM Internals
+=============
+
+This chapter documents DRM internals relevant to driver authors and
+developers working to add support for the latest features to existing
+drivers.
+
+First, we go over some typical driver initialization requirements, like
+setting up command buffers, creating an initial output configuration,
+and initializing core services. Subsequent sections cover core internals
+in more detail, providing implementation notes and examples.
+
+The DRM layer provides several services to graphics drivers, many of
+them driven by the application interfaces it provides through libdrm,
+the library that wraps most of the DRM ioctls. These include vblank
+event handling, memory management, output management, framebuffer
+management, command submission & fencing, suspend/resume support, and
+DMA services.
+
+Driver Initialization
+=====================
+
+At the core of every DRM driver is a :c:type:`struct drm_driver
+<drm_driver>` structure. Drivers typically statically initialize
+a drm_driver structure, and then pass it to
+:c:func:`drm_dev_alloc()` to allocate a device instance. After the
+device instance is fully initialized it can be registered (which makes
+it accessible from userspace) using :c:func:`drm_dev_register()`.
+
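+A minimal sketch of that flow, assuming a hypothetical "foo" platform
+driver (the flag selection, version numbers and error handling are
+illustrative only; at the time of writing :c:func:`drm_dev_alloc()`
+returns NULL on failure)::
+
+    #include <linux/platform_device.h>
+    #include <drm/drmP.h>
+
+    static struct drm_driver foo_driver = {
+        .driver_features = DRIVER_GEM | DRIVER_MODESET,
+        .name = "foo",
+        .desc = "Hypothetical example driver",
+        .date = "20160729",
+        .major = 1,
+        .minor = 0,
+    };
+
+    static int foo_probe(struct platform_device *pdev)
+    {
+        struct drm_device *drm;
+        int ret;
+
+        /* Allocate and initialize the device instance. */
+        drm = drm_dev_alloc(&foo_driver, &pdev->dev);
+        if (!drm)
+            return -ENOMEM;
+
+        /* ... driver-specific initialization goes here ... */
+
+        /* Make the device accessible from userspace. */
+        ret = drm_dev_register(drm, 0);
+        if (ret)
+            drm_dev_unref(drm);
+        return ret;
+    }
+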
+The :c:type:`struct drm_driver <drm_driver>` structure
+contains static information that describes the driver and features it
+supports, and pointers to methods that the DRM core will call to
+implement the DRM API. We will first go through the :c:type:`struct
+drm_driver <drm_driver>` static information fields, and will
+then describe individual operations in detail as they get used in later
+sections.
+
+Driver Information
+------------------
+
+Driver Features
+~~~~~~~~~~~~~~~
+
+Drivers inform the DRM core about their requirements and supported
+features by setting appropriate flags in the driver_features field.
+Since those flags influence the DRM core behaviour from registration
+time onwards, most of them must be set before registering the
+:c:type:`struct drm_driver <drm_driver>` instance.
+
+::
+
+    u32 driver_features;
+
+DRIVER_USE_AGP
+    Driver uses AGP interface, the DRM core will manage AGP resources.
+
+DRIVER_REQUIRE_AGP
+    Driver needs AGP interface to function. AGP initialization failure
+    will become a fatal error.
+
+DRIVER_PCI_DMA
+    Driver is capable of PCI DMA, mapping of PCI DMA buffers to
+    userspace will be enabled. Deprecated.
+
+DRIVER_SG
+    Driver can perform scatter/gather DMA, allocation and mapping of
+    scatter/gather buffers will be enabled. Deprecated.
+
+DRIVER_HAVE_DMA
+    Driver supports DMA, the userspace DMA API will be supported.
+    Deprecated.
+
+DRIVER_HAVE_IRQ; DRIVER_IRQ_SHARED
+    DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler
+    managed by the DRM Core. The core will support simple IRQ handler
+    installation when the flag is set. The installation process is
+    described in ?.
+
+    DRIVER_IRQ_SHARED indicates whether the device & handler support
+    shared IRQs (note that this is required of PCI drivers).
+
+DRIVER_GEM
+    Driver uses the GEM memory manager.
+
+DRIVER_MODESET
+    Driver supports mode setting interfaces (KMS).
+
+DRIVER_PRIME
+    Driver implements DRM PRIME buffer sharing.
+
+DRIVER_RENDER
+    Driver supports dedicated render nodes.
+
+DRIVER_ATOMIC
+    Driver supports atomic properties. In this case the driver must
+    implement appropriate obj->atomic_get_property() vfuncs for any
+    modeset objects with driver specific properties.
+
+Major, Minor and Patchlevel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    int major;
+    int minor;
+    int patchlevel;
+
+The DRM core identifies driver versions by a major, minor and patch
+level triplet. The information is printed to the kernel log at
+initialization time and passed to userspace through the
+DRM_IOCTL_VERSION ioctl.
+
+The major and minor numbers are also used to verify the requested driver
+API version passed to DRM_IOCTL_SET_VERSION. When the driver API
+changes between minor versions, applications can call
+DRM_IOCTL_SET_VERSION to select a specific version of the API. If the
+requested major isn't equal to the driver major, or the requested minor
+is larger than the driver minor, the DRM_IOCTL_SET_VERSION call will
+return an error. Otherwise the driver's set_version() method will be
+called with the requested version.
+
+Name, Description and Date
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    char *name;
+    char *desc;
+    char *date;
+
+The driver name is printed to the kernel log at initialization time,
+used for IRQ registration and passed to userspace through
+DRM_IOCTL_VERSION.
+
+The driver description is a purely informative string passed to
+userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
+the kernel.
+
+The driver date, formatted as YYYYMMDD, is meant to identify the date of
+the latest modification to the driver. However, as most drivers fail to
+update it, its value is mostly useless. The DRM core prints it to the
+kernel log at initialization time and passes it to userspace through the
+DRM_IOCTL_VERSION ioctl.
+
+Device Instance and Driver Handling
+-----------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_drv.c
+   :doc: driver instance overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_drv.c
+   :export:
+
+Driver Load
+-----------
+
+IRQ Registration
+~~~~~~~~~~~~~~~~
+
+The DRM core tries to facilitate IRQ handler registration and
+unregistration by providing :c:func:`drm_irq_install()` and
+:c:func:`drm_irq_uninstall()` functions. Those functions only
+support a single interrupt per device; devices that use more than one
+IRQ must be handled manually.
+
+Managed IRQ Registration
+''''''''''''''''''''''''
+
+:c:func:`drm_irq_install()` starts by calling the irq_preinstall
+driver operation. The operation is optional and must make sure that the
+interrupt will not get fired by clearing all pending interrupt flags or
+disabling the interrupt.
+
+The passed-in IRQ will then be requested by a call to
+:c:func:`request_irq()`. If the DRIVER_IRQ_SHARED driver feature
+flag is set, a shared (IRQF_SHARED) IRQ handler will be requested.
+
+The IRQ handler function must be provided as the mandatory irq_handler
+driver operation. It will get passed directly to
+:c:func:`request_irq()` and thus has the same prototype as all IRQ
+handlers. It will get called with a pointer to the DRM device as the
+second argument.
+
+Finally the function calls the optional irq_postinstall driver
+operation. The operation usually enables interrupts (excluding the
+vblank interrupt, which is enabled separately), but drivers may choose
+to enable/disable interrupts at a different time.
+
+:c:func:`drm_irq_uninstall()` is similarly used to uninstall an
+IRQ handler. It starts by waking up all processes waiting on a vblank
+interrupt to make sure they don't hang, and then calls the optional
+irq_uninstall driver operation. The operation must disable all hardware
+interrupts. Finally the function frees the IRQ by calling
+:c:func:`free_irq()`.
+
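+A sketch of the driver-side pieces involved in managed registration
+(the "foo" names are hypothetical and the handler body is elided)::
+
+    static irqreturn_t foo_irq_handler(int irq, void *arg)
+    {
+        struct drm_device *dev = arg;
+
+        /* Read and acknowledge the device interrupt status using dev. */
+        return IRQ_HANDLED;
+    }
+
+    static struct drm_driver foo_driver = {
+        .driver_features = DRIVER_MODESET | DRIVER_GEM |
+                           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+        .irq_preinstall = foo_irq_preinstall,   /* optional: mask and clear */
+        .irq_handler = foo_irq_handler,         /* mandatory */
+        .irq_postinstall = foo_irq_postinstall, /* optional: enable */
+        .irq_uninstall = foo_irq_uninstall,     /* optional: disable */
+    };
+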
+Manual IRQ Registration
+'''''''''''''''''''''''
+
+Drivers that require multiple interrupt handlers can't use the managed
+IRQ registration functions. In that case IRQs must be registered and
+unregistered manually (usually with the :c:func:`request_irq()` and
+:c:func:`free_irq()` functions, or their :c:func:`devm_request_irq()` and
+:c:func:`devm_free_irq()` equivalents).
+
+When manually registering IRQs, drivers must not set the
+DRIVER_HAVE_IRQ driver feature flag, and must not provide the
+irq_handler driver operation. They must set the :c:type:`struct
+drm_device <drm_device>` irq_enabled field to 1 upon
+registration of the IRQs, and clear it to 0 after unregistering the
+IRQs.
+
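+A sketch under those rules, assuming a hypothetical "foo" driver with
+two interrupt lines::
+
+    ret = devm_request_irq(dev->dev, irq1, foo_irq1_handler, 0,
+                           "foo-irq1", dev);
+    if (ret)
+        return ret;
+
+    ret = devm_request_irq(dev->dev, irq2, foo_irq2_handler, 0,
+                           "foo-irq2", dev);
+    if (ret)
+        return ret;
+
+    dev->irq_enabled = 1;
+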
+Memory Manager Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Every DRM driver requires a memory manager which must be initialized at
+load time. DRM currently contains two memory managers, the Translation
+Table Manager (TTM) and the Graphics Execution Manager (GEM). This
+document describes the use of the GEM memory manager only. See ? for
+details.
+
+Miscellaneous Device Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Another task that may be necessary for PCI devices during configuration
+is mapping the video BIOS. On many devices, the VBIOS describes device
+configuration, LCD panel timings (if any), and contains flags indicating
+device state. Mapping the BIOS can be done using the pci_map_rom()
+call, a convenience function that takes care of mapping the actual ROM,
+whether it has been shadowed into memory (typically at address 0xc0000)
+or exists on the PCI device in the ROM BAR. Note that after the ROM has
+been mapped and any necessary information has been extracted, it should
+be unmapped; on many devices, the ROM address decoder is shared with
+other BARs, so leaving it mapped could cause undesired behaviour like
+hangs or memory corruption.
+
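+A sketch of that sequence (further error handling elided)::
+
+    size_t size;
+    void __iomem *rom = pci_map_rom(pdev, &size);
+
+    if (rom) {
+        /* ... extract panel timings, configuration flags, etc. ... */
+        pci_unmap_rom(pdev, rom);
+    }
+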
+Bus-specific Device Registration and PCI Support
+------------------------------------------------
+
+A number of functions are provided to help with device registration. The
+functions deal with PCI and platform devices respectively and are only
+provided for historical reasons. These are all deprecated and shouldn't
+be used in new drivers. Besides that there are a few helpers for PCI
+drivers.
+
+.. kernel-doc:: drivers/gpu/drm/drm_pci.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_platform.c
+   :export:
+
+Open/Close, File Operations and IOCTLs
+======================================
+
+Open and Close
+--------------
+
+Open and close handlers. None of those methods are mandatory::
+
+    int (*firstopen) (struct drm_device *);
+    void (*lastclose) (struct drm_device *);
+    int (*open) (struct drm_device *, struct drm_file *);
+    void (*preclose) (struct drm_device *, struct drm_file *);
+    void (*postclose) (struct drm_device *, struct drm_file *);
+
+The firstopen method is called by the DRM core for legacy UMS (User Mode
+Setting) drivers only when an application opens a device that has no
+other opened file handle. UMS drivers can implement it to acquire device
+resources. KMS drivers can't use the method and must acquire resources
+in the load method instead.
+
+Similarly the lastclose method is called when the last application
+holding a file handle opened on the device closes it, for both UMS and
+KMS drivers. Additionally, the method is also called at module unload
+time or, for hot-pluggable devices, when the device is unplugged. The
+firstopen and lastclose calls can thus be unbalanced.
+
+The open method is called every time the device is opened by an
+application. Drivers can allocate per-file private data in this method
+and store them in the struct :c:type:`struct drm_file
+<drm_file>` driver_priv field. Note that the open method is
+called before firstopen.
+
+The close operation is split into preclose and postclose methods.
+Drivers must stop and clean up all per-file operations in the preclose
+method. For instance pending vertical blanking and page flip events must
+be cancelled. No per-file operation is allowed on the file handle after
+returning from the preclose method.
+
+Finally the postclose method is called as the last step of the close
+operation, right before calling the lastclose method if no other open
+file handle exists for the device. Drivers that have allocated per-file
+private data in the open method should free it here.
+
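+A sketch of per-file private data handling along those lines (the
+"foo_file" structure is hypothetical)::
+
+    static int foo_open(struct drm_device *dev, struct drm_file *file)
+    {
+        struct foo_file *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+
+        if (!priv)
+            return -ENOMEM;
+
+        file->driver_priv = priv;
+        return 0;
+    }
+
+    static void foo_postclose(struct drm_device *dev, struct drm_file *file)
+    {
+        /* Last step of the close operation: free what open allocated. */
+        kfree(file->driver_priv);
+    }
+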
+The lastclose method should restore CRTC and plane properties to their
+default values, so that a subsequent open of the device will not inherit
+state from the previous user. It can also be used to execute delayed power
+switching state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
+infrastructure. Beyond that KMS drivers should not do any
+further cleanup. Only legacy UMS drivers might need to clean up device
+state so that the vga console or an independent fbdev driver could take
+over.
+
+File Operations
+---------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_fops.c
+   :doc: file operations
+
+.. kernel-doc:: drivers/gpu/drm/drm_fops.c
+   :export:
+
+IOCTLs
+------
+
+struct drm_ioctl_desc \*ioctls; int num_ioctls;
+    Driver-specific ioctl descriptor table.
+
+Driver-specific ioctl numbers start at DRM_COMMAND_BASE. The ioctl
+descriptor table is indexed by the ioctl number offset from the base
+value. Drivers can use the DRM_IOCTL_DEF_DRV() macro to initialize
+the table entries.
+
+::
+
+    DRM_IOCTL_DEF_DRV(ioctl, func, flags)
+
+``ioctl`` is the ioctl name. Drivers must define the DRM_##ioctl and
+DRM_IOCTL_##ioctl macros to the ioctl number offset from
+DRM_COMMAND_BASE and the ioctl number respectively. The first macro is
+private to the device while the second must be exposed to userspace in a
+public header.
+
+``func`` is a pointer to the ioctl handler function compatible with the
+``drm_ioctl_t`` type.
+
+::
+
+    typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+            struct drm_file *file_priv);
+
+``flags`` is a bitmask combination of the following values. It restricts
+how the ioctl is allowed to be called.
+
+-  DRM_AUTH - Only authenticated callers allowed
+
+-  DRM_MASTER - The ioctl can only be called on the master file handle
+
+-  DRM_ROOT_ONLY - Only callers with the SYSADMIN capability allowed
+
+-  DRM_CONTROL_ALLOW - The ioctl can only be called on a control
+   device
+
+-  DRM_UNLOCKED - The ioctl handler will be called without locking the
+   DRM global mutex. This is the enforced default for kms drivers (i.e.
+   using the DRIVER_MODESET flag) and hence shouldn't be used any more
+   for new drivers.
+
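+Putting this together, a driver-private ioctl might be wired up as
+follows (the "foo" names, the ioctl number and the handler are
+hypothetical)::
+
+    #define DRM_FOO_GET_PARAM       0x00
+    #define DRM_IOCTL_FOO_GET_PARAM \
+        DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_GET_PARAM, \
+                 struct drm_foo_get_param)
+
+    static const struct drm_ioctl_desc foo_ioctls[] = {
+        DRM_IOCTL_DEF_DRV(FOO_GET_PARAM, foo_get_param_ioctl,
+                          DRM_AUTH | DRM_UNLOCKED),
+    };
+
+    static struct drm_driver foo_driver = {
+        /* ... */
+        .ioctls = foo_ioctls,
+        .num_ioctls = ARRAY_SIZE(foo_ioctls),
+    };
+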
+.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
+   :export:
+
+Legacy Support Code
+===================
+
+This section very briefly covers some of the old legacy support code
+which is only used by old DRM drivers which have done a so-called
+shadow-attach to the underlying device instead of registering as a real
+driver. This also includes some of the old generic buffer management and
+command submission code. Do not use any of this in new and modern
+drivers.
+
+Legacy Suspend/Resume
+---------------------
+
+The DRM core provides some suspend/resume code, but drivers wanting full
+suspend/resume support should provide save() and restore() functions.
+These are called at suspend, hibernate, or resume time, and should
+perform any state save or restore required by your device across suspend
+or hibernate states.
+
+::
+
+    int (*suspend) (struct drm_device *, pm_message_t state);
+    int (*resume) (struct drm_device *);
+
+Those are legacy suspend and resume methods which *only* work with the
+legacy shadow-attach driver registration functions. New drivers should
+use the power management interface provided by their bus type (usually
+through the :c:type:`struct device_driver <device_driver>`
+dev_pm_ops) and set these methods to NULL.
+
+Legacy DMA Services
+-------------------
+
+This should cover how DMA mapping etc. is supported by the core. These
+functions are deprecated and should not be used.
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
new file mode 100644 (file)
index 0000000..0b302fe
--- /dev/null
@@ -0,0 +1,260 @@
+=============================
+Mode Setting Helper Functions
+=============================
+
+The plane, CRTC, encoder and connector functions provided by the drivers
+implement the DRM API. They're called by the DRM core and ioctl handlers
+to handle device state changes and configuration requests. As
+implementing those functions often requires logic not specific to
+drivers, mid-layer helper functions are available to avoid duplicating
+boilerplate code.
+
+The DRM core contains one mid-layer implementation. The mid-layer
+provides implementations of several plane, CRTC, encoder and connector
+functions (called from the top of the mid-layer) that pre-process
+requests and call lower-level functions provided by the driver (at the
+bottom of the mid-layer). For instance, the
+:c:func:`drm_crtc_helper_set_config()` function can be used to
+fill the :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`
+set_config field. When called, it will split the set_config operation
+into smaller, simpler operations and call the driver to handle them.
+
+To use the mid-layer, drivers call
+:c:func:`drm_crtc_helper_add()`,
+:c:func:`drm_encoder_helper_add()` and
+:c:func:`drm_connector_helper_add()` functions to install their
+mid-layer bottom operations handlers, and fill the :c:type:`struct
+drm_crtc_funcs <drm_crtc_funcs>`, :c:type:`struct
+drm_encoder_funcs <drm_encoder_funcs>` and :c:type:`struct
+drm_connector_funcs <drm_connector_funcs>` structures with
+pointers to the mid-layer top API functions. Installing the mid-layer
+bottom operation handlers is best done right after registering the
+corresponding KMS object.
+
+The mid-layer is not split between CRTC, encoder and connector
+operations. To use it, a driver must provide bottom functions for all of
+the three KMS entities.
+
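+As an illustration, wiring up the legacy CRTC helpers might look like
+this (the "foo" structures are hypothetical; the helper vtable contents
+are driver-specific)::
+
+    static const struct drm_crtc_funcs foo_crtc_funcs = {
+        /* Top of the mid-layer: point at the helper entry points. */
+        .set_config = drm_crtc_helper_set_config,
+        .destroy = drm_crtc_cleanup,
+    };
+
+    drm_crtc_init(dev, &foo_crtc->base, &foo_crtc_funcs);
+    /* Bottom of the mid-layer: install the driver's helper vtable. */
+    drm_crtc_helper_add(&foo_crtc->base, &foo_crtc_helper_funcs);
+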
+Atomic Modeset Helper Functions Reference
+=========================================
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+   :doc: overview
+
+Implementing Asynchronous Atomic Commit
+---------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+   :doc: implementing nonblocking commit
+
+Atomic State Reset and Initialization
+-------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+   :doc: atomic state reset and initialization
+
+.. kernel-doc:: include/drm/drm_atomic_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+   :export:
+
+Modeset Helper Reference for Common Vtables
+===========================================
+
+.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
+   :internal:
+
+.. kernel-doc:: include/drm/drm_modeset_helper_vtables.h
+   :doc: overview
+
+Legacy CRTC/Modeset Helper Functions Reference
+==============================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
+   :doc: overview
+
+Output Probing Helper Functions Reference
+=========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+   :doc: output probing helper overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_probe_helper.c
+   :export:
+
+fbdev Helper Functions Reference
+================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
+   :doc: fbdev helpers
+
+.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_fb_helper.h
+   :internal:
+
+Framebuffer CMA Helper Functions Reference
+==========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
+   :doc: framebuffer cma helper functions
+
+.. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c
+   :export:
+
+Display Port Helper Functions Reference
+=======================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
+   :doc: dp helpers
+
+.. kernel-doc:: include/drm/drm_dp_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_helper.c
+   :export:
+
+Display Port Dual Mode Adaptor Helper Functions Reference
+=========================================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
+   :doc: dp dual mode helpers
+
+.. kernel-doc:: include/drm/drm_dp_dual_mode_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c
+   :export:
+
+Display Port MST Helper Functions Reference
+===========================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+   :doc: dp mst helper
+
+.. kernel-doc:: include/drm/drm_dp_mst_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c
+   :export:
+
+MIPI DSI Helper Functions Reference
+===================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
+   :doc: dsi helpers
+
+.. kernel-doc:: include/drm/drm_mipi_dsi.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
+   :export:
+
+EDID Helper Functions Reference
+===============================
+
+.. kernel-doc:: drivers/gpu/drm/drm_edid.c
+   :export:
+
+Rectangle Utilities Reference
+=============================
+
+.. kernel-doc:: include/drm/drm_rect.h
+   :doc: rect utils
+
+.. kernel-doc:: include/drm/drm_rect.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_rect.c
+   :export:
+
+Flip-work Helper Reference
+==========================
+
+.. kernel-doc:: include/drm/drm_flip_work.h
+   :doc: flip utils
+
+.. kernel-doc:: include/drm/drm_flip_work.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_flip_work.c
+   :export:
+
+HDMI Infoframes Helper Reference
+================================
+
+Strictly speaking this is not a DRM helper library, as it is generally
+usable by any driver interfacing with HDMI outputs, such as v4l or alsa
+drivers. But it fits nicely into the overall topic of mode setting
+helper libraries and hence is also included here.
+
+.. kernel-doc:: include/linux/hdmi.h
+   :internal:
+
+.. kernel-doc:: drivers/video/hdmi.c
+   :export:
+
+Plane Helper Reference
+======================
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_plane_helper.c
+   :doc: overview
+
+Tile group
+----------
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+   :doc: Tile group
+
+Bridges
+=======
+
+Overview
+--------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :doc: overview
+
+Default bridge callback sequence
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :doc: bridge callbacks
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
+   :export:
+
+Panel Helper Reference
+======================
+
+.. kernel-doc:: include/drm/drm_panel.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panel.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_panel.c
+   :doc: drm panel
+
+Simple KMS Helper Reference
+===========================
+
+.. kernel-doc:: include/drm/drm_simple_kms_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
+   :doc: overview
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
new file mode 100644 (file)
index 0000000..8dfa4b2
--- /dev/null
@@ -0,0 +1,653 @@
+=========================
+Kernel Mode Setting (KMS)
+=========================
+
+Mode Setting
+============
+
+Drivers must initialize the mode setting core by calling
+:c:func:`drm_mode_config_init()` on the DRM device. The function
+initializes the :c:type:`struct drm_device <drm_device>`
+mode_config field and never fails. Once done, mode configuration must
+be set up by initializing the following fields.
+
+-  int min_width, min_height; int max_width, max_height;
+   Minimum and maximum width and height of the frame buffers in pixel
+   units.
+
+-  struct drm_mode_config_funcs \*funcs;
+   Mode setting functions.
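+
+A minimal sketch of this initialization (the ``foo`` names and size
+limits are invented, and the CMA helper :c:func:`drm_fb_cma_create()`
+stands in for whatever fb_create implementation the driver uses) could
+look like::
+
+    static const struct drm_mode_config_funcs foo_mode_config_funcs = {
+        .fb_create = drm_fb_cma_create,
+    };
+
+    static void foo_mode_config_init(struct drm_device *dev)
+    {
+        drm_mode_config_init(dev);
+
+        /* Frame buffer size limits supported by the hardware. */
+        dev->mode_config.min_width = 0;
+        dev->mode_config.min_height = 0;
+        dev->mode_config.max_width = 4096;
+        dev->mode_config.max_height = 4096;
+
+        dev->mode_config.funcs = &foo_mode_config_funcs;
+    }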
+
+Display Modes Function Reference
+--------------------------------
+
+.. kernel-doc:: include/drm/drm_modes.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_modes.c
+   :export:
+
+Atomic Mode Setting Function Reference
+--------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+   :export:
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+   :internal:
+
+Frame Buffer Abstraction
+------------------------
+
+Frame buffers are abstract memory objects that provide a source of
+pixels to scanout to a CRTC. Applications explicitly request the
+creation of frame buffers through the DRM_IOCTL_MODE_ADDFB(2) ioctls
+and receive an opaque handle that can be passed to the KMS CRTC control,
+plane configuration and page flip functions.
+
+Frame buffers rely on the underlying memory manager for low-level memory
+operations. When creating a frame buffer applications pass a memory
+handle (or a list of memory handles for multi-planar formats) through
+the ``drm_mode_fb_cmd2`` argument. For drivers using GEM as their
+userspace buffer management interface this would be a GEM handle.
+Drivers are however free to use their own backing storage object
+handles, e.g. vmwgfx directly exposes special TTM handles to userspace
+and so expects TTM handles in the create ioctl and not GEM handles.
+
+The lifetime of a DRM framebuffer is controlled by a reference count.
+Drivers can grab additional references with
+:c:func:`drm_framebuffer_reference()` and drop them again with
+:c:func:`drm_framebuffer_unreference()`. For driver-private
+framebuffers for which the last reference is never dropped (e.g. for the
+fbdev framebuffer when the :c:type:`struct drm_framebuffer
+<drm_framebuffer>` is embedded into the fbdev helper struct)
+drivers can manually clean up a framebuffer at module unload time with
+:c:func:`drm_framebuffer_unregister_private()`.
+
+DRM Format Handling
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
+   :export:
+
+Dumb Buffer Objects
+-------------------
+
+The KMS API doesn't standardize backing storage object creation and
+leaves it to driver-specific ioctls. Furthermore actually creating a
+buffer object even for GEM-based drivers is done through a
+driver-specific ioctl - GEM only has a common userspace interface for
+sharing and destroying objects. While not an issue for full-fledged
+graphics stacks that include device-specific userspace components (in
+libdrm for instance), this limit makes DRM-based early boot graphics
+unnecessarily complex.
+
+Dumb objects partly alleviate the problem by providing a standard API to
+create dumb buffers suitable for scanout, which can then be used to
+create KMS frame buffers.
+
+To support dumb objects drivers must implement the dumb_create,
+dumb_destroy and dumb_map_offset operations.
+
+-  int (\*dumb_create)(struct drm_file \*file_priv, struct
+   drm_device \*dev, struct drm_mode_create_dumb \*args);
+   The dumb_create operation creates a driver object (GEM or TTM
+   handle) suitable for scanout based on the width, height and depth
+   from the struct :c:type:`struct drm_mode_create_dumb
+   <drm_mode_create_dumb>` argument. It fills the argument's
+   handle, pitch and size fields with a handle for the newly created
+   object and its line pitch and size in bytes.
+
+-  int (\*dumb_destroy)(struct drm_file \*file_priv, struct
+   drm_device \*dev, uint32_t handle);
+   The dumb_destroy operation destroys a dumb object created by
+   dumb_create.
+
+-  int (\*dumb_map_offset)(struct drm_file \*file_priv, struct
+   drm_device \*dev, uint32_t handle, uint64_t \*offset);
+   The dumb_map_offset operation associates an mmap fake offset with
+   the object given by the handle and returns it. Drivers must use the
+   :c:func:`drm_gem_create_mmap_offset()` function to associate
+   the fake offset as described in the GEM Objects Mapping section.
+
+Note that dumb objects may not be used for gpu acceleration, as has been
+attempted on some ARM embedded platforms. Such drivers really must have
+a hardware-specific ioctl to allocate suitable buffer objects.
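+
+For instance, a driver built on the CMA GEM helpers could (as a sketch,
+not tied to any particular driver) wire these operations to existing
+helper implementations::
+
+    static struct drm_driver foo_driver = {
+        /* ... */
+        .dumb_create = drm_gem_cma_dumb_create,
+        .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+        .dumb_destroy = drm_gem_dumb_destroy,
+    };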
+
+Output Polling
+--------------
+
+::
+
+    void (*output_poll_changed)(struct drm_device *dev);
+
+This operation notifies the driver that the status of one or more
+connectors has changed. Drivers that use the fb helper can just call the
+:c:func:`drm_fb_helper_hotplug_event()` function to handle this
+operation.
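+
+A driver using the fbdev helpers would typically implement the hook
+along these lines (``foo_drm_output_poll_changed`` and the ``fbdev``
+field of the invented private structure are hypothetical)::
+
+    static void foo_drm_output_poll_changed(struct drm_device *dev)
+    {
+        struct foo_drm_private *priv = dev->dev_private;
+
+        /* Let the fb helper re-probe outputs and update the console. */
+        drm_fb_helper_hotplug_event(priv->fbdev);
+    }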
+
+KMS Initialization and Cleanup
+==============================
+
+A KMS device is abstracted and exposed as a set of planes, CRTCs,
+encoders and connectors. KMS drivers must thus create and initialize all
+those objects at load time after initializing mode setting.
+
+CRTCs (:c:type:`struct drm_crtc <drm_crtc>`)
+--------------------------------------------
+
+A CRTC is an abstraction representing a part of the chip that contains a
+pointer to a scanout buffer. Therefore, the number of CRTCs available
+determines how many independent scanout buffers can be active at any
+given time. The CRTC structure contains several fields to support this:
+a pointer to some video memory (abstracted as a frame buffer object), a
+display mode, and an (x, y) offset into the video memory to support
+panning or configurations where one piece of video memory spans multiple
+CRTCs.
+
+CRTC Initialization
+~~~~~~~~~~~~~~~~~~~
+
+A KMS device must create and register at least one struct
+:c:type:`struct drm_crtc <drm_crtc>` instance. The instance is
+allocated and zeroed by the driver, possibly as part of a larger
+structure, and registered with a call to :c:func:`drm_crtc_init()`
+with a pointer to CRTC functions.
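+
+A minimal sketch, with the driver-specific names invented for
+illustration::
+
+    struct foo_crtc {
+        struct drm_crtc base;
+        /* driver-private CRTC state goes here */
+    };
+
+    static int foo_crtc_create(struct drm_device *dev)
+    {
+        struct foo_crtc *fcrtc;
+
+        fcrtc = kzalloc(sizeof(*fcrtc), GFP_KERNEL);
+        if (!fcrtc)
+            return -ENOMEM;
+
+        return drm_crtc_init(dev, &fcrtc->base, &foo_crtc_funcs);
+    }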
+
+Planes (:c:type:`struct drm_plane <drm_plane>`)
+-----------------------------------------------
+
+A plane represents an image source that can be blended with or overlayed
+on top of a CRTC during the scanout process. Planes are associated with
+a frame buffer to crop a portion of the image memory (source) and
+optionally scale it to a destination size. The result is then blended
+with or overlayed on top of a CRTC.
+
+The DRM core recognizes three types of planes:
+
+-  DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC.
+   Primary planes are the planes operated upon by CRTC modesetting and
+   flipping operations described in the page_flip hook in
+   :c:type:`struct drm_crtc_funcs <drm_crtc_funcs>`.
+-  DRM_PLANE_TYPE_CURSOR represents a "cursor" plane for a CRTC.
+   Cursor planes are the planes operated upon by the
+   DRM_IOCTL_MODE_CURSOR and DRM_IOCTL_MODE_CURSOR2 ioctls.
+-  DRM_PLANE_TYPE_OVERLAY represents all non-primary, non-cursor
+   planes. Some drivers refer to these types of planes as "sprites"
+   internally.
+
+For compatibility with legacy userspace, only overlay planes are made
+available to userspace by default. Userspace clients may set the
+DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate
+that they wish to receive a universal plane list containing all plane
+types.
+
+Plane Initialization
+~~~~~~~~~~~~~~~~~~~~
+
+To create a plane, a KMS driver allocates and zeroes an instance of
+:c:type:`struct drm_plane <drm_plane>` (possibly as part of a
+larger structure) and registers it with a call to
+:c:func:`drm_universal_plane_init()`. The function takes a
+bitmask of the CRTCs that can be associated with the plane, a pointer to
+the plane functions, a list of supported formats, and the type of
+plane (primary, cursor, or overlay) being initialized.
+
+Cursor and overlay planes are optional. All drivers should provide one
+primary plane per CRTC (although this requirement may change in the
+future); drivers that do not wish to provide special handling for
+primary planes may make use of the helper functions described in the
+plane helper reference to create and register a primary plane with
+standard capabilities.
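+
+As a sketch (the ``foo`` names and format list are invented), creating a
+primary plane restricted to the first CRTC could look like::
+
+    static const uint32_t foo_plane_formats[] = {
+        DRM_FORMAT_XRGB8888,
+        DRM_FORMAT_ARGB8888,
+    };
+
+    static int foo_primary_plane_create(struct drm_device *dev,
+                                        struct drm_plane *plane)
+    {
+        return drm_universal_plane_init(dev, plane, BIT(0),
+                                        &foo_plane_funcs,
+                                        foo_plane_formats,
+                                        ARRAY_SIZE(foo_plane_formats),
+                                        DRM_PLANE_TYPE_PRIMARY, NULL);
+    }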
+
+Encoders (:c:type:`struct drm_encoder <drm_encoder>`)
+-----------------------------------------------------
+
+An encoder takes pixel data from a CRTC and converts it to a format
+suitable for any attached connectors. On some devices, it may be
+possible to have a CRTC send data to more than one encoder. In that
+case, both encoders would receive data from the same scanout buffer,
+resulting in a "cloned" display configuration across the connectors
+attached to each encoder.
+
+Encoder Initialization
+~~~~~~~~~~~~~~~~~~~~~~
+
+As for CRTCs, a KMS driver must create, initialize and register at least
+one :c:type:`struct drm_encoder <drm_encoder>` instance. The
+instance is allocated and zeroed by the driver, possibly as part of a
+larger structure.
+
+Drivers must initialize the :c:type:`struct drm_encoder
+<drm_encoder>` possible_crtcs and possible_clones fields before
+registering the encoder. Both fields are bitmasks of respectively the
+CRTCs that the encoder can be connected to, and sibling encoders
+candidate for cloning.
+
+After being initialized, the encoder must be registered with a call to
+:c:func:`drm_encoder_init()`. The function takes a pointer to the
+encoder functions and an encoder type. Supported types are
+
+-  DRM_MODE_ENCODER_DAC for VGA and analog on DVI-I/DVI-A
+-  DRM_MODE_ENCODER_TMDS for DVI, HDMI and (embedded) DisplayPort
+-  DRM_MODE_ENCODER_LVDS for display panels
+-  DRM_MODE_ENCODER_TVDAC for TV output (Composite, S-Video,
+   Component, SCART)
+-  DRM_MODE_ENCODER_VIRTUAL for virtual machine displays
+
+Encoders must be attached to a CRTC to be used. DRM drivers leave
+encoders unattached at initialization time. Applications (or the fbdev
+compatibility layer when implemented) are responsible for attaching the
+encoders they want to use to a CRTC.
+
+Connectors (:c:type:`struct drm_connector <drm_connector>`)
+-----------------------------------------------------------
+
+A connector is the final destination for pixel data on a device, and
+usually connects directly to an external display device like a monitor
+or laptop panel. A connector can only be attached to one encoder at a
+time. The connector is also the structure where information about the
+attached display is kept, so it contains fields for display data, EDID
+data, DPMS & connection status, and information about modes supported on
+the attached displays.
+
+Connector Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Finally a KMS driver must create, initialize, register and attach at
+least one :c:type:`struct drm_connector <drm_connector>`
+instance. The instance is created as other KMS objects and initialized
+by setting the following fields.
+
+interlace_allowed
+    Whether the connector can handle interlaced modes.
+
+doublescan_allowed
+    Whether the connector can handle doublescan.
+
+display_info
+    Display information is filled from EDID information when a display
+    is detected. For non hot-pluggable displays such as flat panels in
+    embedded systems, the driver should initialize the
+    display_info.width_mm and display_info.height_mm fields with the
+    physical size of the display.
+
+polled
+    Connector polling mode, a combination of
+
+    DRM_CONNECTOR_POLL_HPD
+        The connector generates hotplug events and doesn't need to be
+        periodically polled. The CONNECT and DISCONNECT flags must not
+        be set together with the HPD flag.
+
+    DRM_CONNECTOR_POLL_CONNECT
+        Periodically poll the connector for connection.
+
+    DRM_CONNECTOR_POLL_DISCONNECT
+        Periodically poll the connector for disconnection.
+
+    Set to 0 for connectors that don't support connection status
+    discovery.
+
+The connector is then registered with a call to
+:c:func:`drm_connector_init()` with a pointer to the connector
+functions and a connector type, and exposed through sysfs with a call to
+:c:func:`drm_connector_register()`.
+
+Supported connector types are
+
+-  DRM_MODE_CONNECTOR_VGA
+-  DRM_MODE_CONNECTOR_DVII
+-  DRM_MODE_CONNECTOR_DVID
+-  DRM_MODE_CONNECTOR_DVIA
+-  DRM_MODE_CONNECTOR_Composite
+-  DRM_MODE_CONNECTOR_SVIDEO
+-  DRM_MODE_CONNECTOR_LVDS
+-  DRM_MODE_CONNECTOR_Component
+-  DRM_MODE_CONNECTOR_9PinDIN
+-  DRM_MODE_CONNECTOR_DisplayPort
+-  DRM_MODE_CONNECTOR_HDMIA
+-  DRM_MODE_CONNECTOR_HDMIB
+-  DRM_MODE_CONNECTOR_TV
+-  DRM_MODE_CONNECTOR_eDP
+-  DRM_MODE_CONNECTOR_VIRTUAL
+
+Connectors must be attached to an encoder to be used. For devices that
+map connectors to encoders 1:1, the connector should be attached at
+initialization time with a call to
+:c:func:`drm_mode_connector_attach_encoder()`. The driver must
+also set the :c:type:`struct drm_connector <drm_connector>`
+encoder field to point to the attached encoder.
+
+Finally, drivers must initialize the connectors state change detection
+with a call to :c:func:`drm_kms_helper_poll_init()`. If at least
+one connector is pollable but can't generate hotplug interrupts
+(indicated by the DRM_CONNECTOR_POLL_CONNECT and
+DRM_CONNECTOR_POLL_DISCONNECT connector flags), a delayed work will
+automatically be queued to periodically poll for changes. Connectors
+that can generate hotplug interrupts must be marked with the
+DRM_CONNECTOR_POLL_HPD flag instead, and their interrupt handler must
+call :c:func:`drm_helper_hpd_irq_event()`. The function will
+queue a delayed work to check the state of all connectors, but no
+periodic polling will be done.
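+
+As a sketch (``foo_hpd_irq_handler`` is a made-up name), a driver with
+hotplug-capable connectors would do something like::
+
+    /* At the end of output initialization: */
+    drm_kms_helper_poll_init(dev);
+
+    /* In the hotplug interrupt handler: */
+    static irqreturn_t foo_hpd_irq_handler(int irq, void *arg)
+    {
+        struct drm_device *dev = arg;
+
+        drm_helper_hpd_irq_event(dev);
+
+        return IRQ_HANDLED;
+    }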
+
+Connector Operations
+~~~~~~~~~~~~~~~~~~~~
+
+    **Note**
+
+    Unless otherwise stated, all operations are mandatory.
+
+DPMS
+''''
+
+::
+
+    void (*dpms)(struct drm_connector *connector, int mode);
+
+The DPMS operation sets the power state of a connector. The mode
+argument is one of
+
+-  DRM_MODE_DPMS_ON
+
+-  DRM_MODE_DPMS_STANDBY
+
+-  DRM_MODE_DPMS_SUSPEND
+
+-  DRM_MODE_DPMS_OFF
+
+In all but DPMS_ON mode the encoder to which the connector is attached
+should put the display in low-power mode by driving its signals
+appropriately. If more than one connector is attached to the encoder
+care should be taken not to change the power state of other displays as
+a side effect. Low-power mode should be propagated to the encoders and
+CRTCs when all related connectors are put in low-power mode.
+
+Modes
+'''''
+
+::
+
+    int (*fill_modes)(struct drm_connector *connector,
+                      uint32_t max_width, uint32_t max_height);
+
+Fill the mode list with all supported modes for the connector. If the
+``max_width`` and ``max_height`` arguments are non-zero, the
+implementation must ignore all modes wider than ``max_width`` or higher
+than ``max_height``.
+
+The implementation must also fill in the connector display_info
+width_mm and height_mm fields with the physical size of the connected
+display in millimeters. The fields should be set to 0 if the value isn't
+known or is not applicable (for instance for projector devices).
+
+Connection Status
+'''''''''''''''''
+
+The connection status is updated through polling or hotplug events when
+supported (see the Output Polling section). The status value is
+reported to userspace through ioctls and must not be used inside the
+driver, as it only gets initialized by a call to
+:c:func:`drm_mode_getconnector()` from userspace.
+
+::
+
+    enum drm_connector_status (*detect)(struct drm_connector *connector,
+                                        bool force);
+
+Check to see if anything is attached to the connector. The ``force``
+parameter is set to false whilst polling or to true when checking the
+connector due to user request. ``force`` can be used by the driver to
+avoid expensive, destructive operations during automated probing.
+
+Return connector_status_connected if something is connected to the
+connector, connector_status_disconnected if nothing is connected and
+connector_status_unknown if the connection state isn't known.
+
+Drivers should only return connector_status_connected if the
+connection status has really been probed as connected. Connectors that
+can't detect the connection status, or failed connection status probes,
+should return connector_status_unknown.
+
+Cleanup
+-------
+
+The DRM core manages its objects' lifetime. When an object is not needed
+anymore the core calls its destroy function, which must clean up and
+free every resource allocated for the object. Every
+:c:func:`drm_\*_init()` call must be matched with a corresponding
+:c:func:`drm_\*_cleanup()` call to clean up CRTCs
+(:c:func:`drm_crtc_cleanup()`), planes
+(:c:func:`drm_plane_cleanup()`), encoders
+(:c:func:`drm_encoder_cleanup()`) and connectors
+(:c:func:`drm_connector_cleanup()`). Furthermore, connectors that
+have been added to sysfs must be removed by a call to
+:c:func:`drm_connector_unregister()` before calling
+:c:func:`drm_connector_cleanup()`.
+
+Connector state change detection must be cleaned up with a call to
+:c:func:`drm_kms_helper_poll_fini()`.
+
+Output discovery and initialization example
+-------------------------------------------
+
+::
+
+    void intel_crt_init(struct drm_device *dev)
+    {
+        struct drm_connector *connector;
+        struct intel_output *intel_output;
+
+        intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
+        if (!intel_output)
+            return;
+
+        connector = &intel_output->base;
+        drm_connector_init(dev, &intel_output->base,
+                   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+        drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs,
+                 DRM_MODE_ENCODER_DAC);
+
+        drm_mode_connector_attach_encoder(&intel_output->base,
+                          &intel_output->enc);
+
+        /* Set up the DDC bus. */
+        intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A");
+        if (!intel_output->ddc_bus) {
+            dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+                   "failed.\n");
+            return;
+        }
+
+        intel_output->type = INTEL_OUTPUT_ANALOG;
+        connector->interlace_allowed = 0;
+        connector->doublescan_allowed = 0;
+
+        drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs);
+        drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
+
+        drm_connector_register(connector);
+    }
+
+In the example above (taken from the i915 driver), a CRTC, connector and
+encoder combination is created. A device-specific i2c bus is also
+created for fetching EDID data and performing monitor detection. Once
+the process is complete, the new connector is registered with sysfs to
+make its properties available to applications.
+
+KMS API Functions
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_crtc.c
+   :export:
+
+KMS Data Structures
+-------------------
+
+.. kernel-doc:: include/drm/drm_crtc.h
+   :internal:
+
+KMS Locking
+-----------
+
+.. kernel-doc:: drivers/gpu/drm/drm_modeset_lock.c
+   :doc: kms locking
+
+.. kernel-doc:: include/drm/drm_modeset_lock.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_modeset_lock.c
+   :export:
+
+KMS Properties
+==============
+
+Drivers may need to expose additional parameters to applications beyond
+those described in the previous sections. KMS supports attaching
+properties to CRTCs, connectors and planes and offers a userspace API to
+list, get and set the property values.
+
+Properties are identified by a name that uniquely defines the property
+purpose, and store an associated value. For all property types except
+blob properties the value is a 64-bit unsigned integer.
+
+KMS differentiates between properties and property instances. Drivers
+first create properties and then create and associate individual
+instances of those properties to objects. A property can be instantiated
+multiple times and associated with different objects. Values are stored
+in property instances, and all other property information is stored in
+the property and shared between all instances of the property.
+
+Every property is created with a type that influences how the KMS core
+handles the property. Supported property types are
+
+DRM_MODE_PROP_RANGE
+    Range properties report their minimum and maximum admissible values.
+    The KMS core verifies that values set by applications fit in that
+    range.
+
+DRM_MODE_PROP_ENUM
+    Enumerated properties take a numerical value that ranges from 0 to
+    the number of enumerated values defined by the property minus one,
+    and associate a free-form string name with each value. Applications
+    can retrieve the list of defined value-name pairs and use the
+    numerical value to get and set property instance values.
+
+DRM_MODE_PROP_BITMASK
+    Bitmask properties are enumeration properties that additionally
+    restrict all enumerated values to the 0..63 range. Bitmask property
+    instance values combine one or more of the enumerated bits defined
+    by the property.
+
+DRM_MODE_PROP_BLOB
+    Blob properties store a binary blob without any format restriction.
+    The binary blobs are created as KMS standalone objects, and blob
+    property instance values store the ID of their associated blob
+    object.
+
+    Blob properties are only used for the connector EDID property and
+    cannot be created by drivers.
+
+To create a property drivers call one of the following functions
+depending on the property type. All property creation functions take
+property flags and name, as well as type-specific arguments.
+
+-  struct drm_property \*drm_property_create_range(struct
+   drm_device \*dev, int flags, const char \*name, uint64_t min,
+   uint64_t max);
+   Create a range property with the given minimum and maximum values.
+
+-  struct drm_property \*drm_property_create_enum(struct drm_device
+   \*dev, int flags, const char \*name, const struct
+   drm_prop_enum_list \*props, int num_values);
+   Create an enumerated property. The ``props`` argument points to an
+   array of ``num_values`` value-name pairs.
+
+-  struct drm_property \*drm_property_create_bitmask(struct
+   drm_device \*dev, int flags, const char \*name, const struct
+   drm_prop_enum_list \*props, int num_values);
+   Create a bitmask property. The ``props`` argument points to an array
+   of ``num_values`` value-name pairs.
+
+Properties can additionally be created as immutable, in which case they
+will be read-only for applications but can be modified by the driver. To
+create an immutable property drivers must set the
+DRM_MODE_PROP_IMMUTABLE flag at property creation time.
+
+When no array of value-name pairs is readily available at property
+creation time for enumerated or range properties, drivers can create the
+property using the :c:func:`drm_property_create()` function and
+manually add enumeration value-name pairs by calling the
+:c:func:`drm_property_add_enum()` function. Care must be taken to
+properly specify the property type through the ``flags`` argument.
+
+After creating properties drivers can attach property instances to CRTC,
+connector and plane objects by calling
+:c:func:`drm_object_attach_property()`. The function takes a
+pointer to the target object, a pointer to the previously created
+property and an initial instance value.
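+
+For example (the property name and range are chosen arbitrarily), a
+driver could expose a range property on a plane like this::
+
+    struct drm_property *prop;
+
+    prop = drm_property_create_range(dev, 0, "zpos", 0, 7);
+    if (!prop)
+        return -ENOMEM;
+
+    drm_object_attach_property(&plane->base, prop, 0);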
+
+Existing KMS Properties
+-----------------------
+
+The following table gives a description of the DRM properties exposed
+by various modules/drivers.
+
+.. csv-table::
+   :header-rows: 1
+   :file: kms-properties.csv
+
+Vertical Blanking
+=================
+
+Vertical blanking plays a major role in graphics rendering. To achieve
+tear-free display, users must synchronize page flips and/or rendering to
+vertical blanking. The DRM API offers ioctls to perform page flips
+synchronized to vertical blanking and wait for vertical blanking.
+
+The DRM core handles most of the vertical blanking management logic,
+which involves filtering out spurious interrupts, keeping race-free
+blanking counters, coping with counter wrap-around and resets and
+keeping use counts. It relies on the driver to generate vertical
+blanking interrupts and optionally provide a hardware vertical blanking
+counter. Drivers must implement the following operations.
+
+-  int (\*enable_vblank) (struct drm_device \*dev, int crtc); void
+   (\*disable_vblank) (struct drm_device \*dev, int crtc);
+   Enable or disable vertical blanking interrupts for the given CRTC.
+
+-  u32 (\*get_vblank_counter) (struct drm_device \*dev, int crtc);
+   Retrieve the value of the vertical blanking counter for the given
+   CRTC. If the hardware maintains a vertical blanking counter its value
+   should be returned. Otherwise drivers can use the
+   :c:func:`drm_vblank_count()` helper function to handle this
+   operation.
+
+Drivers must initialize the vertical blanking handling core with a call
+to :c:func:`drm_vblank_init()` in their load operation.
+
+Vertical blanking interrupts can be enabled by the DRM core or by
+drivers themselves (for instance to handle page flipping operations).
+The DRM core maintains a vertical blanking use count to ensure that the
+interrupts are not disabled while a user still needs them. To increment
+the use count, drivers call :c:func:`drm_vblank_get()`. Upon
+return vertical blanking interrupts are guaranteed to be enabled.
+
+To decrement the use count drivers call
+:c:func:`drm_vblank_put()`. Only when the use count drops to zero
+will the DRM core disable the vertical blanking interrupts after a delay
+by scheduling a timer. The delay is accessible through the
+vblankoffdelay module parameter or the ``drm_vblank_offdelay`` global
+variable and expressed in milliseconds. Its default value is 5000 ms.
+Zero means never disable, and a negative value means disable
+immediately. Drivers may override the behaviour by setting the
+:c:type:`struct drm_device <drm_device>`
+vblank_disable_immediate flag, which when set causes vblank interrupts
+to be disabled immediately regardless of the drm_vblank_offdelay
+value. The flag should only be set if there's a properly working
+hardware vblank counter present.
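+
+A sketch of the typical pattern around a page flip (error handling and
+driver specifics omitted)::
+
+    /* Before programming the flip: */
+    ret = drm_vblank_get(dev, pipe);
+    if (ret)
+        return ret;
+
+    /* ... program the hardware to flip on the next vblank ... */
+
+    /* In the vblank interrupt handler, once the flip completed: */
+    drm_vblank_put(dev, pipe);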
+
+When a vertical blanking interrupt occurs drivers only need to call the
+:c:func:`drm_handle_vblank()` function to account for the
+interrupt.
+
+Resources allocated by :c:func:`drm_vblank_init()` must be freed
+with a call to :c:func:`drm_vblank_cleanup()` in the driver unload
+operation handler.
+
+Vertical Blanking and Interrupt Handling Functions Reference
+------------------------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_irq.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_irq.h
+   :internal:
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
new file mode 100644 (file)
index 0000000..59f9822
--- /dev/null
@@ -0,0 +1,454 @@
+=====================
+DRM Memory Management
+=====================
+
+Modern Linux systems require large amounts of graphics memory to store
+frame buffers, textures, vertices and other graphics-related data. Given
+the very dynamic nature of much of that data, managing graphics memory
+efficiently is thus crucial for the graphics stack and plays a central
+role in the DRM infrastructure.
+
+The DRM core includes two memory managers, namely Translation Table Maps
+(TTM) and Graphics Execution Manager (GEM). TTM was the first DRM memory
+manager to be developed and tried to be a one-size-fits-all solution.
+It provides a single userspace API to accommodate the needs of all
+hardware, supporting both Unified Memory Architecture (UMA) devices
+and devices with dedicated video RAM (i.e. most discrete video cards).
+This resulted in a large, complex piece of code that turned out to be
+hard to use for driver development.
+
+GEM started as an Intel-sponsored project in reaction to TTM's
+complexity. Its design philosophy is completely different: instead of
+providing a solution to every graphics memory-related problem, GEM
+identified common code between drivers and created a support library to
+share it. GEM has simpler initialization and execution requirements than
+TTM, but has no video RAM management capabilities and is thus limited to
+UMA devices.
+
+The Translation Table Manager (TTM)
+-----------------------------------
+
+TTM design background and information belongs here.
+
+TTM initialization
+~~~~~~~~~~~~~~~~~~
+
+    **Warning**
+
+    This section is outdated.
+
+Drivers wishing to support TTM must fill out a drm_bo_driver
+structure. The structure contains several fields with function pointers
+for initializing the TTM, allocating and freeing memory, waiting for
+command completion and fence synchronization, and memory migration. See
+the radeon_ttm.c file for an example of usage.
+
+The ttm_global_reference structure is made up of several fields:
+
+::
+
+              struct ttm_global_reference {
+                      enum ttm_global_types global_type;
+                      size_t size;
+                      void *object;
+                      int (*init) (struct ttm_global_reference *);
+                      void (*release) (struct ttm_global_reference *);
+              };
+
+
+There should be one global reference structure for your memory manager
+as a whole, and there will be others for each object created by the
+memory manager at runtime. Your global TTM should have a type of
+TTM_GLOBAL_TTM_MEM. The size field for the global object should be
+sizeof(struct ttm_mem_global), and the init and release hooks should
+point at your driver-specific init and release routines, which probably
+eventually call ttm_mem_global_init and ttm_mem_global_release,
+respectively.
+
+Once your global TTM accounting structure is set up and initialized by
+calling ttm_global_item_ref() on it, you need to create a buffer
+object TTM to provide a pool for buffer object allocation by clients and
+the kernel itself. The type of this object should be
+TTM_GLOBAL_TTM_BO, and its size should be sizeof(struct
+ttm_bo_global). Again, driver-specific init and release functions may
+be provided, likely eventually calling ttm_bo_global_init() and
+ttm_bo_global_release(), respectively. Also, like the previous
+object, ttm_global_item_ref() is used to create an initial reference
+count for the TTM, which will call your initialization function.
+
+The Graphics Execution Manager (GEM)
+------------------------------------
+
+The GEM design approach has resulted in a memory manager that doesn't
+provide full coverage of all (or even all common) use cases in its
+userspace or kernel API. GEM exposes a set of standard memory-related
+operations to userspace and a set of helper functions to drivers, and
+lets drivers implement hardware-specific operations with their own
+private API.
+
+The GEM userspace API is described in the `GEM - the Graphics Execution
+Manager <http://lwn.net/Articles/283798/>`__ article on LWN. While
+slightly outdated, the document provides a good overview of the GEM API
+principles. Buffer allocation and read and write operations, described
+as part of the common GEM API, are currently implemented using
+driver-specific ioctls.
+
+GEM is data-agnostic. It manages abstract buffer objects without knowing
+what individual buffers contain. APIs that require knowledge of buffer
+contents or purpose, such as buffer allocation or synchronization
+primitives, are thus outside of the scope of GEM and must be implemented
+using driver-specific ioctls.
+
+On a fundamental level, GEM involves several operations:
+
+-  Memory allocation and freeing
+-  Command execution
+-  Aperture management at command execution time
+
+Buffer object allocation is relatively straightforward and largely
+provided by Linux's shmem layer, which provides memory to back each
+object.
+
+Device-specific operations, such as command execution, pinning, buffer
+read & write, mapping, and domain ownership transfers are left to
+driver-specific ioctls.
+
+GEM Initialization
+~~~~~~~~~~~~~~~~~~
+
+Drivers that use GEM must set the DRIVER_GEM bit in the
+:c:type:`struct drm_driver <drm_driver>` driver_features
+field. The DRM core will then automatically initialize the GEM core
+before calling the load operation. Behind the scenes, this will create a
+DRM Memory Manager object which provides an address space pool for
+object allocation.
+
+In a KMS configuration, drivers need to allocate and initialize a
+command ring buffer following core GEM initialization if required by the
+hardware. UMA devices usually have what is called a "stolen" memory
+region, which provides space for the initial framebuffer and large,
+contiguous memory regions required by the device. This space is
+typically not managed by GEM, and must be initialized separately into
+its own DRM MM object.
+
+GEM Objects Creation
+~~~~~~~~~~~~~~~~~~~~
+
+GEM splits creation of GEM objects and allocation of the memory that
+backs them into two distinct operations.
+
+GEM objects are represented by an instance of
+:c:type:`struct drm_gem_object <drm_gem_object>`. Drivers usually need
+to extend GEM objects with private information and thus create a
+driver-specific GEM object structure type that embeds an instance of
+:c:type:`struct drm_gem_object <drm_gem_object>`.
+
+To create a GEM object, a driver allocates memory for an instance of its
+specific GEM object type and initializes the embedded
+:c:type:`struct drm_gem_object <drm_gem_object>` with a call
+to :c:func:`drm_gem_object_init()`. The function takes a pointer
+to the DRM device, a pointer to the GEM object and the buffer object
+size in bytes.
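+
+A sketch with an invented driver-specific object type::
+
+    struct foo_gem_object {
+        struct drm_gem_object base;
+        /* driver-private data goes here */
+    };
+
+    static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
+                                                 size_t size)
+    {
+        struct foo_gem_object *fobj;
+
+        fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
+        if (!fobj)
+            return NULL;
+
+        /* The object size must be page-aligned. */
+        size = PAGE_ALIGN(size);
+
+        if (drm_gem_object_init(dev, &fobj->base, size)) {
+            kfree(fobj);
+            return NULL;
+        }
+
+        return fobj;
+    }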
+
+GEM uses shmem to allocate anonymous pageable memory.
+:c:func:`drm_gem_object_init()` will create an shmfs file of the
+requested size and store it into the :c:type:`struct drm_gem_object
+<drm_gem_object>` filp field. The memory is
+used as either main storage for the object when the graphics hardware
+uses system memory directly or as a backing store otherwise.
+
+Drivers are responsible for the actual allocation of physical pages,
+done by calling :c:func:`shmem_read_mapping_page_gfp()` for each page.
+Note that they can decide to allocate pages when initializing the GEM
+object, or to delay allocation until the memory is needed (for instance
+when a page fault occurs as a result of a userspace memory access or
+when the driver needs to start a DMA transfer involving the memory).
+
+Anonymous pageable memory allocation is not always desired, for instance
+when the hardware requires physically contiguous system memory as is
+often the case in embedded devices. Drivers can create GEM objects with
+no shmfs backing (called private GEM objects) by initializing them with
+a call to :c:func:`drm_gem_private_object_init()` instead of
+:c:func:`drm_gem_object_init()`. Storage for private GEM objects
+must be managed by drivers.
+
+GEM Objects Lifetime
+~~~~~~~~~~~~~~~~~~~~
+
+All GEM objects are reference-counted by the GEM core. References can be
+acquired and released by calling
+:c:func:`drm_gem_object_reference()` and
+:c:func:`drm_gem_object_unreference()` respectively. The caller
+must hold the :c:type:`struct drm_device <drm_device>`
+struct_mutex lock when calling
+:c:func:`drm_gem_object_reference()`. As a convenience, GEM
+provides the :c:func:`drm_gem_object_unreference_unlocked()`
+function that can be called without holding the lock.
+
+When the last reference to a GEM object is released the GEM core calls
+the :c:type:`struct drm_driver <drm_driver>` gem_free_object
+operation. That operation is mandatory for GEM-enabled drivers and must
+free the GEM object and all associated resources.
+
+::
+
+    void (*gem_free_object)(struct drm_gem_object *obj);
+
+Drivers are responsible for freeing all GEM object resources. This
+includes the resources created by the GEM core, which need to be
+released with :c:func:`drm_gem_object_release()`.
+
+GEM Objects Naming
+~~~~~~~~~~~~~~~~~~
+
+Communication between userspace and the kernel refers to GEM objects
+using local handles, global names or, more recently, file descriptors.
+All of those are 32-bit integer values; the usual Linux kernel limits
+apply to the file descriptors.
+
+GEM handles are local to a DRM file. Applications get a handle to a GEM
+object through a driver-specific ioctl, and can use that handle to refer
+to the GEM object in other standard or driver-specific ioctls. Closing a
+DRM file handle frees all its GEM handles and dereferences the
+associated GEM objects.
+
+To create a handle for a GEM object drivers call
+:c:func:`drm_gem_handle_create()`. The function takes a pointer
+to the DRM file and the GEM object and returns a locally unique handle.
+When the handle is no longer needed drivers delete it with a call to
+:c:func:`drm_gem_handle_delete()`. Finally the GEM object
+associated with a handle can be retrieved by a call to
+:c:func:`drm_gem_object_lookup()`.
+
+Handles don't take ownership of GEM objects, they only take a reference
+to the object that will be dropped when the handle is destroyed. To
+avoid leaking GEM objects, drivers must make sure they drop the
+reference(s) they own (such as the initial reference taken at object
+creation time) as appropriate, without any special consideration for the
+handle. For example, in the particular case of combined GEM object and
+handle creation in the implementation of the dumb_create operation,
+drivers must drop the initial reference to the GEM object before
+returning the handle.
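+
+A sketch of this pattern in a dumb_create implementation, reusing the
+hypothetical ``foo_gem_create()`` helper sketched earlier::
+
+    int foo_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+                        struct drm_mode_create_dumb *args)
+    {
+        struct foo_gem_object *fobj;
+        int ret;
+
+        args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+        args->size = PAGE_ALIGN(args->pitch * args->height);
+
+        fobj = foo_gem_create(dev, args->size);
+        if (!fobj)
+            return -ENOMEM;
+
+        ret = drm_gem_handle_create(file_priv, &fobj->base, &args->handle);
+        /* Drop the initial reference; the handle now keeps the object alive. */
+        drm_gem_object_unreference_unlocked(&fobj->base);
+
+        return ret;
+    }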
+
+GEM names are similar in purpose to handles but are not local to DRM
+files. They can be passed between processes to reference a GEM object
+globally. Names can't be used directly to refer to objects in the DRM
+API, applications must convert handles to names and names to handles
+using the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
+respectively. The conversion is handled by the DRM core without any
+driver-specific support.
+
+GEM also supports buffer sharing with dma-buf file descriptors through
+PRIME. GEM-based drivers must use the provided helper functions to
+implement the exporting and importing correctly, as described in the
+PRIME Buffer Sharing section. Since sharing
+file descriptors is inherently more secure than the easily guessable and
+global GEM names it is the preferred buffer sharing mechanism. Sharing
+buffers through GEM names is only supported for legacy userspace.
+Furthermore PRIME also allows cross-device buffer sharing since it is
+based on dma-bufs.
+
+GEM Objects Mapping
+~~~~~~~~~~~~~~~~~~~
+
+Because mapping operations are fairly heavyweight, GEM favours
+read/write-like access to buffers, implemented through driver-specific
+ioctls, over mapping buffers to userspace. However, when random access
+to the buffer is needed (to perform software rendering for instance),
+direct access to the object can be more efficient.
+
+The mmap system call can't be used directly to map GEM objects, as they
+don't have their own file handle. Two alternative methods currently
+co-exist to map GEM objects to userspace. The first method uses a
+driver-specific ioctl to perform the mapping operation, calling
+:c:func:`do_mmap()` under the hood. This is often considered
+dubious, seems to be discouraged for new GEM-enabled drivers, and will
+thus not be described here.
+
+The second method uses the mmap system call on the DRM file handle. ::
+
+    void *mmap(void *addr, size_t length, int prot, int flags,
+               int fd, off_t offset);
+
+DRM identifies the GEM object to be mapped by a fake offset passed
+through the mmap offset argument. Prior to being mapped, a GEM object
+must thus be associated with a fake offset. To do so, drivers must call
+:c:func:`drm_gem_create_mmap_offset()` on the object.
+
+Once allocated, the fake offset value must be passed to the application
+in a driver-specific way and can then be used as the mmap offset
+argument.
+
+The GEM core provides a helper method :c:func:`drm_gem_mmap()` to
+handle object mapping. The method can be set directly as the mmap file
+operation handler. It will look up the GEM object based on the offset
+value and set the VMA operations to the :c:type:`struct drm_driver
+<drm_driver>` gem_vm_ops field. Note that
+:c:func:`drm_gem_mmap()` doesn't map memory to userspace, but
+relies on the driver-provided fault handler to map pages individually.
+
+To use :c:func:`drm_gem_mmap()`, drivers must fill the
+:c:type:`struct drm_driver <drm_driver>` gem_vm_ops field with a
+pointer to VM operations. ::
+
+    struct vm_operations_struct *gem_vm_ops;
+
+    struct vm_operations_struct {
+        void (*open)(struct vm_area_struct *area);
+        void (*close)(struct vm_area_struct *area);
+        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+    };
+
+The open and close operations must update the GEM object reference
+count. Drivers can use the :c:func:`drm_gem_vm_open()` and
+:c:func:`drm_gem_vm_close()` helper functions directly as open
+and close handlers.
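+
+A typical instance therefore only needs a driver-specific fault handler
+(``foo_gem_fault`` being hypothetical)::
+
+    static const struct vm_operations_struct foo_gem_vm_ops = {
+        .fault = foo_gem_fault,
+        .open = drm_gem_vm_open,
+        .close = drm_gem_vm_close,
+    };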
+
+The fault operation handler is responsible for mapping individual pages
+to userspace when a page fault occurs. Depending on the memory
+allocation scheme, drivers can allocate pages at fault time, or can
+decide to allocate memory for the GEM object at the time the object is
+created.
+
+Drivers that want to map the GEM object upfront instead of handling page
+faults can implement their own mmap file operation handler.
+
+Memory Coherency
+~~~~~~~~~~~~~~~~
+
+When mapped to the device or used in a command buffer, backing pages for
+an object are flushed to memory and marked write combined so as to be
+coherent with the GPU. Likewise, if the CPU accesses an object after the
+GPU has finished rendering to the object, then the object must be made
+coherent with the CPU's view of memory, usually involving GPU cache
+flushing of various kinds. This core CPU<->GPU coherency management is
+provided by a device-specific ioctl, which evaluates an object's current
+domain and performs any necessary flushing or synchronization to put the
+object into the desired coherency domain (note that the object may be
+busy, i.e. an active render target; in that case, setting the domain
+blocks the client and waits for rendering to complete before performing
+any necessary flushing operations).
+
+Command Execution
+~~~~~~~~~~~~~~~~~
+
+Perhaps the most important GEM function for GPU devices is providing a
+command execution interface to clients. Client programs construct
+command buffers containing references to previously allocated memory
+objects, and then submit them to GEM. At that point, GEM takes care to
+bind all the objects into the GTT, execute the buffer, and provide
+necessary synchronization between clients accessing the same buffers.
+This often involves evicting some objects from the GTT and re-binding
+others (a fairly expensive operation), and providing relocation support
+which hides fixed GTT offsets from clients. Clients must take care not
+to submit command buffers that reference more objects than can fit in
+the GTT; otherwise, GEM will reject them and no rendering will occur.
+Similarly, if several objects in the buffer require fence registers to
+be allocated for correct rendering (e.g. 2D blits on pre-965 chips),
+care must be taken not to require more fence registers than are
+available to the client. Such resource management should be abstracted
+from the client in libdrm.
+
+GEM Function Reference
+----------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_gem.h
+   :internal:
+
+VMA Offset Manager
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
+   :doc: vma offset manager
+
+.. kernel-doc:: drivers/gpu/drm/drm_vma_manager.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_vma_manager.h
+   :internal:
+
+PRIME Buffer Sharing
+--------------------
+
+PRIME is the cross-device buffer sharing framework in DRM, originally
+created for the OPTIMUS range of multi-GPU platforms. To userspace,
+PRIME buffers are dma-buf based file descriptors.
+
+Overview and Driver Interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Similar to GEM global names, PRIME file descriptors are also used to
+share buffer objects across processes. They offer additional security:
+as file descriptors must be explicitly sent over UNIX domain sockets to
+be shared between applications, they can't be guessed like the globally
+unique GEM names.
+
+Drivers that support the PRIME API must set the DRIVER_PRIME bit in the
+:c:type:`struct drm_driver <drm_driver>` driver_features field, and
+implement the prime_handle_to_fd and prime_fd_to_handle operations. ::
+
+    int (*prime_handle_to_fd)(struct drm_device *dev,
+                              struct drm_file *file_priv,
+                              uint32_t handle, uint32_t flags,
+                              int *prime_fd);
+    int (*prime_fd_to_handle)(struct drm_device *dev,
+                              struct drm_file *file_priv,
+                              int prime_fd, uint32_t *handle);
+
+Those two operations convert a handle to a PRIME file descriptor and
+vice versa. Drivers must use the kernel dma-buf buffer sharing framework
+to manage the PRIME file descriptors. Similar to the mode setting API,
+PRIME is agnostic to the underlying buffer object manager, as long as
+handles are 32bit unsigned integers.
+
+While non-GEM drivers must implement the operations themselves, GEM
+drivers must use the :c:func:`drm_gem_prime_handle_to_fd()` and
+:c:func:`drm_gem_prime_fd_to_handle()` helper functions. Those
+helpers rely on the driver gem_prime_export and gem_prime_import
+operations to create a dma-buf instance from a GEM object (dma-buf
+exporter role) and to create a GEM object from a dma-buf instance
+(dma-buf importer role).
+
+::
+
+    struct dma_buf *(*gem_prime_export)(struct drm_device *dev,
+                                        struct drm_gem_object *obj,
+                                        int flags);
+    struct drm_gem_object *(*gem_prime_import)(struct drm_device *dev,
+                                                struct dma_buf *dma_buf);
+
+These two operations are mandatory for GEM drivers that support PRIME.
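+
+A GEM driver with no special import or export requirements can therefore
+rely entirely on the core helpers (a sketch)::
+
+    static struct drm_driver foo_driver = {
+        .driver_features = DRIVER_GEM | DRIVER_PRIME /* | ... */,
+        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+        .gem_prime_export = drm_gem_prime_export,
+        .gem_prime_import = drm_gem_prime_import,
+        /* ... */
+    };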
+
+PRIME Helper Functions
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/drm_prime.c
+   :doc: PRIME Helpers
+
+PRIME Function References
+-------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_prime.c
+   :export:
+
+DRM MM Range Allocator
+----------------------
+
+Overview
+~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/drm_mm.c
+   :doc: Overview
+
+LRU Scan/Eviction Support
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/drm_mm.c
+   :doc: lru scan roaster
+
+DRM MM Range Allocator Function References
+------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_mm.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_mm.h
+   :internal:
+
+CMA Helper Functions Reference
+------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+   :doc: cma helpers
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_cma_helper.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_gem_cma_helper.h
+   :internal:
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
new file mode 100644 (file)
index 0000000..536bf3e
--- /dev/null
@@ -0,0 +1,111 @@
+===================
+Userland interfaces
+===================
+
+The DRM core exports several interfaces to applications, generally
+intended to be used through corresponding libdrm wrapper functions. In
+addition, drivers export device-specific interfaces for use by userspace
+drivers & device-aware applications through ioctls and sysfs files.
+
+External interfaces include: memory mapping, context management, DMA
+operations, AGP management, vblank control, fence management, memory
+management, and output management.
+
+Cover generic ioctls and sysfs layout here. We only need high-level
+info, since man pages should cover the rest.
+
+libdrm Device Lookup
+====================
+
+.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
+   :doc: getunique and setversion story
+
+
+Primary Nodes, DRM Master and Authentication
+============================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_auth.c
+   :doc: master and authentication
+
+.. kernel-doc:: drivers/gpu/drm/drm_auth.c
+   :export:
+
+.. kernel-doc:: include/drm/drm_auth.h
+   :internal:
+
+Render nodes
+============
+
+DRM core provides multiple character-devices for user-space to use.
+Depending on which device is opened, user-space can perform a different
+set of operations (mainly ioctls). The primary node is always created
+and called card<num>. Additionally, a currently unused control node,
+called controlD<num> is also created. The primary node provides all
+legacy operations and historically was the only interface used by
+userspace. With KMS, the control node was introduced. However, the
+planned KMS control interface has never been written and so the control
+node stays unused to date.
+
+With the increased use of offscreen renderers and GPGPU applications,
+clients no longer require running compositors or graphics servers to
+make use of a GPU. But the DRM API required unprivileged clients to
+authenticate to a DRM-Master prior to getting GPU access. To avoid this
+step and to grant clients GPU access without authenticating, render
+nodes were introduced. Render nodes solely serve render clients, that
+is, no modesetting or privileged ioctls can be issued on render nodes.
+Only non-global rendering commands are allowed. If a driver supports
+render nodes, it must advertise it via the DRIVER_RENDER DRM driver
+capability. If not supported, the primary node must be used for render
+clients together with the legacy drmAuth authentication procedure.
+
+If a driver advertises render node support, DRM core will create a
+separate render node called renderD<num>. There will be one render node
+per device. No ioctls except PRIME-related ioctls will be allowed on
+this node. Especially GEM_OPEN will be explicitly prohibited. Render
+nodes are designed to avoid the buffer leaks that occur if clients
+guess the flink names or mmap offsets on the legacy interface.
+In addition to this basic interface, drivers must mark their
+driver-dependent render-only ioctls as DRM_RENDER_ALLOW so render
+clients can use them. Driver authors must be careful not to allow any
+privileged ioctls on render nodes.
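+
+As a sketch (the FOO_SUBMIT ioctl and its handler are invented), marking
+a render-only ioctl in the driver's ioctl table looks like::
+
+    static const struct drm_ioctl_desc foo_ioctls[] = {
+        DRM_IOCTL_DEF_DRV(FOO_SUBMIT, foo_submit_ioctl,
+                          DRM_AUTH | DRM_RENDER_ALLOW),
+    };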
+
+With render nodes, user-space can now control access to the render node
+via basic file-system access-modes. A running graphics server which
+authenticates clients on the privileged primary/legacy node is no longer
+required. Instead, a client can open the render node and is immediately
+granted GPU access. Communication between clients (or servers) is done
+via PRIME. FLINK from render node to legacy node is not supported. New
+clients must not use the insecure FLINK interface.
+
+Besides dropping all modeset/global ioctls, render nodes also drop the
+DRM-Master concept. There is no reason to associate render clients with
+a DRM-Master as they are independent of any graphics server. Besides,
+they must work without any running master, anyway. Drivers must be able
+to run without a master object if they support render nodes. If, on the
+other hand, a driver requires shared state between clients which is
+visible to user-space and accessible beyond open-file boundaries, they
+cannot support render nodes.
+
+VBlank event handling
+=====================
+
+The DRM core exposes two vertical blank related ioctls:
+
+DRM_IOCTL_WAIT_VBLANK
+    This takes a struct drm_wait_vblank structure as its argument, and
+    it is used to block or request a signal when a specified vblank
+    event occurs.
+
+DRM_IOCTL_MODESET_CTL
+    This was only used for user-mode-setting drivers around modesetting
+    changes to allow the kernel to update the vblank interrupt after
+    mode setting, since on many devices the vertical blank counter is
+    reset to 0 at some point during modeset. Modern drivers should not
+    call this any more since with kernel mode setting it is a no-op.
+
+This second part of the GPU Driver Developer's Guide documents driver
+code, implementation details and also all the driver-specific userspace
+interfaces. Especially since all hardware-acceleration interfaces to
+userspace are driver specific for efficiency and other reasons, these
+interfaces can be rather substantial. Hence every driver has its own
+chapter.
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
new file mode 100644 (file)
index 0000000..2fe5952
--- /dev/null
@@ -0,0 +1,347 @@
+===========================
+ drm/i915 Intel GFX Driver
+===========================
+
+The drm/i915 driver supports all (with the exception of some very early
+models) integrated GFX chipsets with both Intel display and rendering
+blocks. This excludes a set of SoC platforms with an SGX rendering unit;
+those have basic support through the gma500 drm driver.
+
+Core Driver Infrastructure
+==========================
+
+This section covers core driver infrastructure used by both the display
+and the GEM parts of the driver.
+
+Runtime Power Management
+------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
+   :doc: runtime pm
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_uncore.c
+   :internal:
+
+Interrupt Handling
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
+   :doc: interrupt handling
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
+   :functions: intel_irq_init intel_irq_init_hw intel_hpd_init
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
+   :functions: intel_runtime_pm_disable_interrupts
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
+   :functions: intel_runtime_pm_enable_interrupts
+
+Intel GVT-g Guest Support (vGPU)
+--------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
+   :doc: Intel GVT-g guest support
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
+   :internal:
+
+Display Hardware Handling
+=========================
+
+This section covers everything related to the display hardware including
+the mode setting infrastructure, plane, sprite and cursor handling and
+display, output probing and related topics.
+
+Mode Setting Infrastructure
+---------------------------
+
+The i915 driver is thus far the only DRM driver which doesn't use the
+common DRM helper code to implement mode setting sequences. Thus it has
+its own tailor-made infrastructure for executing a display configuration
+change.
+
+Frontbuffer Tracking
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
+   :doc: frontbuffer tracking
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem.c
+   :functions: i915_gem_track_fb
+
+Display FIFO Underrun Reporting
+-------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_fifo_underrun.c
+   :doc: fifo underrun handling
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_fifo_underrun.c
+   :internal:
+
+Plane Configuration
+-------------------
+
+This section covers plane configuration and composition with the primary
+plane, sprites, cursors and overlays. This includes the infrastructure
+to do atomic vsync'ed updates of all this state and also tightly coupled
+topics like watermark setup and computation, framebuffer compression and
+panel self refresh.
+
+Atomic Plane Helpers
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_atomic_plane.c
+   :doc: atomic plane helpers
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_atomic_plane.c
+   :internal:
+
+Output Probing
+--------------
+
+This section covers output probing and related infrastructure like the
+hotplug interrupt storm detection and mitigation code. Note that the
+i915 driver still uses most of the common DRM helper code for output
+probing, so those sections fully apply.
+
+Hotplug
+-------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_hotplug.c
+   :doc: Hotplug
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_hotplug.c
+   :internal:
+
+High Definition Audio
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
+   :doc: High Definition Audio over HDMI and Display Port
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_audio.c
+   :internal:
+
+.. kernel-doc:: include/drm/i915_component.h
+   :internal:
+
+Panel Self Refresh (PSR/SRD)
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_psr.c
+   :doc: Panel Self Refresh (PSR/SRD)
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_psr.c
+   :internal:
+
+Frame Buffer Compression (FBC)
+------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_fbc.c
+   :doc: Frame Buffer Compression (FBC)
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_fbc.c
+   :internal:
+
+Display Refresh Rate Switching (DRRS)
+-------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+   :doc: Display Refresh Rate Switching (DRRS)
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+   :functions: intel_dp_set_drrs_state
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+   :functions: intel_edp_drrs_enable
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+   :functions: intel_edp_drrs_disable
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+   :functions: intel_edp_drrs_invalidate
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+   :functions: intel_edp_drrs_flush
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_dp.c
+   :functions: intel_dp_drrs_init
+
+DPIO
+----
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_reg.h
+   :doc: DPIO
+
+CSR firmware support for DMC
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
+   :doc: csr support for dmc
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
+   :internal:
+
+Video BIOS Table (VBT)
+----------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_bios.c
+   :doc: Video BIOS Table (VBT)
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_bios.c
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_vbt_defs.h
+   :internal:
+
+Memory Management and Command Submission
+========================================
+
+This section covers all things related to the GEM implementation in the
+i915 driver.
+
+Batchbuffer Parsing
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_cmd_parser.c
+   :doc: batch buffer command parser
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_cmd_parser.c
+   :internal:
+
+Batchbuffer Pools
+-----------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
+   :doc: batch pool
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c
+   :internal:
+
+Logical Rings, Logical Ring Contexts and Execlists
+--------------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_lrc.c
+   :doc: Logical Rings, Logical Ring Contexts and Execlists
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_lrc.c
+   :internal:
+
+Global GTT views
+----------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
+   :doc: Global GTT views
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
+   :internal:
+
+GTT Fences and Swizzling
+------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+   :internal:
+
+Global GTT Fence Handling
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+   :doc: fence register handling
+
+Hardware Tiling and Swizzling Details
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_fence.c
+   :doc: tiling swizzling details
+
+Object Tiling IOCTLs
+--------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_tiling.c
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_tiling.c
+   :doc: buffer object tiling
+
+Buffer Object Eviction
+----------------------
+
+This section documents the interface functions for evicting buffer
+objects to make space available in the virtual gpu address spaces. Note
+that this is mostly orthogonal to shrinking buffer object caches, whose
+goal is to make main memory (shared with the gpu through the unified
+memory architecture) available.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_evict.c
+   :internal:
+
+Buffer Object Memory Shrinking
+------------------------------
+
+This section documents the interface function for shrinking memory usage
+of buffer object caches. Shrinking is used to make main memory
+available. Note that this is mostly orthogonal to evicting buffer
+objects, whose goal is to make space available in gpu virtual address
+spaces.
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_shrinker.c
+   :internal:
+
+GuC
+===
+
+GuC-specific firmware loader
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
+   :doc: GuC-specific firmware loader
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c
+   :internal:
+
+GuC-based command submission
+----------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_guc_submission.c
+   :doc: GuC-based command submission
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_guc_submission.c
+   :internal:
+
+GuC Firmware Layout
+-------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fwif.h
+   :doc: GuC Firmware Layout
+
+Tracing
+=======
+
+This section covers all things related to the tracepoints implemented
+in the i915 driver.
+
+i915_ppgtt_create and i915_ppgtt_release
+----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
+   :doc: i915_ppgtt_create and i915_ppgtt_release tracepoints
+
+i915_context_create and i915_context_free
+-----------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
+   :doc: i915_context_create and i915_context_free tracepoints
+
+switch_mm
+---------
+
+.. kernel-doc:: drivers/gpu/drm/i915/i915_trace.h
+   :doc: switch_mm tracepoint
+
+.. WARNING: DOCPROC directive not supported: !Cdrivers/gpu/drm/i915/i915_irq.c
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
new file mode 100644 (file)
index 0000000..fcac0fa
--- /dev/null
@@ -0,0 +1,14 @@
+==================================
+Linux GPU Driver Developer's Guide
+==================================
+
+.. toctree::
+
+   introduction
+   drm-internals
+   drm-mm
+   drm-kms
+   drm-kms-helpers
+   drm-uapi
+   i915
+   vga-switcheroo
diff --git a/Documentation/gpu/introduction.rst b/Documentation/gpu/introduction.rst
new file mode 100644 (file)
index 0000000..1903595
--- /dev/null
@@ -0,0 +1,51 @@
+============
+Introduction
+============
+
+The Linux DRM layer contains code intended to support the needs of
+complex graphics devices, usually containing programmable pipelines well
+suited to 3D graphics acceleration. Graphics drivers in the kernel may
+make use of DRM functions to make tasks like memory management,
+interrupt handling and DMA easier, and provide a uniform interface to
+applications.
+
+A note on versions: this guide covers features found in the DRM tree,
+including the TTM memory manager, output configuration and mode setting,
+and the new vblank internals, in addition to all the regular features
+found in current kernels.
+
+[Insert diagram of typical DRM stack here]
+
+Style Guidelines
+================
+
+For consistency this documentation uses American English. Abbreviations
+are written as all-uppercase, for example: DRM, KMS, IOCTL, CRTC, and so
+on. To aid in reading, the documentation makes full use of the markup
+characters kerneldoc provides: @parameter for function parameters,
+@member for structure members, &structure to reference structures and
+function() for functions. These all get automatically hyperlinked if
+kerneldoc for the referenced objects exists. When referencing entries in
+function vtables please use ->vfunc(). Note that kerneldoc does not
+support referencing struct members directly, so please add a reference
+to the vtable struct somewhere in the same paragraph or at least in the
+same section.
+
+Except in special situations (to separate locked from unlocked variants)
+locking requirements for functions aren't documented in the kerneldoc.
+Instead, locking should be checked at runtime using e.g.
+``WARN_ON(!mutex_is_locked(...));``. Since it's much easier to ignore
+documentation than runtime noise this provides more value. And on top of
+that runtime checks do need to be updated when the locking rules change,
+increasing the chances that they're correct. Within the documentation
+the locking rules should be explained in the relevant structures: Either
+in the comment for the lock explaining what it protects, or data fields
+need a note about which lock protects them, or both.
+
+Functions which have a non-\ ``void`` return value should have a section
+called "Returns" explaining the expected return values in different
+cases and their meanings. Currently there's no consensus whether that
+section name should be all upper-case or not, and whether it should end
+in a colon or not. Go with the file-local style. Other common section
+names are "Notes" with information for dangerous or tricky corner cases,
+and "FIXME" where the interface could be cleaned up.
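+
+Purely as an illustration (the function and structure names below are
+made up, not real kernel API), a kernel-doc comment following these
+conventions could look like this::
+
+    /**
+     * foo_enable_output() - enable scanout on an output
+     * @output: the &struct foo_output to enable
+     *
+     * Enables @output by calling the ->enable() vfunc of its
+     * &struct foo_output_funcs vtable, checking the locking rules at
+     * runtime with WARN_ON(!mutex_is_locked(&output->lock)). See
+     * foo_disable_output() for the reverse operation.
+     *
+     * Returns:
+     * 0 on success or a negative error code on failure.
+     */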
diff --git a/Documentation/gpu/kms-properties.csv b/Documentation/gpu/kms-properties.csv
new file mode 100644 (file)
index 0000000..b6fcaf6
--- /dev/null
@@ -0,0 +1,128 @@
+Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,Description/Restrictions
+DRM,Generic,“rotation”,BITMASK,"{ 0, ""rotate-0"" }, { 1, ""rotate-90"" }, { 2, ""rotate-180"" }, { 3, ""rotate-270"" }, { 4, ""reflect-x"" }, { 5, ""reflect-y"" }","CRTC, Plane",rotate-(degrees) rotates the image by the specified amount in degrees in the counter-clockwise direction. reflect-x and reflect-y reflect the image along the specified axis prior to rotation
+,,“scaling mode”,ENUM,"{ ""None"", ""Full"", ""Center"", ""Full aspect"" }",Connector,"Supported by: amdgpu, gma500, i915, nouveau and radeon."
+,Connector,“EDID”,BLOB | IMMUTABLE,0,Connector,Contains id of edid blob ptr object.
+,,“DPMS”,ENUM,"{ “On”, “Standby”, “Suspend”, “Off” }",Connector,Contains DPMS operation mode value.
+,,“PATH”,BLOB | IMMUTABLE,0,Connector,Contains topology path to a connector.
+,,“TILE”,BLOB | IMMUTABLE,0,Connector,Contains tiling information for a connector.
+,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Connector,CRTC that connector is attached to (atomic)
+,Plane,“type”,ENUM | IMMUTABLE,"{ ""Overlay"", ""Primary"", ""Cursor"" }",Plane,Plane type
+,,“SRC_X”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source x coordinate in 16.16 fixed point (atomic)
+,,“SRC_Y”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source y coordinate in 16.16 fixed point (atomic)
+,,“SRC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source width in 16.16 fixed point (atomic)
+,,“SRC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout source height in 16.16 fixed point (atomic)
+,,“CRTC_X”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) x coordinate (atomic)
+,,“CRTC_Y”,SIGNED_RANGE,"Min=INT_MIN, Max=INT_MAX",Plane,Scanout CRTC (destination) y coordinate (atomic)
+,,“CRTC_W”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) width (atomic)
+,,“CRTC_H”,RANGE,"Min=0, Max=UINT_MAX",Plane,Scanout CRTC (destination) height (atomic)
+,,“FB_ID”,OBJECT,DRM_MODE_OBJECT_FB,Plane,Scanout framebuffer (atomic)
+,,“CRTC_ID”,OBJECT,DRM_MODE_OBJECT_CRTC,Plane,CRTC that plane is attached to (atomic)
+,DVI-I,“subconnector”,ENUM,"{ “Unknown”, “DVI-D”, “DVI-A” }",Connector,TBD
+,,“select subconnector”,ENUM,"{ “Automatic”, “DVI-D”, “DVI-A” }",Connector,TBD
+,TV,“subconnector”,ENUM,"{ ""Unknown"", ""Composite"", ""SVIDEO"", ""Component"", ""SCART"" }",Connector,TBD
+,,“select subconnector”,ENUM,"{ ""Automatic"", ""Composite"", ""SVIDEO"", ""Component"", ""SCART"" }",Connector,TBD
+,,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
+,,“left margin”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“right margin”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“top margin”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“bottom margin”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“brightness”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“contrast”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“flicker reduction”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“overscan”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“saturation”,RANGE,"Min=0, Max=100",Connector,TBD
+,,“hue”,RANGE,"Min=0, Max=100",Connector,TBD
+,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
+,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest a Y offset for a connector
+,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TBD
+,,“dirty”,ENUM | IMMUTABLE,"{ ""Off"", ""On"", ""Annotate"" }",Connector,TBD
+,,“DEGAMMA_LUT”,BLOB,0,CRTC,DRM property to set the degamma lookup table (LUT) mapping pixel data from the framebuffer before it is given to the transformation matrix. The data is interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
+,,“DEGAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property that gives the size of the lookup table to be set on the DEGAMMA_LUT property (the size depends on the underlying hardware).
+,,“CTM”,BLOB,0,CRTC,DRM property to set the current transformation matrix (CTM) applied to pixel data after the lookup through the degamma LUT and before the lookup through the gamma LUT. The data is interpreted as a struct drm_color_ctm.
+,,“GAMMA_LUT”,BLOB,0,CRTC,DRM property to set the gamma lookup table (LUT) mapping pixel data after the transformation matrix to data sent to the connector. The data is interpreted as an array of struct drm_color_lut elements. Hardware might choose not to use the full precision of the LUT elements nor use all the elements of the LUT (for example the hardware might choose to interpolate between LUT[0] and LUT[4]).
+,,“GAMMA_LUT_SIZE”,RANGE | IMMUTABLE,"Min=0, Max=UINT_MAX",CRTC,DRM property that gives the size of the lookup table to be set on the GAMMA_LUT property (the size depends on the underlying hardware).
+i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normally in the range 0..1.0 are remapped to the range 16/255..235/255."
+,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
+,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
+,,"""left_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""right_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""top_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""bottom_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“hpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“vpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“contrast”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“saturation”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“hue”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“sharpness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter_adaptive”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter_2d”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“tv_chroma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“tv_luma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“dot_crawl”,RANGE,"Min=0, Max=1",Connector,TBD
+,SDVO-TV/LVDS,“brightness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+CDV gma-500,Generic,"""Broadcast RGB""",ENUM,"{ “Full”, “Limited 16:235” }",Connector,TBD
+,,"""Broadcast RGB""",ENUM,"{ “off”, “auto”, “on” }",Connector,TBD
+Poulsbo,Generic,“backlight”,RANGE,"Min=0, Max=100",Connector,TBD
+,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
+,,"""left_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""right_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""top_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,"""bottom_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“hpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“vpos”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“contrast”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“saturation”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“hue”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“sharpness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter_adaptive”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“flicker_filter_2d”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“tv_chroma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“tv_luma_filter”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+,,“dot_crawl”,RANGE,"Min=0, Max=1",Connector,TBD
+,SDVO-TV/LVDS,“brightness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
+armada,CRTC,"""CSC_YUV""",ENUM,"{ ""Auto"" , ""CCIR601"", ""CCIR709"" }",CRTC,TBD
+,,"""CSC_RGB""",ENUM,"{ ""Auto"", ""Computer system"", ""Studio"" }",CRTC,TBD
+,Overlay,"""colorkey""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
+,,"""colorkey_min""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
+,,"""colorkey_max""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
+,,"""colorkey_val""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
+,,"""colorkey_alpha""",RANGE,"Min=0, Max=0xffffff",Plane,TBD
+,,"""colorkey_mode""",ENUM,"{ ""disabled"", ""Y component"", ""U component"" , ""V component"", ""RGB"", “R component"", ""G component"", ""B component"" }",Plane,TBD
+,,"""brightness""",RANGE,"Min=0, Max=256 + 255",Plane,TBD
+,,"""contrast""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
+,,"""saturation""",RANGE,"Min=0, Max=0x7fff",Plane,TBD
+exynos,CRTC,“mode”,ENUM,"{ ""normal"", ""blank"" }",CRTC,TBD
+,Overlay,“zpos”,RANGE,"Min=0, Max=MAX_PLANE-1",Plane,TBD
+i2c/ch7006_drv,Generic,“scale”,RANGE,"Min=0, Max=2",Connector,TBD
+,TV,“mode”,ENUM,"{ ""PAL"", ""PAL-M"", ""PAL-N"", ""PAL-Nc"", ""PAL-60"", ""NTSC-M"", ""NTSC-J"" }",Connector,TBD
+nouveau,NV10 Overlay,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
+,,“contrast”,RANGE,"Min=0, Max=8192-1",Plane,TBD
+,,“brightness”,RANGE,"Min=0, Max=1024",Plane,TBD
+,,“hue”,RANGE,"Min=0, Max=359",Plane,TBD
+,,“saturation”,RANGE,"Min=0, Max=8192-1",Plane,TBD
+,,“iturbt_709”,RANGE,"Min=0, Max=1",Plane,TBD
+,Nv04 Overlay,“colorkey”,RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
+,,“brightness”,RANGE,"Min=0, Max=1024",Plane,TBD
+,Display,“dithering mode”,ENUM,"{ ""auto"", ""off"", ""on"" }",Connector,TBD
+,,“dithering depth”,ENUM,"{ ""auto"", ""off"", ""on"", ""static 2x2"", ""dynamic 2x2"", ""temporal"" }",Connector,TBD
+,,“underscan”,ENUM,"{ ""auto"", ""6 bpc"", ""8 bpc"" }",Connector,TBD
+,,“underscan hborder”,RANGE,"Min=0, Max=128",Connector,TBD
+,,“underscan vborder”,RANGE,"Min=0, Max=128",Connector,TBD
+,,“vibrant hue”,RANGE,"Min=0, Max=180",Connector,TBD
+,,“color vibrance”,RANGE,"Min=0, Max=200",Connector,TBD
+omap,Generic,“zorder”,RANGE,"Min=0, Max=3","CRTC, Plane",TBD
+qxl,Generic,"""hotplug_mode_update""",RANGE,"Min=0, Max=1",Connector,TBD
+radeon,DVI-I,“coherent”,RANGE,"Min=0, Max=1",Connector,TBD
+,DAC enable load detect,“load detection”,RANGE,"Min=0, Max=1",Connector,TBD
+,TV Standard,"""tv standard""",ENUM,"{ ""ntsc"", ""pal"", ""pal-m"", ""pal-60"", ""ntsc-j"" , ""scart-pal"", ""pal-cn"", ""secam"" }",Connector,TBD
+,legacy TMDS PLL detect,"""tmds_pll""",ENUM,"{ ""driver"", ""bios"" }",-,TBD
+,Underscan,"""underscan""",ENUM,"{ ""off"", ""on"", ""auto"" }",Connector,TBD
+,,"""underscan hborder""",RANGE,"Min=0, Max=128",Connector,TBD
+,,"""underscan vborder""",RANGE,"Min=0, Max=128",Connector,TBD
+,Audio,“audio”,ENUM,"{ ""off"", ""on"", ""auto"" }",Connector,TBD
+,FMT Dithering,“dither”,ENUM,"{ ""off"", ""on"" }",Connector,TBD
+rcar-du,Generic,"""alpha""",RANGE,"Min=0, Max=255",Plane,TBD
+,,"""colorkey""",RANGE,"Min=0, Max=0x01ffffff",Plane,TBD
+,,"""zpos""",RANGE,"Min=1, Max=7",Plane,TBD
diff --git a/Documentation/gpu/vga-switcheroo.rst b/Documentation/gpu/vga-switcheroo.rst
new file mode 100644 (file)
index 0000000..cbbdb99
--- /dev/null
@@ -0,0 +1,98 @@
+.. _vga_switcheroo:
+
+==============
+VGA Switcheroo
+==============
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+   :doc: Overview
+
+Modes of Use
+============
+
+Manual switching and manual power control
+-----------------------------------------
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+   :doc: Manual switching and manual power control
+
+Driver power control
+--------------------
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+   :doc: Driver power control
+
+API
+===
+
+Public functions
+----------------
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+   :export:
+
+Public structures
+-----------------
+
+.. kernel-doc:: include/linux/vga_switcheroo.h
+   :functions: vga_switcheroo_handler
+
+.. kernel-doc:: include/linux/vga_switcheroo.h
+   :functions: vga_switcheroo_client_ops
+
+Public constants
+----------------
+
+.. kernel-doc:: include/linux/vga_switcheroo.h
+   :functions: vga_switcheroo_handler_flags_t
+
+.. kernel-doc:: include/linux/vga_switcheroo.h
+   :functions: vga_switcheroo_client_id
+
+.. kernel-doc:: include/linux/vga_switcheroo.h
+   :functions: vga_switcheroo_state
+
+Private structures
+------------------
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+   :functions: vgasr_priv
+
+.. kernel-doc:: drivers/gpu/vga/vga_switcheroo.c
+   :functions: vga_switcheroo_client
+
+Handlers
+========
+
+apple-gmux Handler
+------------------
+
+.. kernel-doc:: drivers/platform/x86/apple-gmux.c
+   :doc: Overview
+
+.. kernel-doc:: drivers/platform/x86/apple-gmux.c
+   :doc: Interrupt
+
+Graphics mux
+~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/platform/x86/apple-gmux.c
+   :doc: Graphics mux
+
+Power control
+~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/platform/x86/apple-gmux.c
+   :doc: Power control
+
+Backlight control
+~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/platform/x86/apple-gmux.c
+   :doc: Backlight control
+
+Public functions
+~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: include/linux/apple-gmux.h
+   :internal:
diff --git a/Documentation/index.rst b/Documentation/index.rst
new file mode 100644 (file)
index 0000000..dacc77b
--- /dev/null
@@ -0,0 +1,24 @@
+.. The Linux Kernel documentation master file, created by
+   sphinx-quickstart on Fri Feb 12 13:51:46 2016.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to The Linux Kernel's documentation!
+============================================
+
+Nothing for you to see here *yet*. Please move along.
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   gpu/index
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
index 35f6a982a0d5fdc040cfb722a071c8d707c53123..220d0a80ca2c9f45ce7f0705be1952c34df4f1dd 100644 (file)
@@ -170,21 +170,92 @@ document trapinfo
        address the kernel panicked.
 end
 
+define dump_log_idx
+       set $idx = $arg0
+       if ($argc > 1)
+               set $prev_flags = $arg1
+       else
+               set $prev_flags = 0
+       end
+       set $msg = ((struct printk_log *) (log_buf + $idx))
+       set $prefix = 1
+       set $newline = 1
+       set $log = log_buf + $idx + sizeof(*$msg)
 
-define dmesg
-       set $i = 0
-       set $end_idx = (log_end - 1) & (log_buf_len - 1)
+       # prev & LOG_CONT && !(msg->flags & LOG_PREFIX)
+       if (($prev_flags & 8) && !($msg->flags & 4))
+               set $prefix = 0
+       end
+
+       # msg->flags & LOG_CONT
+       if ($msg->flags & 8)
+               # (prev & LOG_CONT && !(prev & LOG_NEWLINE))
+               if (($prev_flags & 8) && !($prev_flags & 2))
+                       set $prefix = 0
+               end
+               # (!(msg->flags & LOG_NEWLINE))
+               if (!($msg->flags & 2))
+                       set $newline = 0
+               end
+       end
+
+       if ($prefix)
+               printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000
+       end
+       if ($msg->text_len != 0)
+               eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len
+       end
+       if ($newline)
+               printf "\n"
+       end
+       if ($msg->dict_len > 0)
+               set $dict = $log + $msg->text_len
+               set $idx = 0
+               set $line = 1
+               while ($idx < $msg->dict_len)
+                       if ($line)
+                               printf " "
+                               set $line = 0
+                       end
+                       set $c = $dict[$idx]
+                       if ($c == '\0')
+                               printf "\n"
+                               set $line = 1
+                       else
+                               if ($c < ' ' || $c >= 127 || $c == '\\')
+                                       printf "\\x%02x", $c
+                               else
+                                       printf "%c", $c
+                               end
+                       end
+                       set $idx = $idx + 1
+               end
+               printf "\n"
+       end
+end
+document dump_log_idx
+       Dump a single log given its index in the log buffer.  The first
+       parameter is the index into log_buf, the second is optional and
+       specifies the previous log record's flags, used for properly
+       formatting continued lines.
+end
 
-       while ($i < logged_chars)
-               set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1)
+define dmesg
+       set $i = log_first_idx
+       set $end_idx = log_first_idx
+       set $prev_flags = 0
 
-               if ($idx + 100 <= $end_idx) || \
-                  ($end_idx <= $idx && $idx + 100 < log_buf_len)
-                       printf "%.100s", &log_buf[$idx]
-                       set $i = $i + 100
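+       # Walk the ring buffer record by record; a zero-length record
+       # means the buffer wrapped, so scanning resumes at log_buf[0].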
+       while (1)
+               set $msg = ((struct printk_log *) (log_buf + $i))
+               if ($msg->len == 0)
+                       set $i = 0
                else
-                       printf "%c", log_buf[$idx]
-                       set $i = $i + 1
+                       dump_log_idx $i $prev_flags
+                       set $i = $i + $msg->len
+                       set $prev_flags = $msg->flags
+               end
+               if ($i == $end_idx)
+                       loop_break
                end
        end
 end
index 82b42c958d1c7def4eac5c9931a0e8a6f9aab6c6..a2a662d4da8373e1061950209e1fc9ddf687ecce 100644 (file)
@@ -3992,8 +3992,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        trace_event=[event-list]
                        [FTRACE] Set and start specified trace events in order
-                       to facilitate early boot debugging.
-                       See also Documentation/trace/events.txt
+                       to facilitate early boot debugging. The event-list is a
+                       comma-separated list of trace events to enable. See
+                       also Documentation/trace/events.txt
 
        trace_options=[option-list]
                        [FTRACE] Enable or disable tracer options at boot.
index d406d98339b25abbe3d1f5f05c4ac7f9856d3e9c..44f5e6bccd978fbe2329cdb39af87494aaafc09c 100644 (file)
@@ -74,8 +74,8 @@ blink_set() function (see <linux/leds.h>). To set an LED to blinking,
 however, it is better to use the API function led_blink_set(), as it
 will check and implement software fallback if necessary.
 
-To turn off blinking again, use the API function led_brightness_set()
-as that will not just set the LED brightness but also stop any software
+To turn off blinking, use the API function led_brightness_set()
+with brightness value LED_OFF, which should stop any software
 timers that may have been required for blinking.
 
 The blink_set() function should choose a user friendly blinking value
index 30fb842a976d3ed3b9b9e1cb0bf5e1f64640dcc4..49db1def1721cdedc09b6a4e459b1cd41f82a749 100644 (file)
@@ -1538,9 +1538,9 @@ set_cmdline(struct mic_info *mic)
 
        len = snprintf(buffer, PATH_MAX,
                "clocksource=tsc highres=off nohz=off ");
-       len += snprintf(buffer + len, PATH_MAX,
+       len += snprintf(buffer + len, PATH_MAX - len,
                "cpufreq_on;corec6_off;pc3_off;pc6_off ");
-       len += snprintf(buffer + len, PATH_MAX,
+       len += snprintf(buffer + len, PATH_MAX - len,
                "ifcfg=static;address,172.31.%d.1;netmask,255.255.255.0",
                mic->id + 1);
 
index 631b0f7ae16fd6506a7e171d04f874b00c09da67..9d05ed7f7da51648cafeee84a2a21d9711c19dde 100644 (file)
@@ -369,8 +369,6 @@ does not allocate any driver private context space.
 Switch configuration
 --------------------
 
-- priv_size: additional size needed by the switch driver for its private context
-
 - tag_protocol: this is to indicate what kind of tagging protocol is supported,
   should be a valid value from the dsa_tag_protocol enum
 
@@ -416,11 +414,6 @@ PHY devices and link management
   to the switch port MDIO registers. If unavailable return a negative error
   code.
 
-- poll_link: Function invoked by DSA to query the link state of the switch
-  builtin Ethernet PHYs, per port. This function is responsible for calling
-  netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a
-  single call. Executes from workqueue context.
-
 - adjust_link: Function invoked by the PHY library when a slave network device
   is attached to a PHY device. This function is responsible for appropriately
   configuring the switch port link parameters: speed, duplex, pause based on
@@ -542,6 +535,16 @@ Bridge layer
 Bridge VLAN filtering
 ---------------------
 
+- port_vlan_filtering: bridge layer function invoked when the bridge gets
+  configured for turning on or off VLAN filtering. If nothing specific needs to
+  be done at the hardware level, this callback does not need to be implemented.
+  When VLAN filtering is turned on, the hardware must be programmed to reject
+  802.1Q frames with VLAN IDs outside of the programmed allowed VLAN ID
+  map/rules.  If there is no PVID programmed into the switch port, untagged
+  frames must be rejected as well. When turned off, the switch must accept any
+  802.1Q frames irrespective of their VLAN ID, and untagged frames are allowed.
+  A sketch of such a callback follows below.
+
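+  As a minimal sketch only (the FOO_* register names and the foo_rmw()
+  read-modify-write helper are made up for illustration, not a real API):
+
+	static int foo_port_vlan_filtering(struct dsa_switch *ds, int port,
+					   bool vlan_filtering)
+	{
+		struct foo_priv *priv = ds->priv;
+
+		/* Toggle 802.1Q ingress filtering on this port. */
+		return foo_rmw(priv, FOO_PORT_CTRL(port), FOO_VLAN_FILTER_EN,
+			       vlan_filtering ? FOO_VLAN_FILTER_EN : 0);
+	}
+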
 - port_vlan_prepare: bridge layer function invoked when the bridge prepares the
   configuration of a VLAN on the given port. If the operation is not supported
   by the hardware, this function should return -EOPNOTSUPP to inform the bridge
index 6c7f365b15157a42538a9fa65a43db06b1a08af9..9ae929395b24c442e61bb81e07609fbf8c55ffdb 100644 (file)
@@ -1036,15 +1036,17 @@ proxy_arp_pvlan - BOOLEAN
 
 shared_media - BOOLEAN
        Send(router) or accept(host) RFC1620 shared media redirects.
-       Overrides ip_secure_redirects.
+       Overrides secure_redirects.
        shared_media for the interface will be enabled if at least one of
        conf/{all,interface}/shared_media is set to TRUE,
        it will be disabled otherwise
        default TRUE
 
 secure_redirects - BOOLEAN
-       Accept ICMP redirect messages only for gateways,
-       listed in default gateway list.
+       Accept ICMP redirect messages only to gateways listed in the
+       interface's current gateway list. Even if disabled, RFC1122 redirect
+       rules still apply.
+       Overridden by shared_media.
        secure_redirects for the interface will be enabled if at least one of
        conf/{all,interface}/secure_redirects is set to TRUE,
        it will be disabled otherwise
index 8638f61c8c9db70d767302497d417be6c9a0a030..37eca00796eeebf805afbbf1504753c1a0febec0 100644 (file)
@@ -263,19 +263,23 @@ scmd->allowed.
 
  3. scmd recovered
     ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
-       - shost->host_failed--
        - clear scmd->eh_eflags
        - scsi_setup_cmd_retry()
        - move from local eh_work_q to local eh_done_q
     LOCKING: none
+    CONCURRENCY: at most one thread per separate eh_work_q to
+                keep queue manipulation lockless
 
  4. EH completes
     ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
-           layer of failure.
+           no more than one thread per separate eh_work_q to
+           a no more than one thread per separate eh_work_q to
+           manipulate the queue locklessly
        - scmd is removed from eh_done_q and scmd->eh_entry is cleared
        - if retry is necessary, scmd is requeued using
           scsi_queue_insert()
        - otherwise, scsi_finish_command() is invoked for scmd
+       - zero shost->host_failed
     LOCKING: queue or finish function performs appropriate locking
 
 
index 20d05719bceb92fa6c05648d5abe2e824be408de..3849814bfe6dd22e501407b887e717fd4a8dc2fa 100644 (file)
@@ -826,7 +826,8 @@ The keyctl syscall functions are:
  (*) Compute a Diffie-Hellman shared secret or public key
 
        long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params,
-                  char *buffer, size_t buflen);
+                  char *buffer, size_t buflen,
+                  void *reserved);
 
      The params struct contains serial numbers for three keys:
 
@@ -843,6 +844,8 @@ The keyctl syscall functions are:
      public key.  If the base is the remote public key, the result is
      the shared secret.
 
+     The reserved argument must be set to NULL.
+
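+     As a minimal sketch (private_key, prime_key and base_key are assumed
+     to be serial numbers of suitable, already-instantiated keys):
+
+	struct keyctl_dh_params params = {
+		.private = private_key,
+		.prime   = prime_key,
+		.base    = base_key,
+	};
+	char buffer[512];
+	long result_len;
+
+	/* returns the result length, or -1 with errno set on error */
+	result_len = keyctl(KEYCTL_DH_COMPUTE, &params, buffer,
+			    sizeof(buffer), NULL);
+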
      The buffer length must be at least the length of the prime, or zero.
 
      If the buffer length is nonzero, the length of the result is
index babd6378ec05730a0dd26af3ffadecc295883746..3010576c9fca047af63b668a926e6bd9f923563d 100644 (file)
@@ -183,8 +183,9 @@ provide meaningful defenses.
 ### Canaries, blinding, and other secrets
 
 It should be noted that things like the stack canary discussed earlier
-are technically statistical defenses, since they rely on a (leakable)
-secret value.
+are technically statistical defenses, since they rely on a secret value,
+and such values may become discoverable through an information exposure
+flaw.
 
 Blinding literal values for things like JITs, where the executable
 contents may be partially under the control of userspace, need a similar
@@ -199,8 +200,8 @@ working?) in order to maximize their success.
 Since the location of kernel memory is almost always instrumental in
 mounting a successful attack, making the location non-deterministic
 raises the difficulty of an exploit. (Note that this in turn makes
-the value of leaks higher, since they may be used to discover desired
-memory locations.)
+the value of information exposures higher, since they may be used to
+discover desired memory locations.)
 
 #### Text and module base
 
@@ -222,14 +223,21 @@ become more difficult to locate.
 Much of the kernel's dynamic memory (e.g. kmalloc, vmalloc, etc) ends up
 being relatively deterministic in layout due to the order of early-boot
 initializations. If the base address of these areas is not the same
-between boots, targeting them is frustrated, requiring a leak specific
-to the region.
+between boots, targeting them is frustrated, requiring an information
+exposure specific to the region.
+
+#### Structure layout
+
+By performing a per-build randomization of the layout of sensitive
+structures, attacks must either be tuned to known kernel builds or expose
+enough kernel memory to determine structure layouts before manipulating
+them.
 
 
-## Preventing Leaks
+## Preventing Information Exposures
 
 Since the locations of sensitive structures are the primary target for
-attacks, it is important to defend against leaks of both kernel memory
+attacks, it is important to defend against exposure of both kernel memory
 addresses and kernel memory contents (since they may contain kernel
 addresses or other sensitive things like canary values).
 
@@ -250,8 +258,8 @@ sure structure holes are cleared.
 When releasing memory, it is best to poison the contents (clear stack on
 syscall return, wipe heap memory on a free), to avoid reuse attacks that
 rely on the old contents of memory. This frustrates many uninitialized
-variable attacks, stack info leaks, heap info leaks, and use-after-free
-attacks.
+variable attacks, stack content exposures, heap content exposures, and
+use-after-free attacks.
 
 ### Destination tracking
 
diff --git a/Documentation/sphinx/convert_template.sed b/Documentation/sphinx/convert_template.sed
new file mode 100644 (file)
index 0000000..c1503fc
--- /dev/null
@@ -0,0 +1,18 @@
+#
+# Pandoc doesn't grok <function> or <structname>, so convert them
+# ahead of time.
+#
+# Use the following escapes to pass through pandoc:
+#      $bq = "`"
+#      $lt = "<"
+#      $gt = ">"
+#
+s%<function>\([^<(]\+\)()</function>%:c:func:$bq\1()$bq%g
+s%<function>\([^<(]\+\)</function>%:c:func:$bq\1()$bq%g
+s%<structname>struct *\([^<]\+\)</structname>%:c:type:$bqstruct \1 $lt\1$gt$bq%g
+s%struct <structname>\([^<]\+\)</structname>%:c:type:$bqstruct \1 $lt\1$gt$bq%g
+s%<structname>\([^<]\+\)</structname>%:c:type:$bqstruct \1 $lt\1$gt$bq%g
+#
+# Wrap docproc directives in para and code blocks.
+#
+s%^\(!.*\)$%<para><code>DOCPROC: \1</code></para>%
diff --git a/Documentation/sphinx/kernel-doc.py b/Documentation/sphinx/kernel-doc.py
new file mode 100644 (file)
index 0000000..4adfb0e
--- /dev/null
@@ -0,0 +1,127 @@
+# coding=utf-8
+#
+# Copyright © 2016 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+# Authors:
+#    Jani Nikula <jani.nikula@intel.com>
+#
+# Please make sure this works on both python2 and python3.
+#
+
+import os
+import subprocess
+import sys
+import re
+
+from docutils import nodes, statemachine
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import directives
+from sphinx.util.compat import Directive
+
+class KernelDocDirective(Directive):
+    """Extract kernel-doc comments from the specified file"""
+    required_arguments = 1
+    optional_arguments = 4
+    option_spec = {
+        'doc': directives.unchanged_required,
+        'functions': directives.unchanged_required,
+        'export': directives.flag,
+        'internal': directives.flag,
+    }
+    has_content = False
+
+    def run(self):
+        env = self.state.document.settings.env
+        cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']
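+        # scripts/kernel-doc emits reST with '#define LINENO <n>' markers;
+        # run() parses them below to map output back to source lines.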
+
+        filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
+
+        # Tell sphinx of the dependency
+        env.note_dependency(os.path.abspath(filename))
+
+        tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
+        source = filename
+
+        # FIXME: make this nicer and more robust against errors
+        if 'export' in self.options:
+            cmd += ['-export']
+        elif 'internal' in self.options:
+            cmd += ['-internal']
+        elif 'doc' in self.options:
+            cmd += ['-function', str(self.options.get('doc'))]
+        elif 'functions' in self.options:
+            for f in str(self.options.get('functions')).split(' '):
+                cmd += ['-function', f]
+
+        cmd += [filename]
+
+        try:
+            env.app.verbose('calling kernel-doc \'%s\'' % (" ".join(cmd)))
+
+            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
+            out, err = p.communicate()
+
+            # python2 needs conversion to unicode.
+            # python3 with universal_newlines=True returns strings.
+            if sys.version_info.major < 3:
+                out, err = unicode(out, 'utf-8'), unicode(err, 'utf-8')
+
+            if p.returncode != 0:
+                sys.stderr.write(err)
+
+                env.app.warn('kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
+                return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
+            elif env.config.kerneldoc_verbosity > 0:
+                sys.stderr.write(err)
+
+            lines = statemachine.string2lines(out, tab_width, convert_whitespace=True)
+            result = ViewList()
+
+            lineoffset = 0
+            line_regex = re.compile("^#define LINENO ([0-9]+)$")
+            for line in lines:
+                match = line_regex.search(line)
+                if match:
+                    # sphinx counts lines from 0
+                    lineoffset = int(match.group(1)) - 1
+                    # we must eat the LINENO marker lines since they upset the markup
+                else:
+                    result.append(line, source, lineoffset)
+                    lineoffset += 1
+
+            node = nodes.section()
+            node.document = self.state.document
+            self.state.nested_parse(result, self.content_offset, node)
+
+            return node.children
+
+        except Exception as e:
+            env.app.warn('kernel-doc \'%s\' processing failed with: %s' %
+                         (" ".join(cmd), str(e)))
+            return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
+
+def setup(app):
+    app.add_config_value('kerneldoc_bin', None, 'env')
+    app.add_config_value('kerneldoc_srctree', None, 'env')
+    app.add_config_value('kerneldoc_verbosity', 1, 'env')
+
+    app.add_directive('kernel-doc', KernelDocDirective)
diff --git a/Documentation/sphinx/post_convert.sed b/Documentation/sphinx/post_convert.sed
new file mode 100644 (file)
index 0000000..392770b
--- /dev/null
@@ -0,0 +1,23 @@
+#
+# Unescape.
+#
+s/$bq/`/g
+s/$lt/</g
+s/$gt/>/g
+#
+# pandoc thinks that "_" needs to be escaped.  Remove the extra
+# backslashes.
+#
+s/\\_/_/g
+#
+# Unwrap docproc directives.
+#
+s/^``DOCPROC: !E\(.*\)``$/.. kernel-doc:: \1\n   :export:/
+s/^``DOCPROC: !I\(.*\)``$/.. kernel-doc:: \1\n   :internal:/
+s/^``DOCPROC: !F\([^ ]*\) \(.*\)``$/.. kernel-doc:: \1\n   :functions: \2/
+s/^``DOCPROC: !P\([^ ]*\) \(.*\)``$/.. kernel-doc:: \1\n   :doc: \2/
+s/^``DOCPROC: \(!.*\)``$/.. WARNING: DOCPROC directive not supported: \1/
+#
+# Trim trailing whitespace.
+#
+s/[[:space:]]*$//
diff --git a/Documentation/sphinx/tmplcvt b/Documentation/sphinx/tmplcvt
new file mode 100755 (executable)
index 0000000..909a730
--- /dev/null
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Convert a template file into something like RST
+#
+# fix <function>
+# feed to pandoc
+# fix \_
+# title line?
+#
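+# usage: tmplcvt <docbook template> <output rst>
+# (run from Documentation/sphinx so the sed scripts are found)
+#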
+
+in=$1
+rst=$2
+tmp=$rst.tmp
+
+cp $in $tmp
+sed --in-place -f convert_template.sed $tmp
+pandoc -s -S -f docbook -t rst -o $rst $tmp
+sed --in-place -f post_convert.sed $rst
+rm $tmp
index eaf8297dbca23cfeeeeb7b21b97385715246f451..e8e2ebafe5fa0ea350af12b5103788e3d04de268 100644 (file)
@@ -6,8 +6,8 @@
 
 This document serves as a guide for device drivers writers on what the
 sync_file API is, and how drivers can support it. Sync file is the carrier of
-the fences(struct fence) that needs to synchronized between drivers or across
-process boundaries.
+the fences (struct fence) that are needed to synchronize between drivers or
+across process boundaries.
 
 The sync_file API is meant to be used to send and receive fence information
 to/from userspace. It enables userspace to do explicit fencing, where instead
@@ -32,7 +32,7 @@ in-fences and out-fences
 Sync files can go either to or from userspace. When a sync_file is sent from
 the driver to userspace we call the fences it contains 'out-fences'. They are
 related to a buffer that the driver is processing or is going to process, so
-the driver an create out-fence to be able to notify, through fence_signal(),
+the driver creates an out-fence to be able to notify, through fence_signal(),
 when it has finished using (or processing) that buffer. Out-fences are fences
 that the driver creates.
 
index 1a5a12184a358dc395874447ccc3c502d67f567b..85d0549ad84636652d1867d7d0a58c7ed1cc3c19 100644 (file)
@@ -45,7 +45,7 @@ is how we expect the compiler, application and kernel to work together.
    MPX-instrumented.
 3) The kernel detects that the CPU has MPX, allows the new prctl() to
    succeed, and notes the location of the bounds directory. Userspace is
-   expected to keep the bounds directory at that locationWe note it
+   expected to keep the bounds directory at that location. We note it
    instead of reading it each time because the 'xsave' operation needed
    to access the bounds directory register is an expensive operation.
 4) If the application needs to spill bounds out of the 4 registers, it
@@ -167,7 +167,7 @@ If a #BR is generated due to a bounds violation caused by MPX.
 We need to decode MPX instructions to get violation address and
 set this address into extended struct siginfo.
 
-The _sigfault feild of struct siginfo is extended as follow:
+The _sigfault field of struct siginfo is extended as follow:
 
 87             /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
 88             struct {
@@ -240,5 +240,5 @@ them at the same bounds table.
 This is allowed architecturally.  See more information "Intel(R) Architecture
 Instruction Set Extensions Programming Reference" (9.3.4).
 
-However, if users did this, the kernel might be fooled in to unmaping an
+However, if users did this, the kernel might be fooled in to unmapping an
 in-use bounds table since it does not recognize sharing.
index 39d1723267036a5ba7dff02dc82e39d69a1eeae8..6a0607b99ed8780c45920676e9b122efefa77235 100644 (file)
@@ -5,7 +5,7 @@ memory, it has two choices:
     from areas other than the one we are trying to flush will be
     destroyed and must be refilled later, at some cost.
  2. Use the invlpg instruction to invalidate a single page at a
-    time.  This could potentialy cost many more instructions, but
+    time.  This could potentially cost many more instructions, but
     it is a much more precise operation, causing no collateral
     damage to other TLB entries.
 
@@ -19,7 +19,7 @@ Which method to do depends on a few things:
     work.
  3. The size of the TLB.  The larger the TLB, the more collateral
     damage we do with a full flush.  So, the larger the TLB, the
-    more attrative an individual flush looks.  Data and
+    more attractive an individual flush looks.  Data and
     instructions have separate TLBs, as do different page sizes.
  4. The microarchitecture.  The TLB has become a multi-level
     cache on modern CPUs, and the global flushes have become more
index b1fb30273286c7ae6878f6b43fecb6df8f8f1603..d0648a74fceb50659d2eb018d47178619636b74e 100644 (file)
@@ -36,7 +36,7 @@ between all CPUs.
 
 check_interval
        How often to poll for corrected machine check errors, in seconds
-       (Note output is hexademical). Default 5 minutes.  When the poller
+       (Note output is hexadecimal). Default 5 minutes.  When the poller
        finds MCEs it triggers an exponential speedup (poll more often) on
        the polling interval.  When the poller stops finding MCEs, it
        triggers an exponential backoff (poll less often) on the polling
index 654afd72eb24762e926440656355352c5253eaba..bbb9d6ae05ca1ea688bedc854ea9ff7652eb6295 100644 (file)
@@ -24,34 +24,33 @@ Documentation/CodingStyle的中文翻译
 
                Linux内核代码风格
 
-这是一个简短的文档,描述了linux内核的首选代码风格。代码风格是因人而异的,而且我
-不愿意把我的观点强加给任何人,不过这里所讲述的是我必须要维护的代码所遵守的风格,
-并且我也希望绝大多数其他代码也能遵守这个风格。请在写代码时至少考虑一下本文所述的
-风格。
+这是一个简短的文档,描述了 linux 内核的首选代码风格。代码风格是因人而异的,而且我
+不愿意把自己的观点强加给任何人,但这就像我去做任何事情都必须遵循的原则那样,我也
+希望在绝大多数事上保持这种态度。请(在写代码时)至少考虑一下这里的代码风格。
 
-首先,我建议你打印一份GNU代码规范,然后不要读它。烧了它,这是一个具有重大象征性
-意义的动作。
+首先,我建议你打印一份 GNU 代码规范,然后不要读。烧了它,这是一个具有重大象征性意义
+的动作。
 
 不管怎样,现在我们开始:
 
 
-               第一章:缩进
+               第一章:缩进
 
-制表符是8个字符,所以缩进也是8个字符。有些异端运动试图将缩进变为4(乃至2)个字符
-深,这几乎相当于尝试将圆周率的值定义为3。
+制表符是 8 个字符,所以缩进也是 8 个字符。有些异端运动试图将缩进变为 4(甚至 2!)
+个字符深,这几乎相当于尝试将圆周率的值定义为 3。
 
 理由:缩进的全部意义就在于清楚的定义一个控制块起止于何处。尤其是当你盯着你的屏幕
-连续看了20小时之后,你将会发现大一点的缩进会使你更容易分辨缩进。
+连续看了 20 小时之后,你将会发现大一点的缩进会使你更容易分辨缩进。
 
-现在,有些人会抱怨8个字符的缩进会使代码向右边移动的太远,在80个字符的终端屏幕上
-就很难读这样的代码。这个问题的答案是,如果你需要3级以上的缩进,不管用何种方式你
+现在,有些人会抱怨 8 个字符的缩进会使代码向右边移动的太远,在 80 个字符的终端屏幕上
+就很难读这样的代码。这个问题的答案是,如果你需要 3 级以上的缩进,不管用何种方式你
 的代码已经有问题了,应该修正你的程序。
 
-简而言之,8个字符的缩进可以让代码更容易阅读,还有一个好处是当你的函数嵌套太深的
+简而言之,8 个字符的缩进可以让代码更容易阅读,还有一个好处是当你的函数嵌套太深的
 时候可以给你警告。留心这个警告。
 
-在switch语句中消除多级缩进的首选的方式是让“switch”和从属于它的“case”标签对齐于同
-一列,而不要“两次缩进”“case”标签。比如:
+在 switch 语句中消除多级缩进的首选的方式是让 “switch” 和从属于它的 “case” 标签
+对齐于同一列,而不要 “两次缩进” “case” 标签。比如:
 
        switch (suffix) {
        case 'G':
@@ -70,7 +69,6 @@ Documentation/CodingStyle的中文翻译
                break;
        }
 
-
 不要把多个语句放在一行里,除非你有什么东西要隐藏:
 
        if (condition) do_this;
@@ -79,7 +77,7 @@ Documentation/CodingStyle的中文翻译
 也不要在一行里放多个赋值语句。内核代码风格超级简单。就是避免可能导致别人误读的表
 达式。
 
-除了注释、文档和Kconfig之外,不要使用空格来缩进,前面的例子是例外,是有意为之。
+除了注释、文档和 Kconfig 之外,不要使用空格来缩进,前面的例子是例外,是有意为之。
 
 选用一个好的编辑器,不要在行尾留空格。
 
@@ -88,27 +86,18 @@ Documentation/CodingStyle的中文翻译
 
 代码风格的意义就在于使用平常使用的工具来维持代码的可读性和可维护性。
 
-每一行的长度的限制是80列,我们强烈建议您遵守这个惯例。
+每一行的长度的限制是 80 列,我们强烈建议您遵守这个惯例。
 
-长于80列的语句要打散成有意义的片段。每个片段要明显短于原来的语句,而且放置的位置
-也明显的靠右。同样的规则也适用于有很长参数列表的函数头。长字符串也要打散成较短的
-字符串。唯一的例外是超过80列可以大幅度提高可读性并且不会隐藏信息的情况。
-
-void fun(int a, int b, int c)
-{
-       if (condition)
-               printk(KERN_WARNING "Warning this is a long printk with "
-                                               "3 parameters a: %u b: %u "
-                                               "c: %u \n", a, b, c);
-       else
-               next_statement;
-}
+长于 80 列的语句要打散成有意义的片段。除非超过 80 列能显著增加可读性,并且不会隐藏
+信息。子片段要明显短于母片段,并明显靠右。这同样适用于有着很长参数列表的函数头。
+然而,绝对不要打散对用户可见的字符串,例如 printk 信息,因为这将导致无法 grep 这些
+信息。
 
                第三章:大括号和空格的放置
 
 C语言风格中另外一个常见问题是大括号的放置。和缩进大小不同,选择或弃用某种放置策
-略并没有多少技术上的原因,不过首选的方式,就像Kernighan和Ritchie展示给我们的,是
-把起始大括号放在行尾,而把结束大括号放在行首,所以:
+略并没有多少技术上的原因,不过首选的方式,就像 Kernighan 和 Ritchie 展示给我们的,
+是把起始大括号放在行尾,而把结束大括号放在行首,所以:
 
        if (x is true) {
                we do y
@@ -134,12 +123,12 @@ C语言风格中另外一个常见问题是大括号的放置。和缩进大小
                body of function
        }
 
-全世界的异端可能会抱怨这个不一致性是……呃……不一致的,不过所有思维健全的人都知道
-a)K&R是_正确的_,并且(b)K&R是正确的。此外,不管怎样函数都是特殊的(在C语言中
-函数是不能嵌套的)。
+全世界的异端可能会抱怨这个不一致性是……呃……不一致的,不过所有思维健全的人都知道
+(a) K&R 是 _正确的_,并且 (b) K&R 是正确的。此外,不管怎样函数都是特殊的(C
+函数是不能嵌套的)。
 
-注意结束大括号独自占据一行,除非它后面跟着同一个语句的剩余部分,也就是do语句中的
-“while”或者if语句中的“else”,像这样:
+注意结束大括号独自占据一行,除非它后面跟着同一个语句的剩余部分,也就是 do 语句中的
+“while” 或者 if 语句中的 “else”,像这样:
 
        do {
                body of do-loop
@@ -158,41 +147,50 @@ a)K&R是_正确的_,并且(b)K&R是正确的。此外,不管怎样函
 理由:K&R。
 
 也请注意这种大括号的放置方式也能使空(或者差不多空的)行的数量最小化,同时不失可
-读性。因此,由于你的屏幕上的新行是不可再生资源(想想25行的终端屏幕),你将会有更
+读性。因此,由于你的屏幕上的新行是不可再生资源(想想 25 行的终端屏幕),你将会有更
 多的空行来放置注释。
 
 当只有一个单独的语句的时候,不用加不必要的大括号。
 
-if (condition)
-       action();
+       if (condition)
+               action();
+
+和
+
+       if (condition)
+               do_this();
+       else
+               do_that();
 
-这点不适用于本身为某个条件语句的一个分支的单独语句。这时需要在两个分支里都使用大
-括号。
+这并不适用于只有一个条件分支是单语句的情况;这时所有分支都要使用大括号:
 
-if (condition) {
-       do_this();
-       do_that();
-} else {
-       otherwise();
-}
+       if (condition) {
+               do_this();
+               do_that();
+       } else {
+               otherwise();
+       }
 
                3.1:空格
 
-Linux内核的空格使用方式(主要)取决于它是用于函数还是关键字。(大多数)关键字后
-要加一个空格。值得注意的例外是sizeof、typeof、alignof和__attribute__,这些关键字
-某些程度上看起来更像函数(它们在Linux里也常常伴随小括号而使用,尽管在C语言里这样
-的小括号不是必需的,就像“struct fileinfo info”声明过后的“sizeof info”)。
+Linux 内核的空格使用方式(主要)取决于它是用于函数还是关键字。(大多数)关键字后
+要加一个空格。值得注意的例外是 sizeof、typeof、alignof 和 __attribute__,这些
+关键字某些程度上看起来更像函数(它们在 Linux 里也常常伴随小括号而使用,尽管在 C 里
+这样的小括号不是必需的,就像 “struct fileinfo info” 声明过后的 “sizeof info”)。
 
 所以在这些关键字之后放一个空格:
+
        if, switch, case, for, do, while
-但是不要在sizeof、typeof、alignof或者__attribute__这些关键字之后放空格。例如,
+
+但是不要在 sizeof、typeof、alignof 或者 __attribute__ 这些关键字之后放空格。例如,
+
        s = sizeof(struct file);
 
 不要在小括号里的表达式两侧加空格。这是一个反例:
 
        s = sizeof( struct file );
 
-当声明指针类型或者返回指针类型的函数时,“*”的首选使用方式是使之靠近变量名或者函
+当声明指针类型或者返回指针类型的函数时,“*” 的首选使用方式是使之靠近变量名或者函
 数名,而不是靠近类型名。例子:
 
        char *linux_banner;
@@ -204,15 +202,18 @@ Linux内核的空格使用方式(主要)取决于它是用于函数还是关
        =  +  -  <  >  *  /  %  |  &  ^  <=  >=  ==  !=  ?  :
 
 但是一元操作符后不要加空格:
+
        &  *  +  -  ~  !  sizeof  typeof  alignof  __attribute__  defined
 
 后缀自加和自减一元操作符前不加空格:
+
        ++  --
 
 前缀自加和自减一元操作符后不加空格:
+
        ++  --
 
-“.”和“->”结构体成员操作符前后不加空格。
+‘.’ 和 “->” 结构体成员操作符前后不加空格。
 
 不要在行尾留空白。有些可以自动缩进的编辑器会在新行的行首加入适量的空白,然后你
 就可以直接在那一行输入代码。不过假如你最后没有在那一行输入代码,有些编辑器就不
@@ -225,23 +226,23 @@ Linux内核的空格使用方式(主要)取决于它是用于函数还是关
 
                第四章:命名
 
-C是一个简朴的语言,你的命名也应该这样。和Modula-2和Pascal程序员不同,C程序员不使
-用类似ThisVariableIsATemporaryCounter这样华丽的名字。C程序员会称那个变量为“tmp”
-,这样写起来会更容易,而且至少不会令其难于理解。
+C是一个简朴的语言,你的命名也应该这样。和 Modula-2 和 Pascal 程序员不同,C 程序员
+不使用类似 ThisVariableIsATemporaryCounter 这样华丽的名字。C 程序员会称那个变量
+为 “tmp”,这样写起来会更容易,而且至少不会令其难于理解。
 
 不过,虽然混用大小写的名字是不提倡使用的,但是全局变量还是需要一个具描述性的名字
-。称一个全局函数为“foo”是一个难以饶恕的错误。
+。称一个全局函数为 “foo” 是一个难以饶恕的错误。
 
 全局变量(只有当你真正需要它们的时候再用它)需要有一个具描述性的名字,就像全局函
-数。如果你有一个可以计算活动用户数量的函数,你应该叫它“count_active_users()”或者
-类似的名字,你不应该叫它“cntuser()”。
+数。如果你有一个可以计算活动用户数量的函数,你应该叫它 “count_active_users()”
+或者类似的名字,你不应该叫它 “cntuser()”。
 
 在函数名中包含函数类型(所谓的匈牙利命名法)是脑子出了问题——编译器知道那些类型而
 且能够检查那些类型,这样做只能把程序员弄糊涂了。难怪微软总是制造出有问题的程序。
 
 本地变量名应该简短,而且能够表达相关的含义。如果你有一些随机的整数型的循环计数器
-,它应该被称为“i”。叫它“loop_counter”并无益处,如果它没有被误解的可能的话。类似
-的,“tmp”可以用来称呼任意类型的临时变量。
+,它应该被称为 “i”。叫它 “loop_counter” 并无益处,如果它没有被误解的可能的话。
+类似的,“tmp” 可以用来称呼任意类型的临时变量。
 
 如果你怕混淆了你的本地变量名,你就遇到另一个问题了,叫做函数增长荷尔蒙失衡综合症
 。请看第六章(函数)。
@@ -249,9 +250,9 @@ C是一个简朴的语言,你的命名也应该这样。和Modula-2和Pascal
 
                第五章:Typedef
 
-不要使用类似“vps_t”之类的东西。
+不要使用类似 “vps_t” 之类的东西。
 
-对结构体和指针使用typedef是一个错误。当你在代码里看到:
+对结构体和指针使用 typedef 是一个错误。当你在代码里看到:
 
        vps_t a;
 
@@ -261,91 +262,91 @@ C是一个简朴的语言,你的命名也应该这样。和Modula-2和Pascal
 
        struct virtual_container *a;
 
-你就知道“a”是什么了。
+你就知道 “a” 是什么了。
 
-很多人认为typedef“能提高可读性”。实际不是这样的。它们只在下列情况下有用:
+很多人认为 typedef “能提高可读性”。实际不是这样的。它们只在下列情况下有用:
 
- (a) 完全不透明的对象(这种情况下要主动使用typedef来隐藏这个对象实际上是什么)。
+ (a) 完全不透明的对象(这种情况下要主动使用 typedef 来隐藏这个对象实际上是什么)。
 
-     例如:“pte_t”等不透明对象,你只能用合适的访问函数来访问它们。
+     例如:“pte_t” 等不透明对象,你只能用合适的访问函数来访问它们。
 
-     注意!不透明性和“访问函数”本身是不好的。我们使用pte_t等类型的原因在于真的是
+     注意!不透明性和“访问函数”本身是不好的。我们使用 pte_t 等类型的原因在于真的是
      完全没有任何共用的可访问信息。
 
- (b) 清楚的整数类型,如此,这层抽象就可以帮助消除到底是“int”还是“long”的混淆。
+ (b) 清楚的整数类型,如此,这层抽象就可以帮助消除到底是 “int” 还是 “long” 的混淆。
 
-     u8/u16/u32是完全没有问题的typedef,不过它们更符合类别(d)而不是这里。
+     u8/u16/u32 是完全没有问题的 typedef,不过它们更符合类别 (d) 而不是这里。
 
-     再次注意!要这样做,必须事出有因。如果某个变量是“unsigned long“,那么没有必要
+     再次注意!要这样做,必须事出有因。如果某个变量是 “unsigned long”,那么没有必要
 
        typedef unsigned long myflags_t;
 
-     不过如果有一个明确的原因,比如它在某种情况下可能会是一个“unsigned int”而在
-     其他情况下可能为“unsigned long”,那么就不要犹豫,请务必使用typedef。
+     不过如果有一个明确的原因,比如它在某种情况下可能会是一个 “unsigned int” 而在
+     其他情况下可能为 “unsigned long”,那么就不要犹豫,请务必使用 typedef。
 
 (c) 当你使用 sparse 按字面地创建一个新类型来做类型检查的时候。
 
  (d) 和标准C99类型相同的类型,在某些例外的情况下。
 
-     虽然让眼睛和脑筋来适应新的标准类型比如“uint32_t”不需要花很多时间,可是有些
+     虽然让眼睛和脑筋来适应新的标准类型比如 “uint32_t” 不需要花很多时间,可是有些
      人仍然拒绝使用它们。
 
-     因此,Linux特有的等同于标准类型的“u8/u16/u32/u64”类型和它们的有符号类型是被
+     因此,Linux 特有的等同于标准类型的 “u8/u16/u32/u64” 类型和它们的有符号类型是被
      允许的——尽管在你自己的新代码中,它们不是强制要求要使用的。
 
      当编辑已经使用了某个类型集的已有代码时,你应该遵循那些代码中已经做出的选择。
 
  (e) 可以在用户空间安全使用的类型。
 
-     在某些用户空间可见的结构体里,我们不能要求C99类型而且不能用上面提到的“u32”
-     类型。因此,我们在与用户空间共享的所有结构体中使用__u32和类似的类型。
+     在某些用户空间可见的结构体里,我们不能要求 C99 类型而且不能用上面提到的 “u32”
+     类型。因此,我们在与用户空间共享的所有结构体中使用 __u32 和类似的类型。
 
-可能还有其他的情况,不过基本的规则是永远不要使用typedef,除非你可以明确的应用上
+可能还有其他的情况,不过基本的规则是永远不要使用 typedef,除非你可以明确的应用上
 述某个规则中的一个。
 
 总的来说,如果一个指针或者一个结构体里的元素可以合理的被直接访问到,那么它们就不
-应该是一个typedef。
+应该是一个 typedef。
 
 
                第六章:函数
 
 函数应该简短而漂亮,并且只完成一件事情。函数应该可以一屏或者两屏显示完(我们都知
-道ISO/ANSI屏幕大小是80x24),只做一件事情,而且把它做好。
+道 ISO/ANSI 屏幕大小是 80x24),只做一件事情,而且把它做好。
 
 一个函数的最大长度是和该函数的复杂度和缩进级数成反比的。所以,如果你有一个理论上
-很简单的只有一个很长(但是简单)的case语句的函数,而且你需要在每个case里做很多很
-小的事情,这样的函数尽管很长,但也是可以的。
+很简单的只有一个很长(但是简单)的 case 语句的函数,而且你需要在每个 case 里做
+很多很小的事情,这样的函数尽管很长,但也是可以的。
 
 不过,如果你有一个复杂的函数,而且你怀疑一个天分不是很高的高中一年级学生可能甚至
 搞不清楚这个函数的目的,你应该严格的遵守前面提到的长度限制。使用辅助函数,并为之
 取个具描述性的名字(如果你觉得它们的性能很重要的话,可以让编译器内联它们,这样的
 效果往往会比你写一个复杂函数的效果要好。)
 
-函数的另外一个衡量标准是本地变量的数量。此数量不应超过5-10个,否则你的函数就有
+函数的另外一个衡量标准是本地变量的数量。此数量不应超过 5-10 个,否则你的函数就有
 问题了。重新考虑一下你的函数,把它分拆成更小的函数。人的大脑一般可以轻松的同时跟
-踪7个不同的事物,如果再增多的话,就会糊涂了。即便你聪颖过人,你也可能会记不清你2
-个星期前做过的事情。
+踪 7 个不同的事物,如果再增多的话,就会糊涂了。即便你聪颖过人,你也可能会记不清你
+2 个星期前做过的事情。
 
-在源文件里,使用空行隔开不同的函数。如果该函数需要被导出,它的EXPORT*宏应该紧贴
+在源文件里,使用空行隔开不同的函数。如果该函数需要被导出,它的 EXPORT* 宏应该紧贴
 在它的结束大括号之下。比如:
 
-int system_is_up(void)
-{
-       return system_state == SYSTEM_RUNNING;
-}
-EXPORT_SYMBOL(system_is_up);
+       int system_is_up(void)
+       {
+               return system_state == SYSTEM_RUNNING;
+       }
+       EXPORT_SYMBOL(system_is_up);
 
-在函数原型中,包含函数名和它们的数据类型。虽然C语言里没有这样的要求,在Linux里这
+在函数原型中,包含参数名和它们的数据类型。虽然 C 语言里没有这样的要求,在 Linux 里这
 是提倡的做法,因为这样可以很简单的给读者提供更多的有价值的信息。
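+
+(译注:一个示意性的原型对比,函数名和参数名仅为假设:)
+
+       int max_packet_size(struct device *dev, int hint);      /* 提倡 */
+       int max_packet_size(struct device *, int);              /* 读者得不到参数含义 */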
 
 
                第七章:集中的函数退出途径
 
-虽然被某些人声称已经过时,但是goto语句的等价物还是经常被编译器所使用,具体形式是
+虽然被某些人声称已经过时,但是 goto 语句的等价物还是经常被编译器所使用,具体形式是
 无条件跳转指令。
 
-当一个函数从多个位置退出并且需要做一些通用的清理工作的时候,goto的好处就显现出来
-了。
+当一个函数从多个位置退出,并且需要做一些类似清理的常见操作时,goto 语句就很方便了。
+如果并不需要清理操作,那么直接 return 即可。
 
 理由是:
 
@@ -354,26 +355,37 @@ EXPORT_SYMBOL(system_is_up);
 - 可以避免由于修改时忘记更新某个单独的退出点而导致的错误
 - 减轻了编译器的工作,无需删除冗余代码;)
 
-int fun(int a)
-{
-       int result = 0;
-       char *buffer = kmalloc(SIZE);
-
-       if (buffer == NULL)
-               return -ENOMEM;
-
-       if (condition1) {
-               while (loop1) {
-                       ...
+       int fun(int a)
+       {
+               int result = 0;
+               char *buffer;
+
+               buffer = kmalloc(SIZE, GFP_KERNEL);
+               if (!buffer)
+                       return -ENOMEM;
+
+               if (condition1) {
+                       while (loop1) {
+                               ...
+                       }
+                       result = 1;
+                       goto out_buffer;
                }
-               result = 1;
-               goto out;
+               ...
+       out_buffer:
+               kfree(buffer);
+               return result;
        }
-       ...
-out:
-       kfree(buffer);
-       return result;
-}
+
+一个需要注意的常见错误是“只有一个 err 标签”的错误,就像这样:
+
+       err:
+               kfree(foo->bar);
+               kfree(foo);
+               return ret;
+
+这段代码的错误是,在某些退出路径上 “foo” 是 NULL。通常情况下,通过把它分离成两个
+错误标签 “err_bar:” 和 “err_foo:” 来修复这个错误。
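+
+(译注:按上述方法拆分后的示意,沿用正文中的标签名:)
+
+       err_bar:
+               kfree(foo->bar);
+       err_foo:
+               kfree(foo);
+               return ret;
+
+这样,只分配了 “foo” 的退出路径跳到 “err_foo:”,两者都分配了的路径才跳到 “err_bar:”。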
 
                第八章:注释
 
@@ -386,10 +398,10 @@ out:
 加太多。你应该做的,是把注释放在函数的头部,告诉人们它做了什么,也可以加上它做这
 些事情的原因。
 
-当注释内核API函数时,请使用kernel-doc格式。请看
-Documentation/kernel-doc-nano-HOWTO.txt和scripts/kernel-doc以获得详细信息。
+当注释内核API函数时,请使用 kernel-doc 格式。请看
+Documentation/kernel-doc-nano-HOWTO.txt 和 scripts/kernel-doc 以获得详细信息。
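+
+(译注:kernel-doc 格式的一个最小示例,函数名和参数均为假设:)
+
+       /**
+        * count_active_users() - 统计活动用户的数量
+        * @ns: 要统计的用户命名空间
+        *
+        * 遍历 @ns 并返回其中活动用户的数量。
+        */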
 
-Linux的注释风格是C89“/* ... */”风格。不要使用C99风格“// ...”注释。
+Linux 的注释风格是 C89 “/* ... */” 风格。不要使用 C99 风格 “// ...” 注释。
 
 长(多行)的首选注释风格是:
 
@@ -402,6 +414,15 @@ Linux的注释风格是C89“/* ... */”风格。不要使用C99风格“// ...
         * with beginning and ending almost-blank lines.
         */
 
+对于 net/ 和 drivers/net/ 下的文件,首选的长(多行)注释风格有些不同。
+
+       /* The preferred comment style for files in net/ and drivers/net
+        * looks like this.
+        *
+        * It is nearly the same as the generally preferred comment style,
+        * but there is no initial almost-blank line.
+        */
+
 注释数据也是很重要的,不管是基本类型还是衍生类型。为了方便实现这一点,每一行应只
 声明一个数据(不要使用逗号来一次声明多个数据)。这样你就有空间来为每个数据写一段
 小注释来解释它们的用途了。
@@ -409,49 +430,63 @@ Linux的注释风格是C89“/* ... */”风格。不要使用C99风格“// ...
 
                第九章:你已经把事情弄糟了
 
-这没什么,我们都是这样。可能你的使用了很长时间Unix的朋友已经告诉你“GNU emacs”能
-自动帮你格式化C源代码,而且你也注意到了,确实是这样,不过它所使用的默认值和我们
-想要的相去甚远(实际上,甚至比随机打的还要差——无数个猴子在GNU emacs里打字永远不
-会创造出一个好程序)(译注:请参考Infinite Monkey Theorem)
-
-所以你要么放弃GNU emacs,要么改变它让它使用更合理的设定。要采用后一个方案,你可
-以把下面这段粘贴到你的.emacs文件里。
-
-(defun linux-c-mode ()
-  "C mode with adjusted defaults for use with the Linux kernel."
-  (interactive)
-  (c-mode)
-  (c-set-style "K&R")
-  (setq tab-width 8)
-  (setq indent-tabs-mode t)
-  (setq c-basic-offset 8))
-
-这样就定义了M-x linux-c-mode命令。当你hack一个模块的时候,如果你把字符串
--*- linux-c -*-放在头两行的某个位置,这个模式将会被自动调用。如果你希望在你修改
-/usr/src/linux里的文件时魔术般自动打开linux-c-mode的话,你也可能需要添加
-
-(setq auto-mode-alist (cons '("/usr/src/linux.*/.*\\.[ch]$" . linux-c-mode)
-                       auto-mode-alist))
-
-到你的.emacs文件里。
-
-不过就算你尝试让emacs正确的格式化代码失败了,也并不意味着你失去了一切:还可以用“
-indent”。
-
-不过,GNU indent也有和GNU emacs一样有问题的设定,所以你需要给它一些命令选项。不
-过,这还不算太糟糕,因为就算是GNU indent的作者也认同K&R的权威性(GNU的人并不是坏
-人,他们只是在这个问题上被严重的误导了),所以你只要给indent指定选项“-kr -i8”
-(代表“K&R,8个字符缩进”),或者使用“scripts/Lindent”,这样就可以以最时髦的方式
+这没什么,我们都是这样。可能你那位用了很长时间 Unix 的朋友已经告诉你 “GNU emacs” 能
+自动帮你格式化 C 源代码,而且你也注意到了,确实是这样,不过它所使用的默认值和我们
+想要的相去甚远(实际上,甚至比随机打的还要差——无数个猴子在 GNU emacs 里打字永远不
+会创造出一个好程序)(译注:请参考 Infinite Monkey Theorem)
+
+所以你要么放弃 GNU emacs,要么改变它让它使用更合理的设定。要采用后一个方案,你可
+以把下面这段粘贴到你的 .emacs 文件里。
+
+(defun c-lineup-arglist-tabs-only (ignored)
+  "Line up argument lists by tabs, not spaces"
+  (let* ((anchor (c-langelem-pos c-syntactic-element))
+         (column (c-langelem-2nd-pos c-syntactic-element))
+         (offset (- (1+ column) anchor))
+         (steps (floor offset c-basic-offset)))
+    (* (max steps 1)
+       c-basic-offset)))
+
+(add-hook 'c-mode-common-hook
+          (lambda ()
+            ;; Add kernel style
+            (c-add-style
+             "linux-tabs-only"
+             '("linux" (c-offsets-alist
+                        (arglist-cont-nonempty
+                         c-lineup-gcc-asm-reg
+                         c-lineup-arglist-tabs-only))))))
+
+(add-hook 'c-mode-hook
+          (lambda ()
+            (let ((filename (buffer-file-name)))
+              ;; Enable kernel mode for the appropriate files
+              (when (and filename
+                         (string-match (expand-file-name "~/src/linux-trees")
+                                       filename))
+                (setq indent-tabs-mode t)
+                (setq show-trailing-whitespace t)
+                (c-set-style "linux-tabs-only")))))
+
+这会让 emacs 在处理 ~/src/linux-trees 目录下的 C 源文件时获得更好的内核代码风格。
+
+不过就算你尝试让 emacs 正确的格式化代码失败了,也并不意味着你失去了一切:还可以用
+“indent”。
+
+不过,GNU indent 也有和 GNU emacs 一样有问题的设定,所以你需要给它一些命令选项。不
+过,这还不算太糟糕,因为就算是 GNU indent 的作者也认同 K&R 的权威性(GNU 的人并不是
+坏人,他们只是在这个问题上被严重的误导了),所以你只要给 indent 指定选项 “-kr -i8”
+(代表 “K&R,8 个字符缩进”),或者使用 “scripts/Lindent”,这样就可以以最时髦的方式
 缩进源代码。
 
-“indent”有很多选项,特别是重新格式化注释的时候,你可能需要看一下它的手册页。不过
-记住:“indent”不能修正坏的编程习惯。
+“indent” 有很多选项,特别是重新格式化注释的时候,你可能需要看一下它的手册页。不过
+记住:“indent” 不能修正坏的编程习惯。
 
 
-               第十章:Kconfig配置文件
+               第十章:Kconfig 配置文件
 
-对于遍布源码树的所有Kconfig*配置文件来说,它们缩进方式与C代码相比有所不同。紧挨
-在“config”定义下面的行缩进一个制表符,帮助信息则再多缩进2个空格。比如:
+对于遍布源码树的所有 Kconfig* 配置文件来说,它们缩进方式与 C 代码相比有所不同。紧挨
+在 “config” 定义下面的行缩进一个制表符,帮助信息则再多缩进 2 个空格。比如:
 
 config AUDIT
        bool "Auditing support"
@@ -470,7 +505,7 @@ config ADFS_FS_RW
        depends on ADFS_FS
        ...
 
-要查看配置文件的完整文档,请看Documentation/kbuild/kconfig-language.txt。
+要查看配置文件的完整文档,请看 Documentation/kbuild/kconfig-language.txt。
 
 
                第十一章:数据结构
@@ -489,11 +524,11 @@ config ADFS_FS_RW
 很多数据结构实际上有2级引用计数,它们通常有不同“类”的用户。子类计数器统计子类用
 户的数量,每当子类计数器减至零时,全局计数器减一。
 
-这种“多级引用计数”的例子可以在内存管理(“struct mm_struct”:mm_users和mm_count)
+这种“多级引用计数”的例子可以在内存管理(“struct mm_struct”:mm_users 和 mm_count)
 和文件系统(“struct super_block”:s_count和s_active)中找到。
 
 记住:如果另一个执行线索可以找到你的数据结构,但是这个数据结构没有引用计数器,这
-里几乎肯定是一个bug。
+里几乎肯定是一个 bug。
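+
+(译注:内核在 <linux/kref.h> 中提供了通用的引用计数实现,下面是一个示意,
+结构体名和释放函数名均为假设:)
+
+       struct my_obj {
+               struct kref refcount;
+       };
+
+       kref_init(&obj->refcount);                /* 创建时计数为 1 */
+       kref_get(&obj->refcount);                 /* 出现新的使用者 */
+       kref_put(&obj->refcount, my_obj_release); /* 计数减到零时调用释放函数 */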
 
 
                第十二章:宏,枚举和RTL
@@ -508,102 +543,128 @@ config ADFS_FS_RW
 
 一般的,如果能写成内联函数就不要写成像函数的宏。
 
-含有多个语句的宏应该被包含在一个do-while代码块里:
+含有多个语句的宏应该被包含在一个 do-while 代码块里:
 
-#define macrofun(a, b, c)                      \
-       do {                                    \
-               if (a == 5)                     \
-                       do_this(b, c);          \
-       } while (0)
+       #define macrofun(a, b, c)                       \
+               do {                                    \
+                       if (a == 5)                     \
+                               do_this(b, c);          \
+               } while (0)
 
 使用宏的时候应避免的事情:
 
 1) 影响控制流程的宏:
 
-#define FOO(x)                                 \
-       do {                                    \
-               if (blah(x) < 0)                \
-                       return -EBUGGERED;      \
-       } while(0)
+       #define FOO(x)                                  \
+               do {                                    \
+                       if (blah(x) < 0)                \
+                               return -EBUGGERED;      \
+               } while (0)
 
 非常不好。它看起来像一个函数,不过却能导致“调用”它的函数退出;不要打乱读者大脑里
 的语法分析器。
 
 2) 依赖于一个固定名字的本地变量的宏:
 
-#define FOO(val) bar(index, val)
+       #define FOO(val) bar(index, val)
 
 可能看起来像是个不错的东西,不过它非常容易把读代码的人搞糊涂,而且容易导致看起来
 不相关的改动带来错误。
 
-3) 作为左值的带参数的宏: FOO(x) = y;如果有人把FOO变成一个内联函数的话,这种用
+3) 作为左值的带参数的宏: FOO(x) = y;如果有人把 FOO 变成一个内联函数的话,这种用
 法就会出错了。
 
 4) 忘记了优先级:使用表达式定义常量的宏必须将表达式置于一对小括号之内。带参数的
 宏也要注意此类问题。
 
-#define CONSTANT 0x4000
-#define CONSTEXP (CONSTANT | 3)
+       #define CONSTANT 0x4000
+       #define CONSTEXP (CONSTANT | 3)
+
+5) 在宏里定义类似函数的本地变量时命名冲突:
 
-cpp手册对宏的讲解很详细。Gcc internals手册也详细讲解了RTL(译注:register
+       #define FOO(x)                          \
+       ({                                      \
+               typeof(x) ret;                  \
+               ret = calc_ret(x);              \
+               (ret);                          \
+       })
+
+ret 是本地变量的常见名字,__foo_ret 则更不容易与一个已经存在的变量发生冲突。
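+
+(译注:按此建议改写后的版本:)
+
+       #define FOO(x)                          \
+       ({                                      \
+               typeof(x) __foo_ret;            \
+               __foo_ret = calc_ret(x);        \
+               (__foo_ret);                    \
+       })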
+
+cpp 手册对宏的讲解很详细。gcc internals 手册也详细讲解了 RTL(译注:register
 transfer language),内核里的汇编语言经常用到它。
 
 
                第十三章:打印内核消息
 
 内核开发者应该是受过良好教育的。请一定注意内核信息的拼写,以给人以好的印象。不要
-用不规范的单词比如“dont”,而要用“do not”或者“don't”。保证这些信息简单、明了、无
-歧义。
+用不规范的单词比如 “dont”,而要用 “do not” 或者 “don't”。保证这些信息简单、明了、
+无歧义。
 
 内核信息不必以句号(译注:英文句号,即点)结束。
 
-在小括号里打印数字(%d)没有任何价值,应该避免这样做。
+在小括号里打印数字 (%d) 没有任何价值,应该避免这样做。
 
-<linux/device.h>里有一些驱动模型诊断宏,你应该使用它们,以确保信息对应于正确的
-设备和驱动,并且被标记了正确的消息级别。这些宏有:dev_err(), dev_warn(),
-dev_info()等等。对于那些不和某个特定设备相关连的信息,<linux/kernel.h>定义了
-pr_debug()和pr_info()
+<linux/device.h> 里有一些驱动模型诊断宏,你应该使用它们,以确保信息对应于正确的
+设备和驱动,并且被标记了正确的消息级别。这些宏有:dev_err(),dev_warn(),
+dev_info() 等等。对于那些不和某个特定设备相关连的信息,<linux/printk.h> 定义了
+pr_notice(),pr_info(),pr_warn(),pr_err() 等等。
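+
+(译注:一个示意性的用法,消息内容和变量名均为假设:)
+
+       dev_err(&pdev->dev, "DMA setup failed: %d\n", ret);
+       pr_info("device initialized in %u ms\n", elapsed_ms);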
 
-写出好的调试信息可以是一个很大的挑战;当你写出来之后,这些信息在远程除错的时候
-就会成为极大的帮助。当DEBUG符号没有被定义的时候,这些信息不应该被编译进内核里
-(也就是说,默认地,它们不应该被包含在内)。如果你使用dev_dbg()或者pr_debug(),
-就能自动达到这个效果。很多子系统拥有Kconfig选项来启用-DDEBUG。还有一个相关的惯例
-是使用VERBOSE_DEBUG来添加dev_vdbg()消息到那些已经由DEBUG启用的消息之上。
+写出好的调试信息可以是一个很大的挑战;一旦写出来,这些信息在远程除错时能提供极大
+的帮助。然而,调试信息的打印方式和非调试信息不同:其他 pr_XXX() 函数会无条件地打
+印,pr_debug() 则不会,它默认不会被编译进去,除非定义了 DEBUG 或者设定了
+CONFIG_DYNAMIC_DEBUG。dev_dbg() 也是如此。一个相关的约定是使用 VERBOSE_DEBUG,
+在 DEBUG 已经启用的消息之上再添加 dev_vdbg() 消息。
+
+许多子系统拥有 Kconfig 调试选项,用来在对应的 Makefile 里开启 -DDEBUG;其他情况
+下,个别文件会 #define DEBUG。当一条调试信息需要被无条件打印时(例如它已经位于一
+个调试相关的 #ifdef 代码段内),可以使用 printk(KERN_DEBUG ...)。
 
 
                第十四章:分配内存
 
-内核提供了下面的一般用途的内存分配函数:kmalloc(),kzalloc(),kcalloc()和
-vmalloc()。请参考API文档以获取有关它们的详细信息。
+内核提供了下面的一般用途的内存分配函数:
+kmalloc(),kzalloc(),kmalloc_array(),kcalloc(),vmalloc() 和 vzalloc()。
+请参考 API 文档以获取有关它们的详细信息。
 
 传递结构体大小的首选形式是这样的:
 
        p = kmalloc(sizeof(*p), ...);
 
-另外一种传递方式中,sizeof的操作数是结构体的名字,这样会降低可读性,并且可能会引
-入bug。有可能指针变量类型被改变时,而对应的传递给内存分配函数的sizeof的结果不变。
+另外一种传递方式中,sizeof 的操作数是结构体的名字,这样会降低可读性,并且可能会引
+入 bug:当指针变量的类型被改变时,传递给内存分配函数的 sizeof 的结果却可能没有随之改变。
 
-强制转换一个void指针返回值是多余的。C语言本身保证了从void指针到其他任何指针类型
+强制转换一个 void 指针返回值是多余的。C 语言本身保证了从 void 指针到其他任何指针类型
 的转换是没有问题的。
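+
+(译注:示意,GFP_KERNEL 为常见的分配标志:)
+
+       p = kmalloc(sizeof(*p), GFP_KERNEL);                    /* 好 */
+       p = (struct foo *)kmalloc(sizeof(*p), GFP_KERNEL);      /* 多余的强制转换 */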
 
+分配一个数组的首选形式是这样的:
+
+       p = kmalloc_array(n, sizeof(...), ...);
+
+分配一个零长数组的首选形式是这样的:
+
+       p = kcalloc(n, sizeof(...), ...);
+
+两种形式都会检查分配大小 n * sizeof(...) 是否溢出,溢出时返回 NULL。
+
 
                第十五章:内联弊病
 
-有一个常见的误解是内联函数是gcc提供的可以让代码运行更快的一个选项。虽然使用内联
+有一个常见的误解是内联函数是 gcc 提供的可以让代码运行更快的一个选项。虽然使用内联
 函数有时候是恰当的(比如作为一种替代宏的方式,请看第十二章),不过很多情况下不是
-这样。inline关键字的过度使用会使内核变大,从而使整个系统运行速度变慢。因为大内核
+这样。inline 关键字的过度使用会使内核变大,从而使整个系统运行速度变慢。因为大内核
 会占用更多的指令高速缓存(译注:一级缓存通常是指令缓存和数据缓存分开的)而且会导
-致pagecache的可用内存减少。想象一下,一次pagecache未命中就会导致一次磁盘寻址,将
-耗时5毫秒。5毫秒的时间内CPU能执行很多很多指令。
+致 pagecache 的可用内存减少。想象一下,一次 pagecache 未命中就会导致一次磁盘寻址,
+将耗时 5 毫秒。5 毫秒的时间内 CPU 能执行很多很多指令。
 
-一个基本的原则是如果一个函数有3行以上,就不要把它变成内联函数。这个原则的一个例
+一个基本的原则是如果一个函数有 3 行以上,就不要把它变成内联函数。这个原则的一个例
 外是,如果你知道某个参数是一个编译时常量,而且因为这个常量你确定编译器在编译时能
-优化掉你的函数的大部分代码,那仍然可以给它加上inline关键字。kmalloc()内联函数就
+优化掉你的函数的大部分代码,那仍然可以给它加上 inline 关键字。kmalloc() 内联函数就
 是一个很好的例子。
 
-人们经常主张给static的而且只用了一次的函数加上inline,如此不会有任何损失,因为没
-有什么好权衡的。虽然从技术上说这是正确的,但是实际上这种情况下即使不加inline gcc
-也可以自动使其内联。而且其他用户可能会要求移除inline,由此而来的争论会抵消inline
+人们经常主张给 static 的而且只用了一次的函数加上 inline,如此不会有任何损失,因为没
+有什么好权衡的。虽然从技术上说这是正确的,但是实际上这种情况下即使不加 inline gcc
+也可以自动使其内联。而且其他用户可能会要求移除 inline,由此而来的争论会抵消 inline
 自身的潜在价值,得不偿失。
 
 
@@ -613,37 +674,37 @@ vmalloc()。请参考API文档以获取有关它们的详细信息。
 的一个值可以表示为一个错误代码整数(-Exxx=失败,0=成功)或者一个“成功”布尔值(
 0=失败,非0=成功)。
 
-混合使用这两种表达方式是难于发现的bug的来源。如果C语言本身严格区分整形和布尔型变
-量,那么编译器就能够帮我们发现这些错误……不过C语言不区分。为了避免产生这种bug,请
+混合使用这两种表达方式是难于发现的 bug 的来源。如果 C 语言本身严格区分整型和布尔型变
+量,那么编译器就能够帮我们发现这些错误……不过 C 语言不区分。为了避免产生这种 bug,请
 遵循下面的惯例:
 
        如果函数的名字是一个动作或者强制性的命令,那么这个函数应该返回错误代码整
        数。如果是一个判断,那么函数应该返回一个“成功”布尔值。
 
-比如,“add work”是一个命令,所以add_work()函数在成功时返回0,在失败时返回-EBUSY。
-类似的,因为“PCI device present”是一个判断,所以pci_dev_present()函数在成功找到
-一个匹配的设备时应该返回1,如果找不到时应该返回0。
+比如,“add work” 是一个命令,所以 add_work() 函数在成功时返回 0,在失败时返回 -EBUSY。
+类似的,因为 “PCI device present” 是一个判断,所以 pci_dev_present() 函数在成功找到
+一个匹配的设备时应该返回 1,如果找不到时应该返回 0。
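+
+(译注:两种惯例对应的原型示意,add_work() 为正文中的假想例子:)
+
+       int add_work(struct work_struct *work);                 /* 命令:0 = 成功,-EBUSY = 失败 */
+       int pci_dev_present(const struct pci_device_id *ids);   /* 判断:1 = 找到,0 = 没找到 */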
 
 所有导出(译注:EXPORT)的函数都必须遵守这个惯例,所有的公共函数也都应该如此。私
 有(static)函数不需要如此,但是我们也推荐这样做。
 
 返回值是实际计算结果而不是计算是否成功的标志的函数不受此惯例的限制。一般的,他们
 通过返回一些正常值范围之外的结果来表示出错。典型的例子是返回指针的函数,他们使用
-NULL或者ERR_PTR机制来报告错误。
+NULL 或者 ERR_PTR 机制来报告错误。
 
 
                第十七章:不要重新发明内核宏
 
-头文件include/linux/kernel.h包含了一些宏,你应该使用它们,而不要自己写一些它们的
+头文件 include/linux/kernel.h 包含了一些宏,你应该使用它们,而不要自己写一些它们的
 变种。比如,如果你需要计算一个数组的长度,使用这个宏
 
-  #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+       #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
 类似的,如果你要计算某结构体成员的大小,使用
 
-  #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+       #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
 
-还有可以做严格的类型检查的min()和max()宏,如果你需要可以使用它们。你可以自己看看
+还有可以做严格的类型检查的 min() 和 max() 宏,如果你需要可以使用它们。你可以自己看看
 那个头文件里还定义了什么你可以拿来用的东西,如果有定义的话,你就不应在你的代码里
 自己重新定义。
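+
+(译注:min()/max() 的用法示意:)
+
+       int shorter = min(a, b);        /* a 和 b 类型不一致时编译器会告警 */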
 
@@ -653,42 +714,100 @@ NULL或者ERR_PTR机制来报告错误。
 有一些编辑器可以解释嵌入在源文件里的由一些特殊标记标明的配置信息。比如,emacs
 能够解释被标记成这样的行:
 
--*- mode: c -*-
+       -*- mode: c -*-
 
 或者这样的:
 
-/*
-Local Variables:
-compile-command: "gcc -DMAGIC_DEBUG_FLAG foo.c"
-End:
-*/
+       /*
+       Local Variables:
+       compile-command: "gcc -DMAGIC_DEBUG_FLAG foo.c"
+       End:
+       */
 
-Vim能够解释这样的标记:
+Vim 能够解释这样的标记:
 
-/* vim:set sw=8 noet */
+       /* vim:set sw=8 noet */
 
 不要在源代码中包含任何这样的内容。每个人都有他自己的编辑器配置,你的源文件不应
 该覆盖别人的配置。这包括有关缩进和模式配置的标记。人们可以使用他们自己定制的模
 式,或者使用其他可以产生正确的缩进的巧妙方法。
 
 
+               第十九章:内联汇编
+
+在特定架构的代码中,你也许需要内联汇编来使用 CPU 接口和平台相关功能。在需要
+这么做时,不要犹豫。然而,当 C 可以完成工作时,不要无端地使用内联汇编。如果
+可能,你可以并且应该用 C 和硬件交互。
+
+可以考虑编写简单的辅助函数,把常用的内联汇编片段封装起来,而不是带着细微差别把它
+们重复写很多次。记住,内联汇编可以使用 C 参数。
+
+大而特殊的汇编函数应该放在 .S 文件中,对应 C 的原型定义在 C 头文件中。汇编
+函数的 C 原型应该使用 “asmlinkage”。
+
+你可能需要把你的汇编语句标记为 volatile,以防止 GCC 在发现不了任何副作用后将其移
+除。不过你并不总是需要这样做,不必要的 volatile 反而会限制优化。
+
+在写一个包含多条指令的单个内联汇编语句时,把每条指令都放在单独一行的独立引号字符
+串中,并且除最后一个字符串之外,每个字符串都以 \n\t 结尾,使汇编输出中的下一条指
+令获得适当的缩进:
+
+       asm ("magic %reg1, #42\n\t"
+            "more_magic %reg2, %reg3"
+            : /* outputs */ : /* inputs */ : /* clobbers */);
+
+
+               第二十章:条件编译
+
+只要可能,就不要在 .c 文件里面使用预处理条件;这样做会让代码更难阅读,逻辑更难跟
+踪。替代方案是:把这类条件放进头文件,在头文件中为这些 .c 文件定义函数,并在
+#else 分支里提供空操作的桩版本(译注:桩程序,是指用来替换一部分功能的程序段),
+然后在 .c 文件中无条件地调用这些函数。编译器不会为桩调用生成任何代码,产生的结果
+是一致的,但逻辑将更加清晰。
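+
+(译注:上述做法的示意,符号名均为假设:)
+
+       /* 头文件中: */
+       #ifdef CONFIG_SOMETHING
+       void do_something(void);
+       #else
+       static inline void do_something(void) { }
+       #endif
+
+       /* .c 文件中则无条件地调用: */
+       do_something();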
+
+宁可对整个函数做条件编译,而不要只编译函数的一部分或表达式的一部分。与其在表达式
+里加 ifdef,不如把表达式的一部分或全部提取到单独的辅助函数中,再对该函数应用条件
+编译。
+
+如果某个函数或变量在特定配置下可能不会被使用,而编译器会警告它定义了却未被使用,
+那么请把该定义标记为 __maybe_unused,而不要把它包进一个预处理条件里。(然而,如
+果一个函数或变量总是未使用的,就直接删除它。)
+
+在代码中,可能的情况下,使用 IS_ENABLED 宏来转化某个 Kconfig 标记为 C 的布尔
+表达式,并在正常的 C 条件中使用它:
+
+       if (IS_ENABLED(CONFIG_SOMETHING)) {
+               ...
+       }
+
+编译器会无条件地做常数合并,像使用 #ifdef 那样包含或排除代码块,因此这不会带来任
+何运行时开销。不过,这种方法依旧允许 C 编译器查看块内的代码,并检查它的正确性(语
+法,类型,符号引用,等等)。因此,如果代码块内引用的符号在条件不满足时并不存在,
+你就还是必须使用 #ifdef。
+
+在任何较长的(超过几行的)#if 或 #ifdef 块的末尾,在 #endif 的同一行后面写下注
+释,指出所使用的条件表达式。例如:
+
+       #ifdef CONFIG_SOMETHING
+       ...
+       #endif /* CONFIG_SOMETHING */
+
 
                附录 I:参考
 
-The C Programming Language, 第二版, 作者Brian W. Kernighan和Denni
-M. Ritchie. Prentice Hall, Inc., 1988. ISBN 0-13-110362-8 (软皮),
-0-13-110370-9 (硬皮). URL: http://cm.bell-labs.com/cm/cs/cbook/
+The C Programming Language, 第二版
+作者:Brian W. Kernighan 和 Dennis M. Ritchie.
+Prentice Hall, Inc., 1988.
+ISBN 0-13-110362-8 (软皮), 0-13-110370-9 (硬皮).
 
-The Practice of Programming 作者Brian W. Kernighan和Rob Pike.  Addison-Wesley,
-Inc., 1999.  ISBN 0-201-61586-X.  URL: http://cm.bell-labs.com/cm/cs/tpop/
+The Practice of Programming
+作者:Brian W. Kernighan 和 Rob Pike.
+Addison-Wesley, Inc., 1999.
+ISBN 0-201-61586-X.
 
-cpp,gcc,gcc internals和indent的GNU手册——和K&R及本文相符合的部分,全部可以在
-http://www.gnu.org/manual/找到
+GNU 手册 - 在与 K&R 及本文一致的部分 - cpp、gcc、gcc internals 和 indent,
+都可以从 http://www.gnu.org/manual/ 找到。
 
 WG14是C语言的国际标准化工作组,URL: http://www.open-std.org/JTC1/SC22/WG14/
 
-Kernel CodingStyle,作者greg@kroah.com发表于OLS 2002:
+Kernel CodingStyle,作者 greg@kroah.com 发表于 OLS 2002:
 http://www.kroah.com/linux/talks/ols_2002_kernel_codingstyle_talk/html/
-
---
-最后更新于2007年7月13日。
index 949a920ca1984d134754a58d183bc29688276618..6fc95411d7053609c0ad58d054f9edbbaf01b3c8 100644 (file)
@@ -595,6 +595,10 @@ S: Odd Fixes
 L:     linux-alpha@vger.kernel.org
 F:     arch/alpha/
 
+ALPS PS/2 TOUCHPAD DRIVER
+R:     Pali Rohár <pali.rohar@gmail.com>
+F:     drivers/input/mouse/alps.*
+
 ALTERA MAILBOX DRIVER
 M:     Ley Foon Tan <lftan@altera.com>
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
@@ -865,9 +869,17 @@ F: Documentation/devicetree/bindings/display/snps,arcpgu.txt
 ARM HDLCD DRM DRIVER
 M:     Liviu Dudau <liviu.dudau@arm.com>
 S:     Supported
-F:     drivers/gpu/drm/arm/
+F:     drivers/gpu/drm/arm/hdlcd_*
 F:     Documentation/devicetree/bindings/display/arm,hdlcd.txt
 
+ARM MALI-DP DRM DRIVER
+M:     Liviu Dudau <liviu.dudau@arm.com>
+M:     Brian Starkey <brian.starkey@arm.com>
+M:     Mali DP Maintainers <malidp@foss.arm.com>
+S:     Supported
+F:     drivers/gpu/drm/arm/
+F:     Documentation/devicetree/bindings/display/arm,malidp.txt
+
 ARM MFM AND FLOPPY DRIVERS
 M:     Ian Molton <spyro@f2s.com>
 S:     Maintained
@@ -1159,6 +1171,7 @@ F:        arch/arm/mach-footbridge/
 ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 M:     Shawn Guo <shawnguo@kernel.org>
 M:     Sascha Hauer <kernel@pengutronix.de>
+R:     Fabio Estevam <fabio.estevam@nxp.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
@@ -1689,8 +1702,6 @@ S:        Maintained
 F:     drivers/edac/altera_edac.
 
 ARM/STI ARCHITECTURE
-M:     Srinivas Kandagatla <srinivas.kandagatla@gmail.com>
-M:     Maxime Coquelin <maxime.coquelin@st.com>
 M:     Patrice Chotard <patrice.chotard@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kernel@stlinux.com
@@ -1723,6 +1734,7 @@ F:        drivers/ata/ahci_st.c
 
 ARM/STM32 ARCHITECTURE
 M:     Maxime Coquelin <mcoquelin.stm32@gmail.com>
+M:     Alexandre Torgue <alexandre.torgue@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
@@ -2775,9 +2787,9 @@ F:        include/net/caif/
 F:     net/caif/
 
 CALGARY x86-64 IOMMU
-M:     Muli Ben-Yehuda <muli@il.ibm.com>
-M:     "Jon D. Mason" <jdmason@kudzu.us>
-L:     discuss@x86-64.org
+M:     Muli Ben-Yehuda <mulix@mulix.org>
+M:     Jon Mason <jdmason@kudzu.us>
+L:     iommu@lists.linux-foundation.org
 S:     Maintained
 F:     arch/x86/kernel/pci-calgary_64.c
 F:     arch/x86/kernel/tce_64.c
@@ -3088,6 +3100,7 @@ M:        Stephen Boyd <sboyd@codeaurora.org>
 L:     linux-clk@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/clock/
 F:     drivers/clk/
 X:     drivers/clk/clkdev.c
 F:     include/linux/clk-pr*
@@ -3769,6 +3782,17 @@ F:       include/linux/*fence.h
 F:     Documentation/dma-buf-sharing.txt
 T:     git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 
+SYNC FILE FRAMEWORK
+M:     Sumit Semwal <sumit.semwal@linaro.org>
+R:     Gustavo Padovan <gustavo@padovan.org>
+S:     Maintained
+L:     linux-media@vger.kernel.org
+L:     dri-devel@lists.freedesktop.org
+F:     drivers/dma-buf/sync_file.c
+F:     include/linux/sync_file.h
+F:     Documentation/sync_file.txt
+T:     git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:     Vinod Koul <vinod.koul@intel.com>
 L:     dmaengine@vger.kernel.org
@@ -3776,6 +3800,7 @@ Q:        https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:     Maintained
 F:     drivers/dma/
 F:     include/linux/dmaengine.h
+F:     Documentation/devicetree/bindings/dma/
 F:     Documentation/dmaengine/
 T:     git git://git.infradead.org/users/vkoul/slave-dma.git
 
@@ -3857,7 +3882,10 @@ T:       git git://people.freedesktop.org/~airlied/linux
 S:     Maintained
 F:     drivers/gpu/drm/
 F:     drivers/gpu/vga/
-F:     Documentation/DocBook/gpu.*
+F:     Documentation/devicetree/bindings/display/
+F:     Documentation/devicetree/bindings/gpu/
+F:     Documentation/devicetree/bindings/video/
+F:     Documentation/gpu/
 F:     include/drm/
 F:     include/uapi/drm/
 
@@ -3909,6 +3937,7 @@ S:        Supported
 F:     drivers/gpu/drm/i915/
 F:     include/drm/i915*
 F:     include/uapi/drm/i915_drm.h
+F:     Documentation/gpu/i915.rst
 
 DRM DRIVERS FOR ATMEL HLCDC
 M:     Boris Brezillon <boris.brezillon@free-electrons.com>
@@ -4104,6 +4133,21 @@ F:       drivers/gpu/drm/vc4/
 F:     include/uapi/drm/vc4_drm.h
 F:     Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
 
+DRM DRIVERS FOR TI OMAP
+M:     Tomi Valkeinen <tomi.valkeinen@ti.com>
+L:     dri-devel@lists.freedesktop.org
+S:     Maintained
+F:     drivers/gpu/drm/omapdrm/
+F:     Documentation/devicetree/bindings/display/ti/
+
+DRM DRIVERS FOR TI LCDC
+M:     Jyri Sarha <jsarha@ti.com>
+R:     Tomi Valkeinen <tomi.valkeinen@ti.com>
+L:     dri-devel@lists.freedesktop.org
+S:     Maintained
+F:     drivers/gpu/drm/tilcdc/
+F:     Documentation/devicetree/bindings/display/tilcdc/
+
 DSBR100 USB FM RADIO DRIVER
 M:     Alexey Klimov <klimov.linux@gmail.com>
 L:     linux-media@vger.kernel.org
@@ -4470,7 +4514,7 @@ S:        Orphan
 F:     fs/efs/
 
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M:     Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
+M:     Douglas Miller <dougmill@linux.vnet.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/ibm/ehea/
@@ -6496,6 +6540,7 @@ F:        include/uapi/linux/sunrpc/
 
 KERNEL SELFTEST FRAMEWORK
 M:     Shuah Khan <shuahkh@osg.samsung.com>
+M:     Shuah Khan <shuah@kernel.org>
 L:     linux-kselftest@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/shuah/linux-kselftest
 S:     Maintained
@@ -7440,7 +7485,7 @@ F:        drivers/scsi/megaraid.*
 F:     drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
-M:     Eugenia Emantayev <eugenia@mellanox.com>
+M:     Tariq Toukan <tariqt@mellanox.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
@@ -7492,6 +7537,7 @@ Q:        http://patchwork.ozlabs.org/project/linux-mtd/list/
 T:     git git://git.infradead.org/linux-mtd.git
 T:     git git://git.infradead.org/l2-mtd.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/mtd/
 F:     drivers/mtd/
 F:     include/linux/mtd/
 F:     include/uapi/mtd/
@@ -8022,6 +8068,7 @@ Q:        http://patchwork.ozlabs.org/project/netdev/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
 S:     Odd Fixes
+F:     Documentation/devicetree/bindings/net/
 F:     drivers/net/
 F:     include/linux/if_*
 F:     include/linux/netdevice.h
@@ -8040,6 +8087,7 @@ Q:        http://patchwork.kernel.org/project/linux-wireless/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/net/wireless/
 F:     drivers/net/wireless/
 
 NETXEN (1/10) GbE SUPPORT
@@ -8437,10 +8485,9 @@ F:       drivers/i2c/busses/i2c-ocores.c
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:     Rob Herring <robh+dt@kernel.org>
 M:     Frank Rowand <frowand.list@gmail.com>
-M:     Grant Likely <grant.likely@linaro.org>
 L:     devicetree@vger.kernel.org
 W:     http://www.devicetree.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
 S:     Maintained
 F:     drivers/of/
 F:     include/linux/of*.h
@@ -8448,12 +8495,10 @@ F:      scripts/dtc/
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 M:     Rob Herring <robh+dt@kernel.org>
-M:     Pawel Moll <pawel.moll@arm.com>
 M:     Mark Rutland <mark.rutland@arm.com>
-M:     Ian Campbell <ijc+devicetree@hellion.org.uk>
-M:     Kumar Gala <galak@codeaurora.org>
 L:     devicetree@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
+Q:     http://patchwork.ozlabs.org/project/devicetree-bindings/list/
 S:     Maintained
 F:     Documentation/devicetree/
 F:     arch/*/boot/dts/
@@ -8977,6 +9022,8 @@ M:        Linus Walleij <linus.walleij@linaro.org>
 L:     linux-gpio@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/pinctrl/
+F:     Documentation/pinctrl.txt
 F:     drivers/pinctrl/
 F:     include/linux/pinctrl/
 
@@ -11923,7 +11970,8 @@ F:      drivers/usb/common/usb-otg-fsm.c
 
 USB OVER IP DRIVER
 M:     Valentina Manea <valentina.manea.m@gmail.com>
-M:     Shuah Khan <shuah.kh@samsung.com>
+M:     Shuah Khan <shuahkh@osg.samsung.com>
+M:     Shuah Khan <shuah@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     Documentation/usb/usbip_protocol.txt
@@ -11994,6 +12042,7 @@ L:      linux-usb@vger.kernel.org
 W:     http://www.linux-usb.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
 S:     Supported
+F:     Documentation/devicetree/bindings/usb/
 F:     Documentation/usb/
 F:     drivers/usb/
 F:     include/linux/usb.h
@@ -12167,6 +12216,7 @@ VIRTIO CORE, NET AND BLOCK DRIVERS
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 L:     virtualization@lists.linux-foundation.org
 S:     Maintained
+F:     Documentation/devicetree/bindings/virtio/
 F:     drivers/virtio/
 F:     tools/virtio/
 F:     drivers/net/virtio_net.c
index 0f70de63cfdb450c31e4a346d669047585c8063c..d30c59f9ce92afc4716db730431a6d7d317bc37b 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -363,11 +363,13 @@ CHECK             = sparse
 
 CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
                  -Wbitwise -Wno-return-void $(CF)
+NOSTDINC_FLAGS  =
 CFLAGS_MODULE   =
 AFLAGS_MODULE   =
 LDFLAGS_MODULE  =
 CFLAGS_KERNEL  =
 AFLAGS_KERNEL  =
+LDFLAGS_vmlinux =
 CFLAGS_GCOV    = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
 CFLAGS_KCOV    = -fsanitize-coverage=trace-pc
 
@@ -1412,8 +1414,11 @@ $(help-board-dirs): help-%:
 
 # Documentation targets
 # ---------------------------------------------------------------------------
-%docs: scripts_basic FORCE
+DOC_TARGETS := xmldocs sgmldocs psdocs pdfdocs htmldocs mandocs installmandocs epubdocs cleandocs
+PHONY += $(DOC_TARGETS)
+$(DOC_TARGETS): scripts_basic FORCE
        $(Q)$(MAKE) $(build)=scripts build_docproc build_check-lc_ctype
+       $(Q)$(MAKE) $(build)=Documentation -f $(srctree)/Documentation/Makefile.sphinx $@
        $(Q)$(MAKE) $(build)=Documentation/DocBook $@
 
 else # KBUILD_EXTMOD
index d794384a0404d2e9b1d1e9c3f6a966ca877c647c..15996290fed4a920f22e2687cb77944e9c3b3cfc 100644 (file)
@@ -226,8 +226,8 @@ config ARCH_INIT_TASK
 config ARCH_TASK_STRUCT_ALLOCATOR
        bool
 
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
        bool
 
 # Select if arch wants to size task_struct dynamically via arch_task_struct_size:
@@ -606,6 +606,9 @@ config HAVE_ARCH_HASH
          file which provides platform-specific implementations of some
          functions in <linux/hash.h> or fs/namei.c.
 
+config ISA_BUS_API
+       def_bool ISA
+
 #
 # ABI hall of shame
 #
index aab14a019c20f82c7cc60084026b120727971214..c2ebb6f36c9d3358699eac1ebb5f9dfad081b68f 100644 (file)
@@ -40,7 +40,7 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return ret;
 }
 
@@ -53,7 +53,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
index 0dcbacfdea4b98ca7805624a5ffb59b9298c4992..0d3e59f56974816f71140e2d0c51245f92a33768 100644 (file)
@@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
 config ARCH_DISCONTIGMEM_ENABLE
-       def_bool y
+       def_bool n
 
 config ARCH_FLATMEM_ENABLE
        def_bool y
@@ -186,9 +186,6 @@ if SMP
 config ARC_HAS_COH_CACHES
        def_bool n
 
-config ARC_HAS_REENTRANT_IRQ_LV2
-       def_bool n
-
 config ARC_MCIP
        bool "ARConnect Multicore IP (MCIP) Support "
        depends on ISA_ARCV2
@@ -366,25 +363,10 @@ config NODES_SHIFT
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
-       bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+       bool "Setup Timer IRQ as high Priority"
        default n
-       # Timer HAS to be high priority, for any other high priority config
-       select ARC_IRQ3_LV2
        # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
-       depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
-
-if ARC_COMPACT_IRQ_LEVELS
-
-config ARC_IRQ3_LV2
-       bool
-
-config ARC_IRQ5_LV2
-       bool
-
-config ARC_IRQ6_LV2
-       bool
-
-endif  #ARC_COMPACT_IRQ_LEVELS
+       depends on !SMP
 
 config ARC_FPU_SAVE_RESTORE
        bool "Enable FPU state persistence across context switch"
@@ -407,11 +389,6 @@ config ARC_HAS_LLSC
        default y
        depends on !ARC_CANT_LLSC
 
-config ARC_STAR_9000923308
-       bool "Workaround for llock/scond livelock"
-       default n
-       depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
-
 config ARC_HAS_SWAPE
        bool "Insn: SWAPE (endian-swap)"
        default y
@@ -471,7 +448,7 @@ config LINUX_LINK_BASE
 
 config HIGHMEM
        bool "High Memory Support"
-       select DISCONTIGMEM
+       select ARCH_DISCONTIGMEM_ENABLE
        help
          With ARC 2G:2G address split, only upper 2G is directly addressable by
          kernel. Enable this to potentially allow access to rest of 2G and PAE
index 02fabef2891cd8bf1a6eda36bcf93549eea16158..85814e74677d54209908d41025fc329d159d2902 100644 (file)
@@ -66,8 +66,6 @@ endif
 
 endif
 
-cflags-$(CONFIG_ARC_DW2_UNWIND)                += -fasynchronous-unwind-tables
-
 # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
 ifeq ($(atleast_gcc48),y)
 cflags-$(CONFIG_ARC_DW2_UNWIND)                += -gdwarf-2
@@ -127,7 +125,7 @@ libs-y              += arch/arc/lib/ $(LIBGCC)
 
 boot           := arch/arc/boot
 
-#default target for make without any arguements.
+#default target for make without any arguments.
 KBUILD_IMAGE   := bootpImage
 
 all:   $(KBUILD_IMAGE)
index 3942634f805ade1545d30dd066d2ba88cdec3cac..02410b2114334466572c05e651b6227b02e415f0 100644 (file)
@@ -23,8 +23,6 @@
 
 
 / {
-       clock-frequency         = <500000000>;  /* 500 MHZ */
-
        soc100 {
                bus-frequency   = <166666666>;
 
index b0467229a5c45e9c10eb1cc2884b84b33ef2e603..f9e7686044ebee0a4c49e1d4824810e49747256e 100644 (file)
@@ -23,8 +23,6 @@
 
 
 / {
-       clock-frequency         = <500000000>;  /* 500 MHZ */
-
        soc100 {
                bus-frequency   = <166666666>;
 
index 3e02f152edcb5eac50870720be8ffe626cb1b1b6..6ae2c476ad825aee57fadb1e557b95a0bb082647 100644 (file)
@@ -15,7 +15,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <750000000>;  /* 750 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
 
index 378e455a94c4a86aa2af9a44a4d83c2cbbf771c2..14df46f141bf3450b8df0660bc0f248beef4669b 100644 (file)
@@ -14,7 +14,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <90000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index 64c94b2860ab569d03d4672db417ad1e3307f86a..3d6cfa32bf5142a50ff95b6f7e69871526172deb 100644 (file)
@@ -14,7 +14,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <90000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index b89f6c3eb35208e2686cf73017a08686a6cfad5e..1e0d225791c1250252e76697187a7d0ba8eb26ec 100644 (file)
@@ -18,7 +18,6 @@
 
 / {
        compatible = "ezchip,arc-nps";
-       clock-frequency = <83333333>;   /* 83.333333 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&intc>;
index 5d5e373e0ebc579c7f734d42212965c8187fa003..63970513e4aee2ceaadc2265eae1393115a0d2b6 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsim";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
index b5b060adce8a60dbde7778f0765720bd4617e724..e659a340ca8a7836735bc32bf5cc09c5e4b5907f 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsimosci";
-       clock-frequency = <20000000>;   /* 20 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
@@ -20,7 +19,7 @@
                /* this is for console on PGU */
                /* bootargs = "console=tty0 consoleblank=0"; */
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
        };
 
        aliases {
                        no-loopback-test = <1>;
                };
 
-               pgu0: pgu@f9000000 {
-                       compatible = "snps,arcpgufb";
+               pguclk: pguclk {
+                       #clock-cells = <0>;
+                       compatible = "fixed-clock";
+                       clock-frequency = <25175000>;
+               };
+
+               pgu@f9000000 {
+                       compatible = "snps,arcpgu";
                        reg = <0xf9000000 0x400>;
+                       clocks = <&pguclk>;
+                       clock-names = "pxlclk";
                };
 
                ps2: ps2@f9001000 {
index 325e73090a18157ca286a2916c8950d760abaf8e..16ce5d65cfded4ac545eb2020604bc183d89d015 100644 (file)
@@ -11,7 +11,6 @@
 
 / {
        compatible = "snps,nsimosci_hs";
-       clock-frequency = <20000000>;   /* 20 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
@@ -20,7 +19,7 @@
                /* this is for console on PGU */
                /* bootargs = "console=tty0 consoleblank=0"; */
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug video=640x480-24";
        };
 
        aliases {
                        no-loopback-test = <1>;
                };
 
-               pgu0: pgu@f9000000 {
-                       compatible = "snps,arcpgufb";
+               pguclk: pguclk {
+                       #clock-cells = <0>;
+                       compatible = "fixed-clock";
+                       clock-frequency = <25175000>;
+               };
+
+               pgu@f9000000 {
+                       compatible = "snps,arcpgu";
                        reg = <0xf9000000 0x400>;
+                       clocks = <&pguclk>;
+                       clock-names = "pxlclk";
                };
 
                ps2: ps2@f9001000 {
index ee03d71265816db05c43841d8a0e0981c6ca62c4..ce8dfbc30c4d1b67d55de644131769599a3e8ace 100644 (file)
 
 / {
        compatible = "snps,nsimosci_hs";
-       clock-frequency = <5000000>;    /* 5 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        interrupt-parent = <&core_intc>;
 
        chosen {
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblan=0 debug video=640x480-24";
        };
 
        aliases {
                        no-loopback-test = <1>;
                };
 
-               pgu0: pgu@f9000000 {
-                       compatible = "snps,arcpgufb";
+               pguclk: pguclk {
+                       #clock-cells = <0>;
+                       compatible = "fixed-clock";
+                       clock-frequency = <25175000>;
+               };
+
+               pgu@f9000000 {
+                       compatible = "snps,arcpgu";
                        reg = <0xf9000000 0x400>;
+                       clocks = <&pguclk>;
+                       clock-names = "pxlclk";
                };
 
                ps2: ps2@f9001000 {
index 3a10cc633e2b0e200b8eacfd16ff99e56a718652..65808fe0a290be15ded0903ad347540d0f35a52e 100644 (file)
@@ -13,7 +13,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        chosen { };
index 71fd308a9298aac6435d67e6863314d26e140396..2dfe8037dfbb34ac680597619ecef27319bed1a2 100644 (file)
@@ -8,7 +8,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        chosen { };
index d1cb25a6698952b461925d5a361223c8bc3c5080..4c11079f3565a3decc7f6401a92950df16565411 100644 (file)
@@ -8,7 +8,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <80000000>;   /* 80 MHZ */
        #address-cells = <1>;
        #size-cells = <1>;
        chosen { };
index ad4ee43bd2ac7700ee93d1c05ee7c84e474c4829..0fd6ba985b164b7752c26544e5d4b6d9684c88be 100644 (file)
@@ -14,7 +14,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <50000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index a3cb6263c581ea8a030bad036ec1ddebfc2b25f9..82214cd7ba0ca7406483ad80ad0cac88425b7a68 100644 (file)
@@ -15,7 +15,6 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <50000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index 45cd665fca232e80d244afe43239df125b1ac215..99498a4b42161cadaffd530566f28674a16986bf 100644 (file)
                                #clock-cells = <0>;
                        };
 
+                       pguclk: pguclk {
+                               #clock-cells = <0>;
+                               compatible = "fixed-clock";
+                               clock-frequency = <25175000>;
+                       };
                };
 
                ethernet@0x18000 {
                };
 
 /* PGU output directly sent to virtual LCD screen; hdmi controller not modelled */
-               pgu@0x17000 {
-                       compatible = "snps,arcpgufb";
+               pgu@17000 {
+                       compatible = "snps,arcpgu";
                        reg = <0x17000 0x400>;
-                       clock-frequency = <51000000>; /* PGU'clock is initated in init function */
-                       /* interrupts = <5>;   PGU interrupts not used, this vector is used for ps2 below */
+                       clocks = <&pguclk>;
+                       clock-names = "pxlclk";
                };
 
 /* VDK has additional ps2 keyboard/mouse interface integrated in LCD screen model */
index 031a5bc79b3e5751dad2de430d134efc83c2adf5..2ba60c399d99408fbe870e741fd3cd1e60df3602 100644 (file)
@@ -16,6 +16,6 @@
        compatible = "snps,axs103";
 
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0";
+               bootargs = "earlycon=uart8250,mmio32,0xe0022000,115200n8 console=tty0 console=ttyS3,115200n8 consoleblank=0 video=640x480-24";
        };
 };
index 42bafa552498fb5b65af2ca8ac00853d0ad562e3..98cf20933bbb3232da17fe94fdfe0ad3ddf38379 100644 (file)
@@ -58,7 +58,8 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=1
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-CONFIG_FB=y
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
index 4bb60c1cd4a2fcb895eac30b17e95ba5af8e5769..ddf8b96d494e90f4a776cdbea8276c54f0babc13 100644 (file)
@@ -57,7 +57,8 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=1
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-CONFIG_FB=y
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
index 7e88f4c720f80a874f53ae61d8146e5d704fe2d0..ceb90745326e52d85f3ca0a9092962c23b740910 100644 (file)
@@ -70,7 +70,8 @@ CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-CONFIG_FB=y
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_HID is not set
index 52ec315dc5c954722e182a41bd13372618287f2f..969b206d6c67bbbb72ad80edb104946b81197193 100644 (file)
@@ -63,12 +63,9 @@ CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_ARCPGU_RGB888=y
-CONFIG_ARCPGU_DISPTYPE=0
-# CONFIG_VGA_CONSOLE is not set
+CONFIG_DRM=y
+CONFIG_DRM_ARCPGU=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
index 5f3dcbbc0cc9c14df199c55ff9e916b64be3be0d..dd683995bc9d119136807adcbf71b189714fe359 100644 (file)
 
 #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#ifdef CONFIG_ARC_STAR_9000923308
-
-#define SCOND_FAIL_RETRY_VAR_DEF                                               \
-       unsigned int delay = 1, tmp;                                            \
-
-#define SCOND_FAIL_RETRY_ASM                                                   \
-       "       bz      4f                      \n"                             \
-       "   ; --- scond fail delay ---          \n"                             \
-       "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
-       "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
-       "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
-       "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
-       "       b       1b                      \n"     /* start over */        \
-       "4: ; --- success ---                   \n"                             \
-
-#define SCOND_FAIL_RETRY_VARS                                                  \
-         ,[delay] "+&r" (delay),[tmp] "=&r"    (tmp)                           \
-
-#else  /* !CONFIG_ARC_STAR_9000923308 */
-
-#define SCOND_FAIL_RETRY_VAR_DEF
-
-#define SCOND_FAIL_RETRY_ASM                                                   \
-       "       bnz     1b                      \n"                             \
-
-#define SCOND_FAIL_RETRY_VARS
-
-#endif
-
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
-       unsigned int val;                                               \
-       SCOND_FAIL_RETRY_VAR_DEF                                        \
+       unsigned int val;                                               \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
-       "                                               \n"             \
-       SCOND_FAIL_RETRY_ASM                                            \
-                                                                       \
+       "       bnz     1b                              \n"             \
        : [val] "=&r"   (val) /* Early clobber to prevent reg reuse */  \
-         SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter), /* Not "m": llock only supports reg direct addr mode */  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
@@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v)                    \
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
 static inline int atomic_##op##_return(int i, atomic_t *v)             \
 {                                                                      \
-       unsigned int val;                                               \
-       SCOND_FAIL_RETRY_VAR_DEF                                        \
+       unsigned int val;                                               \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
@@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
-       "                                               \n"             \
-       SCOND_FAIL_RETRY_ASM                                            \
-                                                                       \
+       "       bnz     1b                              \n"             \
        : [val] "=&r"   (val)                                           \
-         SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter),                                  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
index e0e1faf03c50177f69232f2990dd5e4a4ac19b6b..14c310f2e0b198ecac4d4d9f19a33d80b258005b 100644 (file)
@@ -76,8 +76,8 @@
         * We need to be a bit more cautious here. What if a kernel bug in
         * L1 ISR, caused SP to go whaco (some small value which looks like
         * USER stk) and then we take L2 ISR.
-        * Above brlo alone would treat it as a valid L1-L2 sceanrio
-        * instead of shouting alound
+        * Above brlo alone would treat it as a valid L1-L2 scenario
+        * instead of shouting around
         * The only feasible way is to make sure this L2 happened in
         * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
         * L1 ISR before it switches stack
index 1fd467ef658fe861b34f36247674d8f3e167acad..b0b87f2447f5248b3b2bcf1e13b14a5ad85d2d44 100644 (file)
@@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
                local_flush_tlb_all();
 
                /*
-                * Above checke for rollover of 8 bit ASID in 32 bit container.
+                * Above check for rollover of 8 bit ASID in 32 bit container.
                 * If the container itself wrapped around, set it to a non zero
                 * "generation" to distinguish from no context
                 */
index 86ed671286df377bb6231013724737a135a86d69..3749234b74196cb2f4770b7ed19290aa9cdcc98b 100644 (file)
@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                         __get_order_pte());
 
        return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
        pgtable_t pte_pg;
        struct page *page;
 
-       pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+       pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
        if (!pte_pg)
                return 0;
        memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
index 034bbdc0ff61c3bffadd0284d776240d8137767d..858f98ef7f1ba3b1fd1ccda9298f2484f7749418 100644 (file)
@@ -47,7 +47,7 @@
  * Page Tables are purely for Linux VM's consumption and the bits below are
  * suited to that (uniqueness). Hence some are not implemented in the TLB and
  * some have different value in TLB.
- * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
  *      seperate PD0 and PD1, which combined forms a translation entry)
  *      while for PTE perspective, they are 8 and 9 respectively
  * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
index f9048994b22f857a487205de5b5a25c4d3dc1f5f..16b630fbeb6acfb09fc59d20c4f965a0bb3e59aa 100644 (file)
@@ -78,7 +78,7 @@ struct task_struct;
 #define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
 
 /*
- * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * Where about of Task's sp, fp, blink when it was last seen in kernel mode.
  * Look in process.c for details of kernel stack layout
  */
 #define TSK_K_ESP(tsk)         (tsk->thread.ksp)
index 991380438d6bfd6af03e6fff4748b8c5342b6a70..89fdd1b0a76ebe672094daa10134204cb96cf925 100644 (file)
@@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void)
  * (1) These insn were introduced only in 4.10 release. So for older released
  *     support needed.
  *
- * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
  *     gaurantted by the platform (not something which core handles).
  *     Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
  *     disabling for atomicity.
index 800e7c430ca589724c9467a3a9689aad6faf6bc3..cded4a9b543822d5ff9e2099fb05c67bdfc8e84a 100644 (file)
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
-/*
- * A normal LLOCK/SCOND based system, w/o need for livelock workaround
- */
-#ifndef CONFIG_ARC_STAR_9000923308
-
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
        unsigned int val;
@@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
        smp_mb();
 }
 
-#else  /* CONFIG_ARC_STAR_9000923308 */
-
-/*
- * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
- * coherency transactions in the SCU. The exclusive line state keeps rotating
- * among contenting cores leading to a never ending cycle. So break the cycle
- * by deferring the retry of failed exclusive access (SCOND). The actual delay
- * needed is function of number of contending cores as well as the unrelated
- * coherency traffic from other cores. To keep the code simple, start off with
- * small delay of 1 which would suffice most cases and in case of contention
- * double the delay. Eventually the delay is sufficient such that the coherency
- * pipeline is drained, thus a subsequent exclusive access would succeed.
- */
-
-#define SCOND_FAIL_RETRY_VAR_DEF                                               \
-       unsigned int delay, tmp;                                                \
-
-#define SCOND_FAIL_RETRY_ASM                                                   \
-       "   ; --- scond fail delay ---          \n"                             \
-       "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
-       "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
-       "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
-       "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
-       "       b       1b                      \n"     /* start over */        \
-       "                                       \n"                             \
-       "4: ; --- done ---                      \n"                             \
-
-#define SCOND_FAIL_RETRY_VARS                                                  \
-         ,[delay] "=&r" (delay), [tmp] "=&r"   (tmp)                           \
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-       unsigned int val;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[slock]]      \n"
-       "       breq    %[val], %[LOCKED], 0b   \n"     /* spin while LOCKED */
-       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
-       "       bz      4f                      \n"     /* done */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val)
-         SCOND_FAIL_RETRY_VARS
-       : [slock]       "r"     (&(lock->slock)),
-         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-       unsigned int val, got_it = 0;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[slock]]      \n"
-       "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
-       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
-       "       bz.d    4f                      \n"
-       "       mov.z   %[got_it], 1            \n"     /* got it */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val),
-         [got_it]      "+&r"   (got_it)
-         SCOND_FAIL_RETRY_VARS
-       : [slock]       "r"     (&(lock->slock)),
-         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
-       : "memory", "cc");
-
-       smp_mb();
-
-       return got_it;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-       smp_mb();
-
-       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-       smp_mb();
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers but only one writer.
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
- */
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       /*
-        * zero means writer holds the lock exclusively, deny Reader.
-        * Otherwise grant lock to first/subseq reader
-        *
-        *      if (rw->counter > 0) {
-        *              rw->counter--;
-        *              ret = 1;
-        *      }
-        */
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brls    %[val], %[WR_LOCKED], 0b\n"     /* <= 0: spin while write locked */
-       "       sub     %[val], %[val], 1       \n"     /* reader lock */
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz      4f                      \n"     /* done */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-       unsigned int val, got_it = 0;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
-       "       sub     %[val], %[val], 1       \n"     /* counter-- */
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz.d    4f                      \n"
-       "       mov.z   %[got_it], 1            \n"     /* got it */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val),
-         [got_it]      "+&r"   (got_it)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-
-       return got_it;
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       /*
-        * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
-        * deny writer. Otherwise if unlocked grant to writer
-        * Hence the claim that Linux rwlocks are unfair to writers.
-        * (can be starved for an indefinite time by readers).
-        *
-        *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
-        *              rw->counter = 0;
-        *              ret = 1;
-        *      }
-        */
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brne    %[val], %[UNLOCKED], 0b \n"     /* while !UNLOCKED spin */
-       "       mov     %[val], %[WR_LOCKED]    \n"
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz      4f                      \n"
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-/* 1 - lock taken successfully */
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-       unsigned int val, got_it = 0;
-       SCOND_FAIL_RETRY_VAR_DEF;
-
-       smp_mb();
-
-       __asm__ __volatile__(
-       "0:     mov     %[delay], 1             \n"
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
-       "       mov     %[val], %[WR_LOCKED]    \n"
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bz.d    4f                      \n"
-       "       mov.z   %[got_it], 1            \n"     /* got it */
-       "                                       \n"
-       SCOND_FAIL_RETRY_ASM
-
-       : [val]         "=&r"   (val),
-         [got_it]      "+&r"   (got_it)
-         SCOND_FAIL_RETRY_VARS
-       : [rwlock]      "r"     (&(rw->counter)),
-         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
-         [WR_LOCKED]   "ir"    (0)
-       : "memory", "cc");
-
-       smp_mb();
-
-       return got_it;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-
-       smp_mb();
-
-       /*
-        * rw->counter++;
-        */
-       __asm__ __volatile__(
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       add     %[val], %[val], 1       \n"
-       "       scond   %[val], [%[rwlock]]     \n"
-       "       bnz     1b                      \n"
-       "                                       \n"
-       : [val]         "=&r"   (val)
-       : [rwlock]      "r"     (&(rw->counter))
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-       unsigned int val;
-
-       smp_mb();
-
-       /*
-        * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-        */
-       __asm__ __volatile__(
-       "1:     llock   %[val], [%[rwlock]]     \n"
-       "       scond   %[UNLOCKED], [%[rwlock]]\n"
-       "       bnz     1b                      \n"
-       "                                       \n"
-       : [val]         "=&r"   (val)
-       : [rwlock]      "r"     (&(rw->counter)),
-         [UNLOCKED]    "r"     (__ARCH_RW_LOCK_UNLOCKED__)
-       : "memory", "cc");
-
-       smp_mb();
-}
-
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
-
-#endif /* CONFIG_ARC_STAR_9000923308 */
-
 #else  /* !CONFIG_ARC_HAS_LLSC */
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
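
For reference, the ARC_STAR_9000923308 branch removed above implemented an exponential backoff on failed exclusive stores. Below is a hedged, self-contained C11 sketch of that retry policy; try_acquire() and the busy-wait loop are illustrative stand-ins for the LLOCK/SCOND pair and the cycle-counting delay, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>

/* Stand-in for one LLOCK/SCOND attempt: returns false on contention,
 * like a failed SCOND (compare_exchange_weak may also fail spuriously,
 * which matches the semantics well enough for a sketch). */
static bool try_acquire(atomic_uint *lock)
{
        unsigned int unlocked = 0;
        return atomic_compare_exchange_weak(lock, &unlocked, 1);
}

/* Mirror of SCOND_FAIL_RETRY_ASM: start with delay = 1 and double it on
 * every failure, so contending cores back off until the coherency
 * pipeline drains and the exclusive store finally succeeds. */
static void lock_with_backoff(atomic_uint *lock)
{
        unsigned int delay = 1;

        while (!try_acquire(lock)) {
                for (volatile unsigned int tmp = delay; tmp; tmp--)
                        ;               /* busy-wait 'delay' iterations */
                delay *= 2;             /* the asm uses 'rol' for this */
        }
}
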
index 3af67455659af49d9f9d53b67ac26024cf3b77b9..2d79e527fa50a305e932259d336423d9d89c3fae 100644 (file)
@@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
 
 /*
  * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anways seperately/unconditionally tested right after a
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
  * syscall, so all that reamins to be tested is _TIF_WORK_MASK
  */
 
index d1da6032b715a7fea35d71fcedeeeb8cdfe590e4..a78d5670884f32490d2341b2468bf606258b6654 100644 (file)
@@ -32,7 +32,7 @@
 #define __kernel_ok            (segment_eq(get_fs(), KERNEL_DS))
 
 /*
- * Algorthmically, for __user_ok() we want do:
+ * Algorithmically, for __user_ok() we want to do:
  *     (start < TASK_SIZE) && (start+len < TASK_SIZE)
  * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
  * emitted directly in code.
index 095599a73195c35bd6aabbc301bfe8dcafd3d52a..71f3918b0fc304bd0eeadb024bcbf9fa3ba32418 100644 (file)
@@ -74,7 +74,7 @@
        __tmp ^ __in;                                           \
 })
 
-#elif (ARC_BSWAP_TYPE == 2)    /* Custom single cycle bwap instruction */
+#elif (ARC_BSWAP_TYPE == 2)    /* Custom single cycle bswap instruction */
 
 #define __arch_swab32(x)                                               \
 ({                                                                     \
index 0cb0abaa0479e53ab1ea7ef8e45ab0cd42d2e965..98812c1248dfaf85b28ed023287ae78f27286073 100644 (file)
@@ -91,27 +91,13 @@ VECTOR   mem_service             ; 0x8, Mem exception   (0x1)
 VECTOR   instr_service           ; 0x10, Instrn Error   (0x2)
 
 ; ******************** Device ISRs **********************
-#ifdef CONFIG_ARC_IRQ3_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-VECTOR   handle_interrupt_level1
-
-#ifdef CONFIG_ARC_IRQ5_LV2
-VECTOR   handle_interrupt_level2
-#else
-VECTOR   handle_interrupt_level1
-#endif
-
-#ifdef CONFIG_ARC_IRQ6_LV2
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
 VECTOR   handle_interrupt_level2
 #else
 VECTOR   handle_interrupt_level1
 #endif
 
-.rept   25
+.rept   28
 VECTOR   handle_interrupt_level1 ; Other devices
 .endr
 
index c5cceca36118744f8c06359bc060f250974313df..ce9deb953ca90e1a0309ccd69f9608e8fd32cf70 100644 (file)
@@ -28,10 +28,8 @@ void arc_init_IRQ(void)
 {
        int level_mask = 0;
 
-       /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
-       level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;
+       /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
+       level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;
 
        /*
         * Write to register, even if no LV2 IRQs configured to reset it
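
The replacement line above relies on IS_ENABLED() (from <linux/kconfig.h>) expanding to a compile-time 0 or 1, so a Kconfig option can be shifted straight into a bitmask instead of being wrapped in #ifdef blocks. A minimal sketch of the idiom, with stand-in names for the config symbol and IRQ number:

/* CONFIG_DEMO_LV2 and DEMO_TIMER_IRQ are illustrative stand-ins for the
 * real Kconfig result and TIMER0_IRQ. */
#define CONFIG_DEMO_LV2 1       /* pretend Kconfig said 'y' */
#define DEMO_TIMER_IRQ  3

static unsigned int build_level_mask(void)
{
        unsigned int level_mask = 0;

        /* a 0-or-1 value shifted into place replaces per-IRQ #ifdefs */
        level_mask |= CONFIG_DEMO_LV2 << DEMO_TIMER_IRQ;

        return level_mask;      /* 0x8 when the option is enabled */
}
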
index 6fd48021324b50acd326a4ce9a17031f05db7609..08f03d9b5b3e544fcaf3e1696b8ea8e87cb943af 100644 (file)
@@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event,
        int64_t delta = new_raw_count - prev_raw_count;
 
        /*
-        * We don't afaraid of hwc->prev_count changing beneath our feet
+        * We aren't afraid of hwc->prev_count changing beneath our feet
         * because there's no way for us to re-enter this function anytime.
         */
        local64_set(&hwc->prev_count, new_raw_count);
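
For context, the function follows the usual perf "delta since last read" pattern: subtract the previous raw counter value, record the new one, and accumulate only the increment. A hedged sketch with plain C types in place of local64_t:

#include <stdint.h>

struct demo_counter {
        uint64_t prev_raw;      /* last raw value read from hardware */
        uint64_t total;         /* accumulated event count */
};

/* Unsigned wraparound arithmetic keeps the delta correct even when the
 * raw hardware counter overflows between two reads. */
static void demo_counter_update(struct demo_counter *c, uint64_t new_raw)
{
        int64_t delta = (int64_t)(new_raw - c->prev_raw);

        c->prev_raw = new_raw;  /* the driver uses local64_set() here */
        c->total += delta;
}
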
index f63b8bfefb0ca789dcbac3ad0459dfc4b3e98760..2ee7a4d758a882a2b4e36cacd74adb280648fced 100644 (file)
@@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p)
                /*
                 * If we are here, it is established that @uboot_arg didn't
                 * point to DT blob. Instead if u-boot says it is cmdline,
-                * Appent to embedded DT cmdline.
+                * append to embedded DT cmdline.
                 * setup_machine_fdt() would have populated @boot_command_line
                 */
                if (uboot_tag == 1) {
index 004b7f0bc76cc58c6988547df1bb8705cd36004d..6cb3736b6b83613a95180cd3e9cb5ba12a9b2f7f 100644 (file)
@@ -34,7 +34,7 @@
  *  -ViXS were still seeing crashes when using insmod to load drivers.
  *   It turned out that the code to change Execute permssions for TLB entries
  *   of user was not guarded for interrupts (mod_tlb_permission)
- *   This was cauing TLB entries to be overwritten on unrelated indexes
+ *   This was causing TLB entries to be overwritten on unrelated indexes
  *
  * Vineetg: July 15th 2008: Bug #94183
  *  -Exception happens in Delay slot of a JMP, and before user space resumes,
index e0efff15a5aece29f9bf8ca45b95f856fc206eab..b9192a653b7e3a2a2a34c8ddaed1dfad037f30c0 100644 (file)
@@ -142,7 +142,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
         * prelogue is setup (callee regs saved and then fp set and not other
         * way around
         */
-       pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+       pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
        return 0;
 
 #endif
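
The pr_warn() to pr_warn_once() switch matters because this fallback path can be hit on every stack unwind, which would flood the log. A hedged userspace sketch of the warn-once idiom (the real macro is built on printk() and per-call-site state, not on this code):

#include <stdbool.h>
#include <stdio.h>

/* Emit a message at most once per call site. */
#define warn_once(...)                                  \
do {                                                    \
        static bool warned;                             \
        if (!warned) {                                  \
                warned = true;                          \
                fprintf(stderr, __VA_ARGS__);           \
        }                                               \
} while (0)

/* usage: warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n"); */
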
index a6f91e88ce36e3ea2a2c95d8eabdceffa48be7fc..934150e7ac4895ef9e4523fd1ffabbdacce2dd74 100644 (file)
@@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-/* called on user read(): display the couters */
+/* called on user read(): display the counters */
 static ssize_t tlb_stats_output(struct file *file,     /* file descriptor */
                                char __user *user_buf,  /* user buffer */
                                size_t len,             /* length of buffer */
index 9e5eddbb856f1f32dd25f4b13132dd527725249c..5a294b2c3cb307ac591293afae8c8a72409c97c5 100644 (file)
@@ -215,7 +215,7 @@ slc_chk:
  * ------------------
  * This ver of MMU supports variable page sizes (1k-16k): although Linux will
  * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggrevate aliasing
+ * However, from a hardware perspective, smaller page sizes aggravate aliasing
  * meaning more vaddr bits needed to disambiguate the cache-line-op ;
  * the existing scheme of piggybacking won't work for certain configurations.
  * Two new registers IC_PTAG and DC_PTAG inttoduced.
@@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
 
        /*
         * This is technically for MMU v4, using the MMU v3 programming model
-        * Special work for HS38 aliasing I-cache configuratino with PAE40
+        * Special work for HS38 aliasing I-cache configuration with PAE40
         *   - upper 8 bits of paddr need to be written into PTAG_HI
         *   - (and needs to be written before the lower 32 bits)
         * Note that PTAG_HI is hoisted outside the line loop
@@ -936,7 +936,7 @@ void arc_cache_init(void)
                              ic->ver, CONFIG_ARC_MMU_VER);
 
                /*
-                * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG
+                * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
                 * pair to provide vaddr/paddr respectively, just as in MMU v3
                 */
                if (is_isa_arcv2() && ic->alias)
index 8c8e36fa5659411c6705e46d538e7c506fc42c96..73d7e4c75b7dbc9140604b334ab5e22a8cdd7a8d 100644 (file)
@@ -10,7 +10,7 @@
  * DMA Coherent API Notes
  *
  * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessintg it using a kernel virtual address, with
+ * implemented by accessing it using a kernel virtual address, with
  * Cache bit off in the TLB entry.
  *
  * The default DMA address == Phy address which is 0x8000_0000 based.
index 06b6c2d695bfb6bfbb0a61bbfffd958a58bf20d3..414b42710a366fc0f5708765d4ff0786e53a2b5b 100644 (file)
@@ -741,6 +741,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \
        sun7i-a20-olimex-som-evb.dtb \
        sun7i-a20-olinuxino-lime.dtb \
        sun7i-a20-olinuxino-lime2.dtb \
+       sun7i-a20-olinuxino-lime2-emmc.dtb \
        sun7i-a20-olinuxino-micro.dtb \
        sun7i-a20-orangepi.dtb \
        sun7i-a20-orangepi-mini.dtb \
index d82dd6e3f9b1dda71783ff52cc495c45ed4578c4..5687d6b4da60c12061a425562659bc4b2306f875 100644 (file)
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c0_pins>;
-       clock-frequency = <400000>;
+       clock-frequency = <100000>;
 
        tps@24 {
                compatible = "ti,tps65218";
index b01a5948cdd0434ce00d1d5f8da0ab059c62026b..0e63b9dff6e7d717f190886d24f5175e8510489e 100644 (file)
 
                tps659038_pmic {
                        compatible = "ti,tps659038-pmic";
+
+                       smps12-in-supply = <&vmain>;
+                       smps3-in-supply = <&vmain>;
+                       smps45-in-supply = <&vmain>;
+                       smps6-in-supply = <&vmain>;
+                       smps7-in-supply = <&vmain>;
+                       smps8-in-supply = <&vmain>;
+                       smps9-in-supply = <&vmain>;
+                       ldo1-in-supply = <&vmain>;
+                       ldo2-in-supply = <&vmain>;
+                       ldo3-in-supply = <&vmain>;
+                       ldo4-in-supply = <&vmain>;
+                       ldo9-in-supply = <&vmain>;
+                       ldoln-in-supply = <&vmain>;
+                       ldousb-in-supply = <&vmain>;
+                       ldortc-in-supply = <&vmain>;
+
                        regulators {
                                smps12_reg: smps12 {
                                        /* VDD_MPU */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps12";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1250000>;
@@ -73,7 +89,6 @@
 
                                smps3_reg: smps3 {
                                        /* VDD_DDR EMIF1 EMIF2 */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps3";
                                        regulator-min-microvolt = <1350000>;
                                        regulator-max-microvolt = <1350000>;
@@ -84,7 +99,6 @@
                                smps45_reg: smps45 {
                                        /* VDD_DSPEVE on AM572 */
                                        /* VDD_IVA + VDD_DSP on AM571 */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps45";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1250000>;
 
                                smps6_reg: smps6 {
                                        /* VDD_GPU */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps6";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1250000>;
 
                                smps7_reg: smps7 {
                                        /* VDD_CORE */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps7";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1150000>;
                                smps8_reg: smps8 {
                                        /* 5728 - VDD_IVAHD */
                                        /* 5718 - N.C. test point */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps8";
                                };
 
                                smps9_reg: smps9 {
                                        /* VDD_3_3D */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps9";
                                        regulator-min-microvolt = <3300000>;
                                        regulator-max-microvolt = <3300000>;
                                ldo1_reg: ldo1 {
                                        /* VDDSHV8 - VSDMMC  */
                                        /* NOTE: on rev 1.3a, data supply */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo1";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <3300000>;
 
                                ldo2_reg: ldo2 {
                                        /* VDDSH18V */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo2";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
 
                                ldo3_reg: ldo3 {
                                        /* R1.3a 572x V1_8PHY_LDO3: USB, SATA */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo3";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
 
                                ldo4_reg: ldo4 {
                                        /* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo4";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
 
                                ldo9_reg: ldo9 {
                                        /* VDD_RTC  */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo9";
                                        regulator-min-microvolt = <840000>;
                                        regulator-max-microvolt = <1160000>;
 
                                ldoln_reg: ldoln {
                                        /* VDDA_1V8_PLL */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldoln";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
 
                                ldousb_reg: ldousb {
                                        /* VDDA_3V_USB: VDDA_USBHS33 */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldousb";
                                        regulator-min-microvolt = <3300000>;
                                        regulator-max-microvolt = <3300000>;
 
                                ldortc_reg: ldortc {
                                        /* VDDA_RTC  */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldortc";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
index 8450944b28e6bb7728ccd731dcb709fa7346aa28..22f7a13e20b40e094611a10953180fd1679636cd 100644 (file)
@@ -58,8 +58,8 @@
        soc {
                ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
-                         MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
+                         MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
 
                internal-regs {
 
index cbc17b0794b102fb2f0862958203c19c1fe79dae..4128fa91823c98c6fb94870ccb2e5b69c31f35e0 100644 (file)
        };
 };
 
+&mmc1 {
+        status = "disabled";
+};
+
 &mmc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&sd1_pins>;
        cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
 };
 
+&mmc3 {
+        status = "disabled";
+};
+
 &pincntl {
        sd1_pins: pinmux_sd1_pins {
                pinctrl-single,pins = <
index 5d4313fd5a46012aac394ee2b6210af1d8fd6ee6..3f184863e0c52192d7a01a8f24ad3637b1229556 100644 (file)
        phy-mode = "rgmii";
 };
 
+&mmc1 {
+       status = "disabled";
+};
+
+&mmc2 {
+       status = "disabled";
+};
+
 &mmc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&sd2_pins>;
@@ -53,6 +61,7 @@
        dmas = <&edma_xbar 8 0 1        /* use SDTXEVT1 instead of MCASP0TX */
                &edma_xbar 9 0 2>;      /* use SDRXEVT1 instead of MCASP0RX */
        dma-names = "tx", "rx";
+       non-removable;
 };
 
 &pincntl {
index e0074014385a50d930a2058a74f3a3a4f8428ec3..3a8f3976f6f9677874f46d0491653856a444a6e6 100644 (file)
                        ti,hwmods = "gpmc";
                        reg = <0x50000000 0x37c>;      /* device IO registers */
                        interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+                       dmas = <&edma_xbar 4 0>;
+                       dma-names = "rxtx";
                        gpmc,num-cs = <8>;
                        gpmc,num-waitpins = <2>;
                        #address-cells = <2>;
index 4220eeffc65ae806892f353efce4eab400a9cdb9..5e06020f450bba5f78d0df064731dc1d55ce5ec9 100644 (file)
        reg = <0x58000000 0x80>,
              <0x58004054 0x4>,
              <0x58004300 0x20>,
-             <0x58005054 0x4>,
-             <0x58005300 0x20>;
+             <0x58009054 0x4>,
+             <0x58009300 0x20>;
        reg-names = "dss", "pll1_clkctrl", "pll1",
                    "pll2_clkctrl", "pll2";
 
index ddfe1f558c105a1b962cce3ca69e0966021a01da..fa14f77df5632709548958522d83ddb81401d0d0 100644 (file)
        hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
 
        ports {
-               port0 {
+               port {
                        dp_out: endpoint {
                                remote-endpoint = <&bridge_in>;
                        };
                edid-emulation = <5>;
 
                ports {
-                       port0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+
                                bridge_out: endpoint {
                                        remote-endpoint = <&panel_in>;
                                };
                        };
 
-                       port1 {
+                       port@1 {
+                               reg = <1>;
+
                                bridge_in: endpoint {
                                        remote-endpoint = <&dp_out>;
                                };
index f9d2e4f1a0e09598de0ad852bec24996b7373c56..1de972d46a87f31abc814991b2517d4bf75a59f1 100644 (file)
        hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
 
        ports {
-               port0 {
+               port {
                        dp_out: endpoint {
                                remote-endpoint = <&bridge_in>;
                        };
                use-external-pwm;
 
                ports {
-                       port0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+
                                bridge_out: endpoint {
                                        remote-endpoint = <&panel_in>;
                                };
                        };
 
-                       port1 {
+                       port@1 {
+                               reg = <1>;
+
                                bridge_in: endpoint {
                                        remote-endpoint = <&dp_out>;
                                };
index 76056ba92cedc038933a4367a55ba68af9786d4b..ed449827c3d358512f4004f1555d279bbe59267a 100644 (file)
@@ -85,7 +85,7 @@
                        OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
                        OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
                        OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
-                       OMAP3_CORE1_IOPAD(0x215e, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+                       OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
                        OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
                        OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
                >;
index 41f5d386f21f31c1ac02cc091d9e318097a3e211..f4f2ce46d681cd0ed7872dccea21eb6523dbd275 100644 (file)
        vmmc-supply = <&vmmc1>;
        vmmc_aux-supply = <&vsim>;
        bus-width = <4>;
+       cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
 };
 
 &mmc3 {
index d6f839cab6499acc72cd5b93733aa967defe9d09..b6971060648a5f148d3bf7fce8b1aaf08ed3f264 100644 (file)
                        OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */
                >;
        };
+
+       mmc1_wp_pins: pinmux_mmc1_cd_pins {
+               pinctrl-single,pins = <
+                       OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT | MUX_MODE4)   /* etk_d15.gpio_29 */
+               >;
+       };
 };
 
 &i2c3 {
                };
        };
 };
+
+&mmc1 {
+       pinctrl-0 = <&mmc1_pins &mmc1_wp_pins>;
+       wp-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>; /* gpio_29 */
+};
index d9e2d9c6e999e4aa6f3a4c8f582fe9caf74953f4..2b74a81d1de266e360c558f5bf5b29b248c400bc 100644 (file)
                pinctrl-single,pins = <
                        OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
                        OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1)               /* ssi1_flag_tx */
-                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4)                /* ssi1_wake_tx (cawake) */
                        OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1)               /* ssi1_dat_tx */
                        OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1)                /* ssi1_dat_rx */
                        OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1)                /* ssi1_flag_rx */
        modem_pins: pinmux_modem {
                pinctrl-single,pins = <
                        OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4)               /* gpio 70 => cmt_apeslpx */
-                       OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio 72 => ape_rst_rq */
+                       OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4)                /* gpio 72 => ape_rst_rq */
                        OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4)               /* gpio 73 => cmt_rst_rq */
                        OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4)               /* gpio 74 => cmt_en */
                        OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4)               /* gpio 75 => cmt_rst */
index a00ca761675d446c148ffc50c8d0d189cb1f241f..927b17fc4ed885634320480a2527131ec8f451f1 100644 (file)
@@ -97,7 +97,7 @@
                        OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1)            /* ssi1_dat_tx */
                        OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1)            /* ssi1_flag_tx */
                        OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1)      /* ssi1_rdy_tx */
-                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4)        /* ssi1_wake_tx (cawake) */
                        OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1)             /* ssi1_dat_rx */
                        OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1)             /* ssi1_flag_rx */
                        OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1)            /* ssi1_rdy_rx */
                        OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7)            /* ssi1_dat_tx */
                        OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7)            /* ssi1_flag_tx */
                        OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7)    /* ssi1_rdy_tx */
-                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4)        /* ssi1_wake_tx (cawake) */
                        OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7)             /* ssi1_dat_rx */
                        OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7)             /* ssi1_flag_rx */
                        OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4)            /* ssi1_rdy_rx */
 
        modem_pins1: pinmux_modem_core1_pins {
                pinctrl-single,pins = <
-                       OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
+                       OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | MUX_MODE4)        /* gpio_34 (ape_rst_rq) */
                        OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4)            /* gpio_88 (cmt_rst_rq) */
                        OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4)            /* gpio_93 (cmt_apeslpx) */
                >;
index f19170bdcc1fa0f382f2305112daa457ff528e6c..c29b41dc7b95a19213c7e514cd3371e06c87230f 100644 (file)
@@ -98,7 +98,7 @@
                pinctrl-single,pins = <
                         OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0)        /* uart2_cts.uart2_cts */
                         OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0)              /* uart2_rts.uart2_rts */
-                        OMAP3_CORE1_IOPAD(0x217a, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
+                        OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0)               /* uart2_rx.uart2_rx */
                         OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0)              /* uart2_tx.uart2_tx */
                >;
        };
                pinctrl-single,pins = <
                         OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0)      /* uart3_cts_rctx.uart3_cts_rctx */
                         OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0)              /* uart3_rts_sd.uart3_rts_sd */
-                        OMAP3_CORE1_IOPAD(0x219e, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+                        OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0)               /* uart3_rx_irrx.uart3_rx_irrx */
                         OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0)              /* uart3_tx_irtx.uart3_tx_irtx */
                >;
        };
                pinctrl-single,pins = <
                        OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_clk.sdmmc3_clk */
                        OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d4.sdmmc3_dat0 */
-                       OMAP3630_CORE2_IOPAD(0x25e6, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
+                       OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d5.sdmmc3_dat1 */
                        OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d6.sdmmc3_dat2 */
                        OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d3.sdmmc3_dat3 */
                >;
index dc759a3028b790d1b525ee889077c5cca0ce2b8d..5d5b620b7d9b70b6529e4b657641fd4434c88bc5 100644 (file)
                display0 = &hdmi0;
        };
 
+       vmain: fixedregulator-vmain {
+               compatible = "regulator-fixed";
+               regulator-name = "vmain";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
+       vsys_cobra: fixedregulator-vsys_cobra {
+               compatible = "regulator-fixed";
+               regulator-name = "vsys_cobra";
+               vin-supply = <&vmain>;
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
+       vdds_1v8_main: fixedregulator-vdds_1v8_main {
+               compatible = "regulator-fixed";
+               regulator-name = "vdds_1v8_main";
+               vin-supply = <&smps7_reg>;
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+       };
+
        vmmcsd_fixed: fixedregulator-mmcsd {
                compatible = "regulator-fixed";
                regulator-name = "vmmcsd_fixed";
 
        wlcore_irq_pin: pinmux_wlcore_irq_pin {
                pinctrl-single,pins = <
-                       OMAP5_IOPAD(0x40, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6)     /* llia_wakereqin.gpio1_wk14 */
+                       OMAP5_IOPAD(0x40, PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
                >;
        };
 };
 
                        ti,ldo6-vibrator;
 
+                       smps123-in-supply = <&vsys_cobra>;
+                       smps45-in-supply = <&vsys_cobra>;
+                       smps6-in-supply = <&vsys_cobra>;
+                       smps7-in-supply = <&vsys_cobra>;
+                       smps8-in-supply = <&vsys_cobra>;
+                       smps9-in-supply = <&vsys_cobra>;
+                       smps10_out2-in-supply = <&vsys_cobra>;
+                       smps10_out1-in-supply = <&vsys_cobra>;
+                       ldo1-in-supply = <&vsys_cobra>;
+                       ldo2-in-supply = <&vsys_cobra>;
+                       ldo3-in-supply = <&vdds_1v8_main>;
+                       ldo4-in-supply = <&vdds_1v8_main>;
+                       ldo5-in-supply = <&vsys_cobra>;
+                       ldo6-in-supply = <&vdds_1v8_main>;
+                       ldo7-in-supply = <&vsys_cobra>;
+                       ldo8-in-supply = <&vsys_cobra>;
+                       ldo9-in-supply = <&vmmcsd_fixed>;
+                       ldoln-in-supply = <&vsys_cobra>;
+                       ldousb-in-supply = <&vsys_cobra>;
+
                        regulators {
                                smps123_reg: smps123 {
                                        /* VDD_OPP_MPU */
                pinctrl-0 = <&twl6040_pins>;
 
                interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
-               ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>;  /* gpio line 141 */
+
+               /* audpwron gpio defined in the board specific dts */
 
                vio-supply = <&smps7_reg>;
                v2v1-supply = <&smps9_reg>;
index 46ecb1dd3b5cb3dacefc99ff453093d5914a1b3c..f75ce02fb3983988ec7858630e0c3d93f589b5e6 100644 (file)
        };
 };
 
+/* LDO4 is VPP1 - ball AD9 */
+&ldo4_reg {
+       regulator-min-microvolt = <2000000>;
+       regulator-max-microvolt = <2000000>;
+};
+
+/*
+ * LDO7 is used for HDMI: VDDA_DSIPORTA - ball AA33, VDDA_DSIPORTC - ball AE33,
+ * VDDA_HDMI - ball AN25
+ */
+&ldo7_reg {
+       status = "okay";
+       regulator-min-microvolt = <1800000>;
+       regulator-max-microvolt = <1800000>;
+};
+
 &omap5_pmx_core {
        i2c4_pins: pinmux_i2c4_pins {
                pinctrl-single,pins = <
                <&gpio7 3 0>;           /* 195, SDA */
 };
 
+&twl6040 {
+       ti,audpwron-gpio = <&gpio5 16 GPIO_ACTIVE_HIGH>;  /* gpio line 144 */
+};
+
+&twl6040_pins {
+       pinctrl-single,pins = <
+               OMAP5_IOPAD(0x1c4, PIN_OUTPUT | MUX_MODE6)      /* mcspi1_somi.gpio5_144 */
+               OMAP5_IOPAD(0x1ca, PIN_OUTPUT | MUX_MODE6)      /* perslimbus2_clock.gpio5_145 */
+       >;
+};
index 60b3fbb3bf07ad384182e951e1329ca46a8cf1ab..a51e60518eb6bb2bcc0c82452743b0b7ac3c37c4 100644 (file)
                <&gpio9 1 GPIO_ACTIVE_HIGH>,    /* TCA6424A P00, LS OE */
                <&gpio7 1 GPIO_ACTIVE_HIGH>;    /* GPIO 193, HPD */
 };
+
+&twl6040 {
+       ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>;  /* gpio line 141 */
+};
+
+&twl6040_pins {
+       pinctrl-single,pins = <
+               OMAP5_IOPAD(0x1be, PIN_OUTPUT | MUX_MODE6)      /* mcspi1_somi.gpio5_141 */
+       >;
+};
index a3601e4c0a2e3b4aecb249d5b9c74436911afe0d..b844473601d25085d0a200e6406c8f5bfc381f04 100644 (file)
 &gmac1 {
        status = "okay";
        phy-mode = "rgmii";
+       phy-handle = <&phy1>;
 
        snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>;
        snps,reset-active-low;
index ad8ba10764a330e2f328192bb7c4b9aa9dd4e0f3..d294e82447a292bb24e0989191494693f7ee0ac3 100644 (file)
                        compatible = "shared-dma-pool";
                        reg = <0x40000000 0x01000000>;
                        no-map;
+                       status = "disabled";
                };
 
                gp1_reserved: rproc@41000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x41000000 0x01000000>;
                        no-map;
+                       status = "disabled";
                };
 
                audio_reserved: rproc@42000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x42000000 0x01000000>;
                        no-map;
+                       status = "disabled";
                };
 
                dmu_reserved: rproc@43000000 {
index a03e56fb5dbc7882abb90fc71298bac1c3e8a56b..ca58eb279d5541631bcc3dea5e56ef87797071ea 100644 (file)
@@ -65,8 +65,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>, <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>,
+                                <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -74,8 +75,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>, <&ahb_gates 46>,
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>,
+                                <&ahb_gates 46>,
                                 <&dram_gates 25>, <&dram_gates 26>;
                        status = "disabled";
                };
@@ -84,9 +86,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_fe0-de_be0-lcd0";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
-                                <&ahb_gates 46>, <&dram_gates 25>,
-                                <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 44>, <&ahb_gates 46>,
+                                <&dram_gates 25>, <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -94,8 +96,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
-                       clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
-                                <&ahb_gates 44>, <&ahb_gates 46>,
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+                                <&ahb_gates 36>, <&ahb_gates 44>,
+                                <&ahb_gates 46>,
                                 <&dram_gates 5>, <&dram_gates 25>, <&dram_gates 26>;
                        status = "disabled";
                };
index bddd0de88af6be1d3e68b027b644a56e5e0ee61b..367f3301249364e7ad47e4c2e594ebfe6ecd88fc 100644 (file)
@@ -65,8 +65,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>;
                        status = "disabled";
                };
 
@@ -74,7 +74,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 44>;
                        status = "disabled";
                };
 
@@ -82,8 +83,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-tve0";
-                       clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
-                                <&ahb_gates 44>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+                                <&ahb_gates 36>, <&ahb_gates 44>;
                        status = "disabled";
                };
        };
index a8d8b4582397a2dee85929aa43d955efce5009f2..f694482bdeb64ccf1f21ffaa69d3fb3f1027ea0b 100644 (file)
@@ -52,7 +52,7 @@
 
 / {
        model = "NextThing C.H.I.P.";
-       compatible = "nextthing,chip", "allwinner,sun5i-r8";
+       compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";
 
        aliases {
                i2c0 = &i2c0;
index 68b479b8772c6f3eadc16f2ad6c3d33e35d39c5f..73c133f5e79cdf9540174002656421c96dc68b62 100644 (file)
 };
 
 &reg_dc1sw {
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
        regulator-name = "vcc-lcd";
 };
 
index 360adfb1e9ca56a76094e170e6662d504e115f3c..d6ad6196a768bce9a28d1c7fcb149da32f21d5df 100644 (file)
 
 &reg_dc1sw {
        regulator-name = "vcc-lcd-usb2";
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
 };
 
 &reg_dc5ldo {
index febdf4c72fb013d3a2c222a20d4f30cf149d2513..2c34bbbb95700a4c1af75ffb44d92a312b927dea 100644 (file)
@@ -67,8 +67,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>, <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>,
+                                <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -76,8 +77,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
-                                <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 44>, <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -85,7 +86,7 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-tve0";
-                       clocks = <&pll5 1>,
+                       clocks = <&pll3>, <&pll5 1>,
                                 <&ahb_gates 34>, <&ahb_gates 36>, <&ahb_gates 44>,
                                 <&dram_gates 5>, <&dram_gates 26>;
                        status = "disabled";
                pll3x2: pll3x2_clk {
                        #clock-cells = <0>;
                        compatible = "fixed-factor-clock";
+                       clocks = <&pll3>;
                        clock-div = <1>;
                        clock-mult = <2>;
                        clock-output-names = "pll3-2x";
                pll7x2: pll7x2_clk {
                        #clock-cells = <0>;
                        compatible = "fixed-factor-clock";
+                       clocks = <&pll7>;
                        clock-div = <1>;
                        clock-mult = <2>;
                        clock-output-names = "pll7-2x";
index 1eca3b28ac6480e8b1d628cfebfb2c58ebd24280..b6da15d823a61bcbc8d77e6c7d295306795cef09 100644 (file)
 
                                ldo5_reg: ldo5 {
                                        regulator-name = "vddio_sdmmc,avdd_vdac";
-                                       regulator-min-microvolt = <3300000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <3300000>;
                                        regulator-always-on;
                                };
 
        sdhci@78000000 {
                status = "okay";
+               vqmmc-supply = <&ldo5_reg>;
                cd-gpios = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_LOW>;
                wp-gpios = <&gpio TEGRA_GPIO(T, 3) GPIO_ACTIVE_HIGH>;
                power-gpios = <&gpio TEGRA_GPIO(D, 7) GPIO_ACTIVE_HIGH>;
index 10f49ab5328eee63acce7d5d9fe9081bd1cf76a4..47195e8690b4ed9c8b7c9937cb62ef7fb8ab795b 100644 (file)
@@ -82,6 +82,7 @@ CONFIG_TOUCHSCREEN_MMS114=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MAX77693_HAPTIC=y
 CONFIG_INPUT_MAX8997_HAPTIC=y
+CONFIG_KEYBOARD_SAMSUNG=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
index 8f857564657f13482fe0277840b931d369f20eb6..8a5fff1b7f6f9541578d37e4bf03f8eb3e4a003a 100644 (file)
@@ -264,6 +264,7 @@ CONFIG_KEYBOARD_TEGRA=y
 CONFIG_KEYBOARD_SPEAR=y
 CONFIG_KEYBOARD_ST_KEYSCAN=y
 CONFIG_KEYBOARD_CROS_EC=m
+CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_MOUSE_CYAPA=m
 CONFIG_MOUSE_ELAN_I2C=y
index 19cfab526d13a37dd33d476f1270179fc4fe5d5c..20febb36884424a0b23984c776e23abab3509a40 100644 (file)
@@ -29,7 +29,7 @@
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+       return (pmd_t *)get_zeroed_page(GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
index aeddd28b3595515aa70fb849bd65179a7b800443..92fd2c8a9af0638834d6c2b5814b9a88911f33fe 100644 (file)
@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 #define pmd_large(pmd)         (pmd_val(pmd) & 2)
 #define pmd_bad(pmd)           (pmd_val(pmd) & 2)
+#define pmd_present(pmd)       (pmd_val(pmd))
 
 #define copy_pmd(pmdpd,pmdps)          \
        do {                            \
index fa70db7c714b605ff98e4efeb264c944214aabab..2a029bceaf2f8593788dea27ec05f3664fc9733a 100644 (file)
@@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
                                                : !!(pmd_val(pmd) & (val)))
 #define pmd_isclear(pmd, val)  (!(pmd_val(pmd) & (val)))
 
+#define pmd_present(pmd)       (pmd_isset((pmd), L_PMD_SECT_VALID))
 #define pmd_young(pmd)         (pmd_isset((pmd), PMD_SECT_AF))
 #define pte_special(pte)       (pte_isset((pte), L_PTE_SPECIAL))
 static inline pte_t pte_mkspecial(pte_t pte)
@@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
 #define pfn_pmd(pfn,prot)      (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define mk_pmd(page,prot)      pfn_pmd(page_to_pfn(page),prot)
 
-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+/* represent a not-present pmd by a faulting entry; used by pmdp_invalidate */
 static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 {
-       return __pmd(0);
+       return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
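
The point of this hunk is that a not-present pmd is now encoded by clearing only the VALID bit, so pmdp_invalidate() preserves the other descriptor bits and the entry stays distinguishable from an empty (pmd_none) slot. A hedged sketch with an illustrative bit layout, not the real ARM LPAE descriptor:

#include <stdint.h>

#define EX_PMD_VALID    (1ULL << 0)     /* illustrative bit only */

/* Present <=> valid bit set. */
static inline int ex_pmd_present(uint64_t pmd)
{
        return !!(pmd & EX_PMD_VALID);
}

/* Invalidate while keeping the remaining bits: the entry becomes a
 * faulting-but-populated pmd rather than a zeroed (pmd_none) one. */
static inline uint64_t ex_pmd_mknotpresent(uint64_t pmd)
{
        return pmd & ~EX_PMD_VALID;
}
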
index 348caabb7625ee7b12fc0ddd8e2546b22f338b1c..d62204060cbe7c6239a7dc8725918fc43a7b4203 100644 (file)
@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #define pgd_offset_k(addr)     pgd_offset(&init_mm, addr)
 
 #define pmd_none(pmd)          (!pmd_val(pmd))
-#define pmd_present(pmd)       (pmd_val(pmd))
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
index ef9119f7462ea11550fc0e6edaa756d9edc39905..4d9375814b538e096b57f76d947916cb3ce92b7a 100644 (file)
@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
        if (ret)
                return ret;
 
-       vfp_flush_hwstate(thread);
        thread->vfpstate.hard = new_vfp;
+       vfp_flush_hwstate(thread);
 
        return 0;
 }
index df90bc59bfce2893328c3c79e12658c106897c90..861521606c6d6755d7802f7656e47d7e60f269ed 100644 (file)
@@ -486,7 +486,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-       trace_ipi_raise(target, ipi_types[ipinr]);
+       trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
 }
 
index 893941ec98dc6da787629068359275a863516056..f1bde7c4e736de72253b8e8ae1419688d910b098 100644 (file)
@@ -263,6 +263,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
        kvm_timer_vcpu_terminate(vcpu);
        kvm_vgic_vcpu_destroy(vcpu);
        kvm_pmu_vcpu_destroy(vcpu);
+       kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
index e65aa7d11b20971493a730f8a47754bdf3f484cc..20dcf6e904b2c5ea40c21a0903a73bc58b16860e 100644 (file)
@@ -61,7 +61,6 @@ config ARCH_EXYNOS4
        select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
        select CPU_EXYNOS4210
        select GIC_NON_BANKED
-       select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
        select MIGHT_HAVE_CACHE_L2X0
        help
          Samsung EXYNOS4 (Cortex-A9) SoC based systems
index a38b16b699233b2b0948250efe32a030388b2e2a..b56de4b8cdf2dde23f7329a03d081c963c568701 100644 (file)
@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
 static void __init imx6ul_enet_phy_init(void)
 {
        if (IS_BUILTIN(CONFIG_PHYLIB))
-               phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
+               phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
                                           ksz8081_phy_fixup);
 }
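
Widening the match from the exact ID (mask 0xffffffff) to MICREL_PHY_ID_MASK means the fixup applies to every silicon revision of the KSZ8081, since the revision bits fall outside the vendor mask. A hedged sketch of masked ID matching; phy_id_matches() is illustrative, not the phylib implementation:

#include <stdint.h>

/* A fixup applies when the masked IDs agree; a narrower mask ignores
 * the revision field at the bottom of the 32-bit PHY ID. */
static int phy_id_matches(uint32_t phy_id, uint32_t uid, uint32_t mask)
{
        return (phy_id & mask) == (uid & mask);
}
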
 
index ecf9e0c3b107808752fe49af062b8758e1784986..e53c6cfcab51cd12c798fd11d663686c77761b02 100644 (file)
@@ -7,9 +7,15 @@ CFLAGS_pmsu.o                  := -march=armv7-a
 obj-$(CONFIG_MACH_MVEBU_ANY)    += system-controller.o mvebu-soc-id.o
 
 ifeq ($(CONFIG_MACH_MVEBU_V7),y)
-obj-y                           += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o pm.o pm-board.o
+obj-y                           += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
+
+obj-$(CONFIG_PM)                += pm.o pm-board.o
 obj-$(CONFIG_SMP)               += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
 endif
 
 obj-$(CONFIG_MACH_DOVE)                 += dove.o
-obj-$(CONFIG_MACH_KIRKWOOD)     += kirkwood.o kirkwood-pm.o
+
+ifeq ($(CONFIG_MACH_KIRKWOOD),y)
+obj-y                           += kirkwood.o
+obj-$(CONFIG_PM)                += kirkwood-pm.o
+endif
index 7e989d61159c384df1de62b27156ddf22d8fa44f..e80f0dde218919dab8d7a2b5873a4962755f3ee2 100644 (file)
@@ -162,22 +162,16 @@ exit:
 }
 
 /*
- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
- * is needed as a workaround for a deadlock issue between the PCIe
- * interface and the cache controller.
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
  */
 static void __iomem *
-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
-                             unsigned int mtype, void *caller)
+armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+                        unsigned int mtype, void *caller)
 {
-       struct resource pcie_mem;
-
-       mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
-
-       if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
-               mtype = MT_UNCACHED;
-
+       mtype = MT_UNCACHED;
        return __arm_ioremap_caller(phys_addr, size, mtype, caller);
 }
 
@@ -186,7 +180,8 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
        struct device_node *cache_dn;
 
        coherency_cpu_base = of_iomap(np, 0);
-       arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
+       arch_ioremap_caller = armada_wa_ioremap_caller;
+       pci_ioremap_set_mem_type(MT_UNCACHED);
 
        /*
         * We should switch the PL310 to I/O coherency mode only if
index 5d7fb596bf4afb6d14b7c835cad0cbc38b827869..bf608441b35731e13b99fb7012bdc89ba005c168 100644 (file)
@@ -43,8 +43,8 @@
 #define OTHERS_MASK                    (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
 
 /* IRQ handler register bitmasks */
-#define DEFERRED_FIQ_MASK              (0x1 << (INT_DEFERRED_FIQ % IH2_BASE))
-#define GPIO_BANK1_MASK                (0x1 << INT_GPIO_BANK1)
+#define DEFERRED_FIQ_MASK              OMAP_IRQ_BIT(INT_DEFERRED_FIQ)
+#define GPIO_BANK1_MASK                OMAP_IRQ_BIT(INT_GPIO_BANK1)
 
 /* Driver buffer byte offsets */
 #define BUF_MASK                       (FIQ_MASK * 4)
@@ -110,7 +110,7 @@ ENTRY(qwerty_fiqin_start)
        mov r8, #2                              @ reset FIQ agreement
        str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
 
-       cmp r10, #INT_GPIO_BANK1                @ is it GPIO bank interrupt?
+       cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY)     @ is it GPIO interrupt?
        beq gpio                                @ yes - process it
 
        mov r8, #1
index d1f12095f3155d37ac72e61bc73d9cf2b84dcc2e..ec760ae2f91794ab58277dbfb8e7ea23255861f1 100644 (file)
@@ -109,7 +109,8 @@ void __init ams_delta_init_fiq(void)
         * Since no set_type() method is provided by OMAP irq chip,
         * switch to edge triggered interrupt type manually.
         */
-       offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4;
+       offset = IRQ_ILR0_REG_OFFSET +
+                       ((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
        val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
        omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
 
@@ -149,7 +150,7 @@ void __init ams_delta_init_fiq(void)
        /*
         * Redirect GPIO interrupts to FIQ
         */
-       offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4;
+       offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
        val = omap_readl(OMAP_IH1_BASE + offset) | 1;
        omap_writel(val, OMAP_IH1_BASE + offset);
 }
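
Both hunks in this file compensate for the sparse-IRQ conversion: hardware interrupt n now sits at Linux IRQ n + NR_IRQS_LEGACY, so the register offset and bit math has to subtract the base back out. A hedged sketch of the arithmetic the OMAP_IRQ_BIT() helper is assumed to encapsulate (the real definition lives in the mach-omap1 headers):

    #define NR_IRQS_LEGACY  16  /* generic kernel value */

    /* Assumed shape: bit position within a 32-interrupt bank, after
     * removing the legacy IRQ base. */
    #define OMAP_IRQ_BIT(irq)  (1U << (((irq) - NR_IRQS_LEGACY) % 32))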
index adb5e764965991e6b9f15a7570104379b314bac9..6dfc3e1210a34d5771d781322f079117f61f91cf 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef __AMS_DELTA_FIQ_H
 #define __AMS_DELTA_FIQ_H
 
+#include <mach/irqs.h>
+
 /*
  * Interrupt number used for passing control from FIQ to IRQ.
  * IRQ12, described as reserved, has been selected.
index 0517f0c1581a71091539c5c1abe47473f4b789ef..1a648e9dfaa08fb3647414a08c03650c114b0396 100644 (file)
@@ -17,6 +17,7 @@ config ARCH_OMAP3
        select PM_OPP if PM
        select PM if CPU_IDLE
        select SOC_HAS_OMAP2_SDRC
+       select ARM_ERRATA_430973
 
 config ARCH_OMAP4
        bool "TI OMAP4"
@@ -36,6 +37,7 @@ config ARCH_OMAP4
        select PM if CPU_IDLE
        select ARM_ERRATA_754322
        select ARM_ERRATA_775420
+       select OMAP_INTERCONNECT
 
 config SOC_OMAP5
        bool "TI OMAP5"
@@ -67,6 +69,8 @@ config SOC_AM43XX
        select HAVE_ARM_SCU
        select GENERIC_CLOCKEVENTS_BROADCAST
        select HAVE_ARM_TWD
+       select ARM_ERRATA_754322
+       select ARM_ERRATA_775420
 
 config SOC_DRA7XX
        bool "TI DRA7XX"
@@ -240,4 +244,12 @@ endmenu
 
 endif
 
+config OMAP5_ERRATA_801819
+       bool "Errata 801819: An eviction from L1 data cache might stall indefinitely"
+       depends on SOC_OMAP5 || SOC_DRA7XX
+       help
+         A livelock can occur in the L2 cache arbitration that might prevent
+         a snoop from completing. Under certain conditions this can cause the
+         system to deadlock.
+
 endmenu
index d9c3ffc393291ea6da82dc4886e5ca5e2efb4db8..390795b334c3a252e197a5ef14a5848548150320 100644 (file)
@@ -39,7 +39,7 @@
 #include "gpmc.h"
 #include "gpmc-smsc911x.h"
 
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
 #include <video/omap-panel-data.h>
 
 #include "board-flash.h"
@@ -47,6 +47,7 @@
 #include "hsmmc.h"
 #include "control.h"
 #include "common-board-devices.h"
+#include "display.h"
 
 #define LDP_SMSC911X_CS                1
 #define LDP_SMSC911X_GPIO      152
index 9cfebc5c7455b36b1123ae100efa513bd8fdbcdf..180c6aa633bd7c381f4839bafa80a00e36eb19e2 100644 (file)
 #include <linux/spi/spi.h>
 #include <linux/mm.h>
 #include <asm/mach-types.h>
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
 #include <video/omap-panel-data.h>
 
 #include <linux/platform_data/spi-omap2-mcspi.h>
 
 #include "soc.h"
 #include "board-rx51.h"
+#include "display.h"
 
 #include "mux.h"
 
@@ -32,7 +33,6 @@
 static struct connector_atv_platform_data rx51_tv_pdata = {
        .name = "tv",
        .source = "venc.0",
-       .connector_type = OMAP_DSS_VENC_TYPE_COMPOSITE,
        .invert_polarity = false,
 };
 
index 6ab13d18c6363532d8928dee3039ed7db065461c..70b3eaf085e4992113e4c1dd2273341443cb1aa4 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
 #include "omap_hwmod.h"
 #include "omap_device.h"
 #include "omap-pm.h"
index 7375854b16c7a9dd39d7ab5793adc4f1b6669ecf..78f25300527980e7f23543eb6bf6d7e8fd8c4c80 100644 (file)
@@ -33,4 +33,9 @@ int omap_init_vout(void);
 
 struct device_node * __init omapdss_find_dss_of_node(void);
 
+struct omap_dss_board_info;
+
+/* Init with the board info */
+int omap_display_init(struct omap_dss_board_info *board_data);
+
 #endif
index ea2be0f5953b53d0b799f871fefd8d6bad0a7931..1d583bc0b1a90bff18b4c8dd88f38d22a2a9520d 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 
-#include <video/omapdss.h>
+#include <linux/platform_data/omapdss.h>
 #include <video/omap-panel-data.h>
 
 #include "soc.h"
index af2851fbcdf02e224bb196dc805500b788e0a400..bae263fba640af8c1ff7a3b1b98e71a35486e94c 100644 (file)
@@ -46,6 +46,7 @@
 
 #define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX        0x109
 #define OMAP5_MON_AMBA_IF_INDEX                0x108
+#define OMAP5_DRA7_MON_SET_ACR_INDEX   0x107
 
 /* Secure PPA(Primary Protected Application) APIs */
 #define OMAP4_PPA_L2_POR_INDEX         0x23
index c625cc10d9f98aefc844329c4e730f0682358038..8cd1de914ee492beaaae1934a9ff910f3208c1f3 100644 (file)
@@ -50,6 +50,39 @@ void __iomem *omap4_get_scu_base(void)
        return scu_base;
 }
 
+#ifdef CONFIG_OMAP5_ERRATA_801819
+void omap5_erratum_workaround_801819(void)
+{
+       u32 acr, revidr;
+       u32 acr_mask;
+
+       /* REVIDR[3] indicates erratum fix available on silicon */
+       asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
+       if (revidr & (0x1 << 3))
+               return;
+
+       asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+       /*
+        * BIT(27) - Disables streaming. All write-allocate lines allocate in
+        * the L1 or L2 cache.
+        * BIT(25) - Disables streaming. All write-allocate lines allocate in
+        * the L1 cache.
+        */
+       acr_mask = (0x3 << 25) | (0x3 << 27);
+       /* If the bits are already set, skip the expensive SMC call */
+       if ((acr & acr_mask) == acr_mask)
+               return;
+
+       acr |= acr_mask;
+       omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+       pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
+                __func__, smp_processor_id());
+}
+#else
+static inline void omap5_erratum_workaround_801819(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
        /*
@@ -64,12 +97,15 @@ static void omap4_secondary_init(unsigned int cpu)
                omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
                                                        4, 0, 0, 0, 0, 0);
 
-       /*
-        * Configure the CNTFRQ register for the secondary cpu's which
-        * indicates the frequency of the cpu local timers.
-        */
-       if (soc_is_omap54xx() || soc_is_dra7xx())
+       if (soc_is_omap54xx() || soc_is_dra7xx()) {
+               /*
+                * Configure the CNTFRQ register for the secondary cpu's which
+                * indicates the frequency of the cpu local timers.
+                */
                set_cntfreq();
+               /* Configure ACR to disable streaming WA for 801819 */
+               omap5_erratum_workaround_801819();
+       }
 
        /*
         * Synchronise with the boot thread.
@@ -218,6 +254,8 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 
        if (cpu_is_omap446x())
                startup_addr = omap4460_secondary_startup;
+       if (soc_is_dra74x() || soc_is_omap54xx())
+               omap5_erratum_workaround_801819();
 
        /*
         * Write the address of secondary startup routine into the
index 78af6d8cf2e208811f2d6b31e1aa837c3b585565..daf2753de7aa2a4681c690e49fb00d874c856a90 100644 (file)
@@ -186,8 +186,9 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
                        trace_state = (PWRDM_TRACE_STATES_FLAG |
                                       ((next & OMAP_POWERSTATE_MASK) << 8) |
                                       ((prev & OMAP_POWERSTATE_MASK) << 0));
-                       trace_power_domain_target(pwrdm->name, trace_state,
-                                                 smp_processor_id());
+                       trace_power_domain_target_rcuidle(pwrdm->name,
+                                                         trace_state,
+                                                         smp_processor_id());
                }
                break;
        default:
@@ -523,8 +524,8 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
 
        if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
                /* Trace the pwrdm desired target state */
-               trace_power_domain_target(pwrdm->name, pwrst,
-                                         smp_processor_id());
+               trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
+                                                 smp_processor_id());
                /* Program the pwrdm desired target state */
                ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
        }
index 0ec2d00f42371c208ac02f87ffc03b0db5241ace..eb350a67313367ca8c3fe405f0119c7225832180 100644 (file)
@@ -36,14 +36,7 @@ static struct powerdomain iva_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_IVA_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 4,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* hwa_mem */
-               [1] = PWRSTS_OFF_RET,   /* sl2_mem */
-               [2] = PWRSTS_OFF_RET,   /* tcm1_mem */
-               [3] = PWRSTS_OFF_RET,   /* tcm2_mem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* hwa_mem */
                [1] = PWRSTS_ON,        /* sl2_mem */
@@ -76,12 +69,7 @@ static struct powerdomain ipu_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_IPU_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 2,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* aessmem */
-               [1] = PWRSTS_OFF_RET,   /* periphmem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* aessmem */
                [1] = PWRSTS_ON,        /* periphmem */
@@ -95,11 +83,7 @@ static struct powerdomain dss_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_DSS_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* dss_mem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* dss_mem */
        },
@@ -111,13 +95,8 @@ static struct powerdomain l4per_7xx_pwrdm = {
        .name             = "l4per_pwrdm",
        .prcm_offs        = DRA7XX_PRM_L4PER_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
-       .pwrsts           = PWRSTS_RET_ON,
-       .pwrsts_logic_ret = PWRSTS_RET,
+       .pwrsts           = PWRSTS_ON,
        .banks            = 2,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* nonretained_bank */
-               [1] = PWRSTS_OFF_RET,   /* retained_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* nonretained_bank */
                [1] = PWRSTS_ON,        /* retained_bank */
@@ -132,9 +111,6 @@ static struct powerdomain gpu_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* gpu_mem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* gpu_mem */
        },
@@ -148,8 +124,6 @@ static struct powerdomain wkupaon_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* wkup_bank */
        },
@@ -161,15 +135,7 @@ static struct powerdomain core_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_CORE_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_ON,
-       .pwrsts_logic_ret = PWRSTS_RET,
        .banks            = 5,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* core_nret_bank */
-               [1] = PWRSTS_OFF_RET,   /* core_ocmram */
-               [2] = PWRSTS_OFF_RET,   /* core_other_bank */
-               [3] = PWRSTS_OFF_RET,   /* ipu_l2ram */
-               [4] = PWRSTS_OFF_RET,   /* ipu_unicache */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* core_nret_bank */
                [1] = PWRSTS_ON,        /* core_ocmram */
@@ -226,11 +192,7 @@ static struct powerdomain vpe_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_VPE_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* vpe_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* vpe_bank */
        },
@@ -260,14 +222,8 @@ static struct powerdomain l3init_7xx_pwrdm = {
        .name             = "l3init_pwrdm",
        .prcm_offs        = DRA7XX_PRM_L3INIT_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
-       .pwrsts           = PWRSTS_RET_ON,
-       .pwrsts_logic_ret = PWRSTS_RET,
+       .pwrsts           = PWRSTS_ON,
        .banks            = 3,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* gmac_bank */
-               [1] = PWRSTS_OFF_RET,   /* l3init_bank1 */
-               [2] = PWRSTS_OFF_RET,   /* l3init_bank2 */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* gmac_bank */
                [1] = PWRSTS_ON,        /* l3init_bank1 */
@@ -283,9 +239,6 @@ static struct powerdomain eve3_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve3_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve3_bank */
        },
@@ -299,9 +252,6 @@ static struct powerdomain emu_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* emu_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* emu_bank */
        },
@@ -314,11 +264,6 @@ static struct powerdomain dsp2_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 3,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* dsp2_edma */
-               [1] = PWRSTS_OFF_RET,   /* dsp2_l1 */
-               [2] = PWRSTS_OFF_RET,   /* dsp2_l2 */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* dsp2_edma */
                [1] = PWRSTS_ON,        /* dsp2_l1 */
@@ -334,11 +279,6 @@ static struct powerdomain dsp1_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 3,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* dsp1_edma */
-               [1] = PWRSTS_OFF_RET,   /* dsp1_l1 */
-               [2] = PWRSTS_OFF_RET,   /* dsp1_l2 */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* dsp1_edma */
                [1] = PWRSTS_ON,        /* dsp1_l1 */
@@ -354,9 +294,6 @@ static struct powerdomain cam_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* vip_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* vip_bank */
        },
@@ -370,9 +307,6 @@ static struct powerdomain eve4_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve4_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve4_bank */
        },
@@ -386,9 +320,6 @@ static struct powerdomain eve2_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve2_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve2_bank */
        },
@@ -402,9 +333,6 @@ static struct powerdomain eve1_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve1_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve1_bank */
        },
index 5b385bb8aff9673b21b5f18edeab8d8114525180..cb9497a20fb307548cea76a895274c459b534d06 100644 (file)
@@ -496,8 +496,7 @@ void __init omap_init_time(void)
        __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
                        2, "timer_sys_ck", NULL, false);
 
-       if (of_have_populated_dt())
-               clocksource_probe();
+       clocksource_probe();
 }
 
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
@@ -505,6 +504,8 @@ void __init omap3_secure_sync32k_timer_init(void)
 {
        __omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
                        2, "timer_sys_ck", NULL, false);
+
+       clocksource_probe();
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
@@ -513,6 +514,8 @@ void __init omap3_gptimer_timer_init(void)
 {
        __omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
                        1, "timer_sys_ck", "ti,timer-alwon", true);
+
+       clocksource_probe();
 }
 #endif
 
index 5766ce2be32bbd28452c95ea9e43e92de0f4dcb0..8409cab3f760cc798a14cadc5f017feab53d1b65 100644 (file)
@@ -547,7 +547,7 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
 
        init.name = dev_name(cpu_dev);
        init.ops = &clk_spc_ops;
-       init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
+       init.flags = CLK_GET_RATE_NOCACHE;
        init.num_parents = 0;
 
        return devm_clk_register(cpu_dev, &spc->hw);
index 84baa16f4c0b674dfe404b54bb9fe09b4fd682bf..e93aa6734147915b17a283aa7530a2176f175be6 100644 (file)
@@ -68,7 +68,7 @@
 #include <linux/platform_data/asoc-s3c.h>
 #include <linux/platform_data/spi-s3c64xx.h>
 
-static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
+#define samsung_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))
 
 /* AC97 */
 #ifdef CONFIG_CPU_S3C2440
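
The macro swaps a single shared, writable u64 for a C99 compound literal, so each expansion produces its own anonymous u64 initialized to DMA_BIT_MASK(32) instead of every device aliasing one variable. A standalone sketch of the idiom with hypothetical names:

    #include <stdio.h>

    /* Each expansion is a distinct unnamed object, not a shared global. */
    #define DMA_MASK_32 (*((unsigned long long[]) { 0xffffffffULL }))

    int main(void)
    {
            /* Two uses in one expression take the address of two
             * different objects. */
            printf("%p vs %p\n", (void *)&DMA_MASK_32, (void *)&DMA_MASK_32);
            printf("mask = 0x%llx\n", DMA_MASK_32);
            return 0;
    }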
index 76747d92bc728fedf751a0e5837ef66b955e4efd..5a0a691d4220a60aab7a1ef2251f8ba85ddffe2a 100644 (file)
@@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT
 config MMU
        def_bool y
 
+config ARM64_PAGE_SHIFT
+       int
+       default 16 if ARM64_64K_PAGES
+       default 14 if ARM64_16K_PAGES
+       default 12
+
+config ARM64_CONT_SHIFT
+       int
+       default 5 if ARM64_64K_PAGES
+       default 7 if ARM64_16K_PAGES
+       default 4
+
 config ARCH_MMAP_RND_BITS_MIN
        default 14 if ARM64_64K_PAGES
        default 16 if ARM64_16K_PAGES
@@ -426,6 +438,15 @@ config CAVIUM_ERRATUM_22375
 
          If unsure, say Y.
 
+config CAVIUM_ERRATUM_23144
+       bool "Cavium erratum 23144: ITS SYNC hang on dual socket system"
+       depends on NUMA
+       default y
+       help
+         The ITS SYNC command can hang for cross-node I/O and collection/CPU mappings.
+
+         If unsure, say Y.
+
 config CAVIUM_ERRATUM_23154
        bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed"
        default y
index 710fde4ad0f0fade7e8a93f4c667610bf4715c53..0cc758cdd0dc07587200dd6a2d8dd5f15dba2500 100644 (file)
@@ -12,7 +12,8 @@ config ARM64_PTDUMP
          who are working in architecture specific areas of the kernel.
          It is probably not a good idea to enable this feature in a production
          kernel.
-         If in doubt, say "N"
+
+         If in doubt, say N.
 
 config PID_IN_CONTEXTIDR
        bool "Write the current PID to the CONTEXTIDR register"
@@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET
          value.
 
 config DEBUG_SET_MODULE_RONX
-        bool "Set loadable kernel module data as NX and text as RO"
-        depends on MODULES
-        help
-          This option helps catch unintended modifications to loadable
-          kernel module's text and read-only data. It also prevents execution
-          of module data. Such protection may interfere with run-time code
-          patching and dynamic kernel tracing - and they might also protect
-          against certain classes of kernel exploits.
-          If in doubt, say "N".
+       bool "Set loadable kernel module data as NX and text as RO"
+       depends on MODULES
+       default y
+       help
+         If this is set, kernel module text and rodata will be made read-only.
+         This is to help catch accidental or malicious attempts to change the
+         kernel's executable code.
+
+         If in doubt, say Y.
 
 config DEBUG_RODATA
        bool "Make kernel text and rodata read-only"
@@ -56,7 +57,7 @@ config DEBUG_RODATA
          is to help catch accidental or malicious attempts to change the
          kernel's executable code.
 
-         If in doubt, say Y
+         If in doubt, say Y.
 
 config DEBUG_ALIGN_RODATA
        depends on DEBUG_RODATA
@@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA
          alignment and potentially wasted space. Turn on this option if
          performance is more important than memory pressure.
 
-         If in doubt, say N
+         If in doubt, say N.
 
 source "drivers/hwtracing/coresight/Kconfig"
 
index 354d75402aceaa771adc63c04ecd51538edcbb0b..648a32c89541845affc45426efc6e2e64921d92f 100644 (file)
@@ -60,7 +60,9 @@ head-y                := arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+                int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+                rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
 else
 TEXT_OFFSET := 0x00080000
 endif
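
The rewritten awk expression keeps the randomized TEXT_OFFSET aligned to the configured page size rather than hard-wiring 4K granularity: pick a random page index below 2 MiB, then scale back to bytes. A rough standalone C restatement, assuming only what the Makefile shows:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
            unsigned long page_shift = 12; /* CONFIG_ARM64_PAGE_SHIFT: 12/14/16 */
            unsigned long page_size  = 1UL << page_shift;
            unsigned long pages      = (2UL * 1024 * 1024) / page_size;

            srand((unsigned)time(NULL));
            /* Random page-aligned offset below 2 MiB. */
            unsigned long text_offset = ((unsigned long)rand() % pages) * page_size;
            printf("TEXT_OFFSET := 0x%06lx\n", text_offset);
            return 0;
    }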
@@ -93,7 +95,7 @@ boot := arch/arm64/boot
 Image: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
-Image.%: vmlinux
+Image.%: Image
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 zinstall install:
index 3a4e9a2ab313010614caf641cce5aca55b0ff6c6..fbafa24cd5335b90de29af9a3f3a731bc616ac10 100644 (file)
                #size-cells = <1>;
                #interrupts-cells = <3>;
 
-               compatible = "arm,amba-bus";
+               compatible = "simple-bus";
                interrupt-parent = <&gic>;
                ranges;
 
index 46f325a143b0ff0514409139cd89be34a65ae35f..d7f8e06910bcada5899399b87d3a8a926813eb53 100644 (file)
        };
 
        amba {
-               compatible = "arm,amba-bus";
+               compatible = "simple-bus";
                #address-cells = <2>;
                #size-cells = <2>;
                ranges;
index 87e1985f3be8c5912d1b1ca4fbc5d8d70024db50..9d9fd4b9a72e574baba29be7381e8740f06b2d66 100644 (file)
 #define APM_CPU_PART_POTENZA           0x000
 
 #define CAVIUM_CPU_PART_THUNDERX       0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX  0x0A2
 
 #define BRCM_CPU_PART_VULCAN           0x516
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_THUNDERX  MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 
 #ifndef __ASSEMBLY__
 
index 7a09c48c04752d2b1790c1777dd1b72dd39dd117..579b6e654f2d33998cd207eb21b4aaeb6bdd4ef0 100644 (file)
@@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define STACK_RND_MASK                 (0x3ffff >> (PAGE_SHIFT - 12))
 #endif
 
-#ifdef CONFIG_COMPAT
-
 #ifdef __AARCH64EB__
 #define COMPAT_ELF_PLATFORM            ("v8b")
 #else
 #define COMPAT_ELF_PLATFORM            ("v8l")
 #endif
 
+#ifdef CONFIG_COMPAT
+
 #define COMPAT_ELF_ET_DYN_BASE         (2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
index f69f69c8120c3343b118cb81c533563656de92c9..da84645525b902dc05154a7ba16f88718b1aa353 100644 (file)
@@ -38,25 +38,54 @@ extern int kgdb_fault_expected;
 #endif /* !__ASSEMBLY__ */
 
 /*
- * gdb is expecting the following registers layout.
+ * gdb remote protocol (well most versions of it) expects the following
+ * register layout.
  *
  * General purpose regs:
  *     r0-r30: 64 bit
  *     sp,pc : 64 bit
- *     pstate  : 64 bit
- *     Total: 34
+ *     pstate  : 32 bit
+ *     Total: 33 + 1
  * FPU regs:
  *     f0-f31: 128 bit
- *     Total: 32
- * Extra regs
  *     fpsr & fpcr: 32 bit
- *     Total: 2
+ *     Total: 32 + 2
  *
+ * To expand a little on the "most versions of it"... when the gdb remote
+ * protocol for AArch64 was developed it depended on a statement in the
+ * Architecture Reference Manual that claimed "SPSR_ELx is a 32-bit register"
+ * and, as a result, allocated only 32 bits for the PSTATE in the remote
+ * protocol. In fact this statement is still present in ARM DDI 0487A.i.
+ *
+ * Unfortunately "is a 32-bit register" has a very special meaning for
+ * system registers. It means that "the upper bits, bits[63:32], are
+ * RES0.". RES0 is heavily used in the ARM architecture documents as a
+ * way to leave space for future architecture changes. So to translate a
+ * little for people who don't spend their spare time reading ARM architecture
+ * manuals, what "is a 32-bit register" actually means in this context is
+ * "is a 64-bit register but one with no meaning allocated to any of the
+ * upper 32-bits... *yet*".
+ *
+ * Perhaps then we should not be surprised that this has led to some
+ * confusion. Specifically a patch, influenced by the above translation,
+ * that extended PSTATE to 64-bit was accepted into gdb-7.7 but the patch
+ * was reverted in gdb-7.8.1 and all later releases, when this was
+ * discovered to be an undocumented protocol change.
+ *
+ * So... it is *not* wrong for us to only allocate 32-bits to PSTATE
+ * here even though the kernel itself allocates 64-bits for the same
+ * state. That is because this bit of code tells the kernel how the gdb
+ * remote protocol (well most versions of it) describes the register state.
+ *
+ * Note that if you are using one of the versions of gdb that supports
+ * the gdb-7.7 version of the protocol you cannot use kgdb directly
+ * without providing a custom register description (gdb can load new
+ * protocol descriptions at runtime).
  */
 
-#define _GP_REGS               34
+#define _GP_REGS               33
 #define _FP_REGS               32
-#define _EXTRA_REGS            2
+#define _EXTRA_REGS            3
 /*
  * general purpose registers size in bytes.
  * pstate is only 4 bytes. subtract 4 bytes
index 72a3025bb5830f013ed93b2e831261eef6e641aa..31b73227b41f3076813f5d33122a3e1951e47f91 100644 (file)
@@ -55,8 +55,9 @@
 #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * PAGE_OFFSET - the virtual address of the start of the linear map (top
  *              (VA_BITS - 1))
+ * KIMAGE_VADDR - the virtual address of the start of the kernel image
  * VA_BITS - the maximum number of bits for virtual addresses.
  * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.
index 17b45f7d96d3e3f7668bcc93dd914dbde8bd1e84..8472c6def5ef5efe53ba8a6fbd0be3f456c95d1a 100644 (file)
 
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together  */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define PAGE_SHIFT             16
-#define CONT_SHIFT             5
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define PAGE_SHIFT             14
-#define CONT_SHIFT             7
-#else
-#define PAGE_SHIFT             12
-#define CONT_SHIFT             4
-#endif
+#define PAGE_SHIFT             CONFIG_ARM64_PAGE_SHIFT
+#define CONT_SHIFT             CONFIG_ARM64_CONT_SHIFT
 #define PAGE_SIZE              (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK              (~(PAGE_SIZE-1))
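
With the shifts now coming from Kconfig, the values introduced earlier in this series work out as below; a small runnable check of the arithmetic, not new policy:

    #include <stdio.h>

    /* Span covered by one contiguous hint = PAGE_SIZE << CONT_SHIFT. */
    static unsigned long cont_span(unsigned page_shift, unsigned cont_shift)
    {
            return 1UL << (page_shift + cont_shift);
    }

    int main(void)
    {
            printf("4K : %lu KiB\n", cont_span(12, 4) >> 10); /* 64 */
            printf("16K: %lu MiB\n", cont_span(14, 7) >> 20); /* 2  */
            printf("64K: %lu MiB\n", cont_span(16, 5) >> 20); /* 2  */
            return 0;
    }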
 
index ff98585d085aa5737c9c17478555f22b11261f2f..d25f4f137c2aa1c758fa614af40361bfada50aef 100644 (file)
@@ -26,7 +26,7 @@
 
 #define check_pgt_cache()              do { } while (0)
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #define PGD_SIZE       (PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
index a307eb6e7fa8d75e536fe3cff56c5d4f978fe212..7f94755089e200afbd4c015316da4517fe113bcd 100644 (file)
@@ -117,6 +117,8 @@ struct pt_regs {
        };
        u64 orig_x0;
        u64 syscallno;
+       u64 orig_addr_limit;
+       u64 unused;     // maintain 16 byte alignment
 };
 
 #define arch_has_single_step() (1)
index 433e504052741ef752dac96f8be0447b6ed8c1fa..022644704a936dce53bb933f9bf6635835e32530 100644 (file)
@@ -124,6 +124,18 @@ static inline void cpu_panic_kernel(void)
        cpu_park_loop();
 }
 
+/*
+ * If a secondary CPU enters the kernel but fails to come online,
+ * (e.g. due to mismatched features), and cannot exit the kernel,
+ * we increment cpus_stuck_in_kernel and leave the CPU in a
+ * quiescent loop within the kernel text. The memory containing
+ * this loop must not be re-used for anything else as the 'stuck'
+ * core is executing it.
+ *
+ * This function is used to inhibit features like kexec and hibernate.
+ */
+bool cpus_are_stuck_in_kernel(void);
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* ifndef __ASM_SMP_H */
index fc9682bfe0020caebf99412131fb760d5b8b870d..e875a5a551d7debeab3c34ed2d52e9beae7c8f7d 100644 (file)
@@ -30,22 +30,53 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
        unsigned int tmp;
        arch_spinlock_t lockval;
+       u32 owner;
+
+       /*
+        * Ensure prior spin_lock operations to other locks have completed
+        * on this CPU before we test whether "lock" is locked.
+        */
+       smp_mb();
+       owner = READ_ONCE(lock->owner) << 16;
 
        asm volatile(
 "      sevl\n"
 "1:    wfe\n"
 "2:    ldaxr   %w0, %2\n"
+       /* Is the lock free? */
 "      eor     %w1, %w0, %w0, ror #16\n"
-"      cbnz    %w1, 1b\n"
+"      cbz     %w1, 3f\n"
+       /* Lock taken -- has there been a subsequent unlock->lock transition? */
+"      eor     %w1, %w3, %w0, lsl #16\n"
+"      cbz     %w1, 1b\n"
+       /*
+        * The owner has been updated, so there was an unlock->lock
+        * transition that we missed. That means we can rely on the
+        * store-release of the unlock operation paired with the
+        * load-acquire of the lock operation to publish any of our
+        * previous stores to the new lock owner and therefore don't
+        * need to bother with the writeback below.
+        */
+"      b       4f\n"
+"3:\n"
+       /*
+        * Serialise against any concurrent lockers by writing back the
+        * unlocked lock value
+        */
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
 "      stxr    %w1, %w0, %2\n"
-"      cbnz    %w1, 2b\n", /* Serialise against any concurrent lockers */
-       /* LSE atomics */
 "      nop\n"
-"      nop\n")
+"      nop\n",
+       /* LSE atomics */
+"      mov     %w1, %w0\n"
+"      cas     %w0, %w0, %2\n"
+"      eor     %w1, %w1, %w0\n")
+       /* Somebody else wrote to the lock; go back to 2 and reload the value */
+"      cbnz    %w1, 2b\n"
+"4:"
        : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-       :
+       : "r" (owner)
        : "memory");
 }
 
@@ -148,6 +179,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
+       smp_mb(); /* See arch_spin_unlock_wait */
        return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
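
The smp_mb() additions close a store-buffering window: two CPUs that each take one lock and then spin_unlock_wait() on the other could both have their load of the other lock satisfied before their own lock-taking store became visible, so both would see the other lock as free. A userspace analogue of the required ordering using C11 atomics; illustrative only, not the kernel implementation:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int lock_a, lock_b;

    /* Thread 0; thread 1 is symmetric with lock_a/lock_b swapped. */
    bool thread0_sees_b_free(void)
    {
            atomic_store_explicit(&lock_a, 1, memory_order_relaxed); /* "take" A */
            atomic_thread_fence(memory_order_seq_cst);               /* the smp_mb() */
            return atomic_load_explicit(&lock_b, memory_order_relaxed) == 0;
    }

    /* Without the fence, both threads may return true, each missing the
     * other's acquisition -- the window the barrier closes. */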
 
index 0685d74572af788b05d44658c7dba7e4fc3742ba..9e397a5427565a1b35d1e8bdd170d3a88ac3d457 100644 (file)
@@ -80,19 +80,6 @@ static inline void set_fs(mm_segment_t fs)
 
 #define segment_eq(a, b)       ((a) == (b))
 
-/*
- * Return 1 if addr < current->addr_limit, 0 otherwise.
- */
-#define __addr_ok(addr)                                                        \
-({                                                                     \
-       unsigned long flag;                                             \
-       asm("cmp %1, %0; cset %0, lo"                                   \
-               : "=&r" (flag)                                          \
-               : "r" (addr), "0" (current_thread_info()->addr_limit)   \
-               : "cc");                                                \
-       flag;                                                           \
-})
-
 /*
  * Test whether a block of memory is a valid user space address.
  * Returns 1 if the range is valid, 0 otherwise.
index 41e58fe3c041e9adcade0f113064ea42e87045ba..e78ac26324bd809dcd5fa2f7f45465daba8c158d 100644 (file)
@@ -44,7 +44,7 @@
 #define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
 
-#define __NR_compat_syscalls           390
+#define __NR_compat_syscalls           394
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
index 5b925b761a2a8857a62720110076e062edd4d7f3..b7e8ef16ff0dc62b94a042e4a0b70c9884e4cb16 100644 (file)
@@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat)
 __SYSCALL(__NR_userfaultfd, sys_userfaultfd)
 #define __NR_membarrier 389
 __SYSCALL(__NR_membarrier, sys_membarrier)
+#define __NR_mlock2 390
+__SYSCALL(__NR_mlock2, sys_mlock2)
+#define __NR_copy_file_range 391
+__SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 392
+__SYSCALL(__NR_preadv2, compat_sys_preadv2)
+#define __NR_pwritev2 393
+__SYSCALL(__NR_pwritev2, compat_sys_pwritev2)
 
 /*
  * Please add new compat syscalls above this comment and update
index f8e5d47f08807aa41d33c84a323e4bf1f37ffebf..2f4ba774488ad5cc72444a8d351ce63fa504351c 100644 (file)
@@ -60,6 +60,7 @@ int main(void)
   DEFINE(S_PC,                 offsetof(struct pt_regs, pc));
   DEFINE(S_ORIG_X0,            offsetof(struct pt_regs, orig_x0));
   DEFINE(S_SYSCALLNO,          offsetof(struct pt_regs, syscallno));
+  DEFINE(S_ORIG_ADDR_LIMIT,    offsetof(struct pt_regs, orig_addr_limit));
   DEFINE(S_FRAME_SIZE,         sizeof(struct pt_regs));
   BLANK();
   DEFINE(MM_CONTEXT_ID,                offsetof(struct mm_struct, context.id.counter));
index d42789499f17eb322a47da83046ca0119e643c75..af716b65110d901d1a1fea40da801240475c9ccf 100644 (file)
@@ -98,6 +98,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                MIDR_RANGE(MIDR_THUNDERX, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 1),
        },
+       {
+       /* Cavium ThunderX, T81 pass 1.0 */
+               .desc = "Cavium erratum 27456",
+               .capability = ARM64_WORKAROUND_CAVIUM_27456,
+               MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+       },
 #endif
        {
        }
index 3808470486f38a7e36db43a07e0ce43cdd193d1e..c173d329397f6ebd8f07ce2d755f7b794e1532d0 100644 (file)
@@ -22,6 +22,8 @@
 
 #include <linux/bitops.h>
 #include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/personality.h>
@@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = {
 static int c_show(struct seq_file *m, void *v)
 {
        int i, j;
+       bool compat = personality(current->personality) == PER_LINUX32;
 
        for_each_online_cpu(i) {
                struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
@@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v)
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
+               if (compat)
+                       seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+                                  MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
 
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000UL/HZ),
@@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v)
                 * software which does already (at least for 32-bit).
                 */
                seq_puts(m, "Features\t:");
-               if (personality(current->personality) == PER_LINUX32) {
+               if (compat) {
 #ifdef CONFIG_COMPAT
                        for (j = 0; compat_hwcap_str[j]; j++)
                                if (compat_elf_hwcap & (1 << j))
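
For a task with the PER_LINUX32 personality, /proc/cpuinfo now gains a glibc-friendly "model name" line; an illustrative reading (revision and platform string vary by CPU and endianness):

    processor       : 0
    model name      : ARMv8 Processor rev 2 (v8l)
    BogoMIPS        : 100.00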
index 12e8d2bcb3f9da4e643649442685138dea7966d1..6c3b7345a6c428f5952909ac0833684ca0401f1a 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/errno.h>
 #include <asm/esr.h>
 #include <asm/irq.h>
+#include <asm/memory.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
        mov     x29, xzr                        // fp pointed to user-space
        .else
        add     x21, sp, #S_FRAME_SIZE
-       .endif
+       get_thread_info tsk
+       /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+       ldr     x20, [tsk, #TI_ADDR_LIMIT]
+       str     x20, [sp, #S_ORIG_ADDR_LIMIT]
+       mov     x20, #TASK_SIZE_64
+       str     x20, [tsk, #TI_ADDR_LIMIT]
+       ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
+       .endif /* \el == 0 */
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]
        .endm
 
        .macro  kernel_exit, el
+       .if     \el != 0
+       /* Restore the task's original addr_limit. */
+       ldr     x20, [sp, #S_ORIG_ADDR_LIMIT]
+       str     x20, [tsk, #TI_ADDR_LIMIT]
+
+       /* No need to restore UAO, it will be restored from SPSR_EL1 */
+       .endif
+
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
@@ -406,7 +422,6 @@ el1_irq:
        bl      trace_hardirqs_off
 #endif
 
-       get_thread_info tsk
        irq_handler
 
 #ifdef CONFIG_PREEMPT
index f8df75d740f4891a018aa84e02cb7c2988f3ff84..21ab5df9fa76ddea54d58abf77812593ddbb0f6d 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/sections.h>
+#include <asm/smp.h>
 #include <asm/suspend.h>
 #include <asm/virt.h>
 
@@ -236,6 +237,11 @@ int swsusp_arch_suspend(void)
        unsigned long flags;
        struct sleep_stack_data state;
 
+       if (cpus_are_stuck_in_kernel()) {
+               pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
+               return -EBUSY;
+       }
+
        local_dbg_save(flags);
 
        if (__cpu_suspend_enter(&state)) {
index b67531a13136dd691bba7799af4c1664dc84b3e4..b5f063e5eff7dbe59516dec92b64782a25ff8d16 100644 (file)
@@ -58,7 +58,17 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
        { "x30", 8, offsetof(struct pt_regs, regs[30])},
        { "sp", 8, offsetof(struct pt_regs, sp)},
        { "pc", 8, offsetof(struct pt_regs, pc)},
-       { "pstate", 8, offsetof(struct pt_regs, pstate)},
+       /*
+        * struct pt_regs thinks PSTATE is 64-bits wide but gdb remote
+        * protocol disagrees. Therefore we must extract only the lower
+        * 32-bits. Look for the big comment in asm/kgdb.h for more
+        * detail.
+        */
+       { "pstate", 4, offsetof(struct pt_regs, pstate)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+                                                       + 4
+#endif
+       },
        { "v0", 16, -1 },
        { "v1", 16, -1 },
        { "v2", 16, -1 },
@@ -128,6 +138,8 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
        memset((char *)gdb_regs, 0, NUMREGBYTES);
        thread_regs = task_pt_regs(task);
        memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
+       /* Special case for PSTATE (check comments in asm/kgdb.h for details) */
+       dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs);
 }
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
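
The offset-plus-4 adjustment above only matters on big-endian, where the low 32 bits of the 64-bit pstate field are stored in the second word. A standalone sketch of the extraction with hypothetical names:

    #include <stdint.h>
    #include <string.h>

    /* Return the low 32 bits of a 64-bit value as laid out in memory. */
    static uint32_t low32_in_memory(const uint64_t *val)
    {
            uint32_t out;
            const unsigned char *p = (const unsigned char *)val;
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            memcpy(&out, p + 4, sizeof(out)); /* low word is second on BE */
    #else
            memcpy(&out, p, sizeof(out));     /* low word is first on LE */
    #endif
            return out;
    }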
index 678e0842cb3b72f891684ce502009168fc444627..62ff3c0622e2e6d83395de24fe93dcd0071f0c62 100644 (file)
@@ -909,3 +909,21 @@ int setup_profiling_timer(unsigned int multiplier)
 {
        return -EINVAL;
 }
+
+static bool have_cpu_die(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       int any_cpu = raw_smp_processor_id();
+
+       if (cpu_ops[any_cpu]->cpu_die)
+               return true;
+#endif
+       return false;
+}
+
+bool cpus_are_stuck_in_kernel(void)
+{
+       bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+
+       return !!cpus_stuck_in_kernel || smp_spin_tables;
+}
index c5392081b49ba4ac4f48d9a0782442ac888ed222..2a43012616b7c5e31555e0a45323d74a9181ccd7 100644 (file)
@@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 
        /*
         * We need to switch to kernel mode so that we can use __get_user
-        * to safely read from kernel space.  Note that we now dump the
-        * code first, just in case the backtrace kills us.
+        * to safely read from kernel space.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);
@@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
        print_ip_sym(where);
 }
 
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
 {
        unsigned long addr = instruction_pointer(regs);
-       mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;
 
-       /*
-        * We need to switch to kernel mode so that we can use __get_user
-        * to safely read from kernel space.  Note that we now dump the
-        * code first, just in case the backtrace kills us.
-        */
-       fs = get_fs();
-       set_fs(KERNEL_DS);
-
        for (i = -4; i < 1; i++) {
                unsigned int val, bad;
 
@@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
                }
        }
        printk("%sCode: %s\n", lvl, str);
+}
 
-       set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+       if (!user_mode(regs)) {
+               mm_segment_t fs = get_fs();
+               set_fs(KERNEL_DS);
+               __dump_instr(lvl, regs);
+               set_fs(fs);
+       } else {
+               __dump_instr(lvl, regs);
+       }
 }
 
 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
@@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
        void __user *pc = (void __user *)instruction_pointer(regs);
        console_verbose();
 
-       pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
-               handler[reason], esr, esr_get_class_string(esr));
+       pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
+               handler[reason], smp_processor_id(), esr,
+               esr_get_class_string(esr));
        __show_regs(regs);
 
        info.si_signo = SIGILL;
index fff7cd42b3a381734d00a4043ccfd626732dfd9c..5f8f80b4a2240614baf9d10fee0b21ae2d5d5cc1 100644 (file)
@@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
         * Make sure stores to the GIC via the memory mapped interface
         * are now visible to the system register interface.
         */
-       dsb(st);
+       if (!cpu_if->vgic_sre)
+               dsb(st);
 
        cpu_if->vgic_vmcr  = read_gicreg(ICH_VMCR_EL2);
 
@@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
                        if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
                                continue;
 
-                       if (cpu_if->vgic_elrsr & (1 << i)) {
+                       if (cpu_if->vgic_elrsr & (1 << i))
                                cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
-                               continue;
-                       }
+                       else
+                               cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
 
-                       cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
                        __gic_v3_set_lr(0, i);
                }
 
@@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 
        val = read_gicreg(ICC_SRE_EL2);
        write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
-       isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
-       write_gicreg(1, ICC_SRE_EL1);
+
+       if (!cpu_if->vgic_sre) {
+               /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+               isb();
+               write_gicreg(1, ICC_SRE_EL1);
+       }
 }
 
 void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
@@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
         * been actually programmed with the value we want before
         * starting to mess with the rest of the GIC.
         */
-       write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
-       isb();
+       if (!cpu_if->vgic_sre) {
+               write_gicreg(0, ICC_SRE_EL1);
+               isb();
+       }
 
        val = read_gicreg(ICH_VTR_EL2);
        max_lr_idx = vtr_to_max_lr_idx(val);
@@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
         * (re)distributors. This ensures the guest will read the
         * correct values from the memory-mapped interface.
         */
-       isb();
-       dsb(sy);
+       if (!cpu_if->vgic_sre) {
+               isb();
+               dsb(sy);
+       }
        vcpu->arch.vgic_cpu.live_lrs = live_lrs;
 
        /*
         * Prevent the guest from touching the GIC system registers if
         * SRE isn't enabled for GICv3 emulation.
         */
-       if (!cpu_if->vgic_sre) {
-               write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
-                            ICC_SRE_EL2);
-       }
+       write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+                    ICC_SRE_EL2);
 }
 
 void __hyp_text __vgic_v3_init_lrs(void)
index 7bbe3ff02602f93f87ffd8a04ac0577ce3aea038..a57d650f552cd67ed15964b645001791bf05ae6c 100644 (file)
@@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
        return true;
 }
 
+static bool access_gic_sre(struct kvm_vcpu *vcpu,
+                          struct sys_reg_params *p,
+                          const struct sys_reg_desc *r)
+{
+       if (p->is_write)
+               return ignore_write(vcpu, p);
+
+       p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
+       return true;
+}
+
 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
@@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
          access_gic_sgi },
        /* ICC_SRE_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
-         trap_raz_wi },
+         access_gic_sre },
 
        /* CONTEXTIDR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
index b7b397802088d1b6c5af50efac9aa49852cbf242..efcf1f7ef1e4c27b4c0c40546dc542122f824933 100644 (file)
@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
                                                 &asid_generation);
        flush_context(cpu);
 
-       /* We have at least 1 ASID per CPU, so this will always succeed */
+       /* We have more ASIDs than CPUs, so this will always succeed */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 
 set_asid:
@@ -227,8 +227,11 @@ switch_mm_fastpath:
 static int asids_init(void)
 {
        asid_bits = get_cpu_asid_bits();
-       /* If we end up with more CPUs than ASIDs, expect things to crash */
-       WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+       /*
+        * Expect allocation after rollover to fail if we don't have at least
+        * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+        */
+       WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
        atomic64_set(&asid_generation, ASID_FIRST_VERSION);
        asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
                           GFP_KERNEL);
index 8404190fe2bd43cbc78fcc7f8b113bdce4213b88..ccfde237d6e690726fe5ca899df1f0e0c53eddc4 100644 (file)
@@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = {
 
 struct pg_level {
        const struct prot_bits *bits;
+       const char *name;
        size_t num;
        u64 mask;
 };
@@ -157,15 +158,19 @@ struct pg_level {
 static struct pg_level pg_level[] = {
        {
        }, { /* pgd */
+               .name   = "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pud */
+               .name   = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pmd */
+               .name   = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        }, { /* pte */
+               .name   = "PTE",
                .bits   = pte_bits,
                .num    = ARRAY_SIZE(pte_bits),
        },
@@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
                                delta >>= 10;
                                unit++;
                        }
-                       seq_printf(st->seq, "%9lu%c", delta, *unit);
+                       seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+                                  pg_level[st->level].name);
                        if (pg_level[st->level].bits)
                                dump_prot(st, pg_level[st->level].bits,
                                          pg_level[st->level].num);
index 5954881a35ac516aed1bfb0844249feb551f40b3..b1166d1e5955cd80096ea7412a2371008ec81b7c 100644 (file)
@@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
         * PTE_RDONLY is cleared by default in the asm below, so set it in
         * back if necessary (read-only or clean PTE).
         */
-       if (!pte_write(entry) || !dirty)
+       if (!pte_write(entry) || !pte_sw_dirty(entry))
                pte_val(entry) |= PTE_RDONLY;
 
        /*
@@ -280,7 +280,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        }
 
        if (permission_fault(esr) && (addr < USER_DS)) {
-               if (get_fs() == KERNEL_DS)
+               /* regs->orig_addr_limit may be 0 if we entered from EL0 */
+               if (regs->orig_addr_limit == KERNEL_DS)
                        die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
                if (!search_exception_tables(regs->pc))
@@ -441,7 +442,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
        return 1;
 }
 
-static struct fault_info {
+static const struct fault_info {
        int     (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
        int     sig;
        int     code;
index dbd12ea8ce684993f1adf5c100a06d223b646073..43a76b07eb32dc9c6324c53ddcc601283c6005b2 100644 (file)
@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
 {
        struct page *page = pte_page(pte);
 
-       /* no flushing needed for anonymous pages */
-       if (!page_mapping(page))
-               return;
-
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                sync_icache_aliases(page_address(page),
                                    PAGE_SIZE << compound_order(page));
index aa8aee7d69293ef8004b4883bebde05493ece111..2e49bd252fe7643adb66906e2381159b60949520 100644 (file)
@@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt)
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+       } else if (ps == (PAGE_SIZE * CONT_PTES)) {
+               hugetlb_add_hstate(CONT_PTE_SHIFT);
+       } else if (ps == (PMD_SIZE * CONT_PMDS)) {
+               hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
        } else {
                hugetlb_bad_size();
                pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt)
        return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_ARM64_64K_PAGES
+static __init int add_default_hugepagesz(void)
+{
+       if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
+               hugetlb_add_hstate(CONT_PMD_SHIFT);
+       return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
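
For reference, the sizes these new branches accept; a quick worked check assuming the CONT_* defaults quoted for each page size earlier in the series (for 64K pages: CONT_PTES = CONT_PMDS = 32, PMD_SIZE = 512M):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long page = 64ULL << 10, pmd = 512ULL << 20;
            printf("CONT_PTE hugepage: %llu MiB\n", (32 * page) >> 20); /* 2  */
            printf("CONT_PMD hugepage: %llu GiB\n", (32 * pmd) >> 30);  /* 16 */
            return 0;
    }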
index 1aba19d68c5ededf4ce986340bf410bb8e05eb3e..db039cb368bef85edaf7461f2f9f834be79a6bfd 100644 (file)
@@ -43,7 +43,7 @@ static inline void pgd_ctor(void *x)
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
+       return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -63,7 +63,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
        struct page *page;
        void *pg;
 
-       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
        if (!pg)
                return NULL;
 
index 235ece437dddc7196be819a8b7b30dc3f26028a5..42f1affb9c2dda555041404be054b9ae5e873f85 100644 (file)
@@ -24,14 +24,14 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *pte;
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+       pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
index 41907d25ed384e2ee42e46d1df461bbd2886732f..c9ed14f6c67de118ac789f7f58e3937df7ad3ddf 100644 (file)
@@ -22,7 +22,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
        if (pte)
                clear_page(pte);
        return pte;
@@ -33,9 +33,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
        struct page *page;
 
 #ifdef CONFIG_HIGHPTE
-       page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+       page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-       page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+       page = alloc_pages(GFP_KERNEL, 0);
 #endif
        if (!page)
                return NULL;
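
This and the many pgalloc hunks around it are one tree-wide cleanup: __GFP_REPEAT was only honoured for costly high-order allocations, so carrying it on order-0 page-table allocations was a no-op. A hedged sketch of the rule; the flag values and demo function are invented, only the costly-order threshold of 3 mirrors the kernel:

#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER	3	/* kernel threshold for "costly" */
#define DEMO_GFP_KERNEL		0x1	/* invented flag values */
#define DEMO_GFP_REPEAT		0x2

/* The allocator only honours the "try harder" hint above the costly
 * threshold, so REPEAT on an order-0 request changes nothing. */
static int repeat_has_effect(unsigned int flags, unsigned int order)
{
	return (flags & DEMO_GFP_REPEAT) && order > PAGE_ALLOC_COSTLY_ORDER;
}

int main(void)
{
	printf("order 0 + REPEAT: effect=%d\n",
	       repeat_has_effect(DEMO_GFP_KERNEL | DEMO_GFP_REPEAT, 0));
	printf("order 4 + REPEAT: effect=%d\n",
	       repeat_has_effect(DEMO_GFP_KERNEL | DEMO_GFP_REPEAT, 4));
	return 0;
}
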
index 77da3b0ae3c2b2c37636f4f0e8dc3ee296e80268..eeebf862c46c6aa16a5a994686f80439b4db6c4b 100644 (file)
@@ -64,7 +64,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
@@ -78,7 +78,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       gfp_t flags =  GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+       gfp_t flags =  GFP_KERNEL | __GFP_ZERO;
        return (pte_t *) __get_free_page(flags);
 }
 
index f80758cb7157df174e930d58306be808e97b248a..e109ee95e9191df66b4d5c167736f4c9d698fffc 100644 (file)
@@ -45,7 +45,7 @@ config IA64
        select GENERIC_SMP_IDLE_THREAD
        select ARCH_INIT_TASK
        select ARCH_TASK_STRUCT_ALLOCATOR
-       select ARCH_THREAD_INFO_ALLOCATOR
+       select ARCH_THREAD_STACK_ALLOCATOR
        select ARCH_CLOCKSOURCE_DATA
        select GENERIC_TIME_VSYSCALL_OLD
        select SYSCTL_ARCH_UNALIGN_NO_WARN
index aa995b67c3f51abe459879648025cdd1fe30724c..d1212b84fb8369636b5d226936b89fa98c0d0115 100644 (file)
@@ -48,15 +48,15 @@ struct thread_info {
 #ifndef ASM_OFFSETS_C
 /* how to get the thread information struct from C */
 #define current_thread_info()  ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info_node(tsk, node)      \
-               ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node)     \
+               ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
 #define task_thread_info(tsk)  ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #else
 #define current_thread_info()  ((struct thread_info *) 0)
-#define alloc_thread_info_node(tsk, node)      ((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node)     ((unsigned long *) 0)
 #define task_thread_info(tsk)  ((struct thread_info *) 0)
 #endif
-#define free_thread_info(ti)   /* nothing */
+#define free_thread_stack(ti)  /* nothing */
 #define task_stack_page(tsk)   ((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
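
On ia64 the thread info and kernel stack live inside the task_struct allocation itself, which is why alloc_thread_stack_node() above is pure pointer arithmetic and free_thread_stack() is empty. A simplified user-space sketch of that layout, with made-up sizes:

#include <stdio.h>
#include <stdlib.h>

#define TASK_BYTES	4096	/* stand-in for IA64_TASK_SIZE */
#define STACK_BYTES	8192	/* stand-in for the kernel stack size */

struct task {			/* stand-in for task_struct */
	char opaque[TASK_BYTES];
	/* thread info and kernel stack follow in the same block */
};

/* Mirrors alloc_thread_stack_node(): no separate allocation, just an
 * offset into the task's own memory. */
static unsigned long *task_stack(struct task *tsk)
{
	return (unsigned long *)((char *)tsk + TASK_BYTES);
}

int main(void)
{
	struct task *tsk = malloc(TASK_BYTES + STACK_BYTES);

	if (!tsk)
		return 1;
	printf("task at %p, stack at %p\n", (void *)tsk,
	       (void *)task_stack(tsk));
	free(tsk);	/* freeing the task frees the stack too */
	return 0;
}
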
index f9efe9739d3fe5da25621d278e4c7a2e6a8110e5..0eaa89f3defd5d8b8f5abf0b7b1e0d1f35ac8d2d 100644 (file)
@@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * handled. This is done by having a special ".data..init_task" section...
  */
 #define init_thread_info       init_task_mem.s.thread_info
+#define init_stack             init_task_mem.stack
 
 union {
        struct {
index 01d877c6868f05e5a8fb72639a980cd4b7ef66dc..cf3023dced49d7fbe55ab50a8f357b95e5f5194a 100644 (file)
@@ -8,12 +8,13 @@
 
 #include <asm/processor.h>
 
-static void putc(char c);
+static void m32r_putc(char c);
 
 static int puts(const char *s)
 {
        char c;
-       while ((c = *s++)) putc(c);
+       while ((c = *s++))
+               m32r_putc(c);
        return 0;
 }
 
@@ -41,7 +42,7 @@ static int puts(const char *s)
 #define BOOT_SIO0TXB   PLD_ESIO0TXB
 #endif
 
-static void putc(char c)
+static void m32r_putc(char c)
 {
        while ((*BOOT_SIO0STS & 0x3) != 0x3)
                cpu_relax();
@@ -61,7 +62,7 @@ static void putc(char c)
 #define SIO0TXB        (volatile unsigned short *)(0x00efd000 + 30)
 #endif
 
-static void putc(char c)
+static void m32r_putc(char c)
 {
        while ((*SIO0STS & 0x1) == 0)
                cpu_relax();
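
The rename above avoids shadowing putc, which is also a standard <stdio.h> name that may be a macro, so a file-local putc() can break once libc declarations leak into the boot code. A minimal sketch of the prefixed-name pattern; m32r_putc_demo stands in for the real UART poll loop:

#include <stdio.h>

/* A prefixed name cannot collide with the stdio putc() macro. */
static void m32r_putc_demo(char c)
{
	/* the real code polls a serial status register before writing */
	fputc(c, stdout);
}

static int puts_demo(const char *s)
{
	char c;

	while ((c = *s++))
		m32r_putc_demo(c);
	return 0;
}

int main(void)
{
	return puts_demo("hello\n");
}
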
index fa31be297b85acf1ba31ca4059f2f48825c91d87..73d92ea0ce65b5caca8e79f208995c034872b004 100644 (file)
@@ -288,7 +288,7 @@ _clear_bss:
 #endif
 
        /*
-        *      Assember start up done, start code proper.
+        *      Assembler start up done, start code proper.
         */
        jsr     start_kernel                    /* start Linux kernel */
 
index c525e4c08f8477f45bb0e978b0c436af07471cbc..9abb1a441da082e2fcf8724a0171adec50e4e7b1 100644 (file)
@@ -111,7 +111,7 @@ void __init config_BSP(char *commandp, int size)
 /***************************************************************************/
 
 /*
- * Some 5272 based boards have the FEC ethernet diectly connected to
+ * Some 5272 based boards have the FEC ethernet directly connected to
  * an ethernet switch. In this case we need to use the fixed phy type,
  * and we need to declare it early in boot.
  */
index 821de928dc3f9d14adbb16c1ad70accc9e57f477..6a640be485684f00497a63ee9630bd77df9dbe4a 100644 (file)
@@ -42,7 +42,7 @@ static unsigned long iospace;
 
 /*
  * We need to be careful probing on bus 0 (directly connected to host
- * bridge). We should only acccess the well defined possible devices in
+ * bridge). We should only access the well defined possible devices in
  * use, ignore aliases and the like.
  */
 static unsigned char mcf_host_slot2sid[32] = {
index 3ee6976f60885e5b2e0e4e2a80df6d824de14694..8f5b6f7dd1366024b973a6eb7846bdd0dd47bd02 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -359,6 +360,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -553,7 +555,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index e96787ffcbced33f9893d2760259177086b13c1f..31bded9c83d45f3a4b6861e2a5a11379324b4209 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -341,6 +342,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -512,7 +514,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 083fe6beac149da81250ca4fccd50798f9e4ef0d..0d7739e04ae2f13d63719db1a45a3a712b668e36 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -350,6 +351,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -533,7 +535,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 475130c06dcba04b646ec2e7c199aee7045dea7d..2cbb5c465fec03487046c08b8aef427a6cc81518 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -340,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 4339658c200f3eb8af4bc2645836e109466055cb..96102a42c156ee2d94fa5e069ba8e8c7f879387d 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -341,6 +342,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -514,7 +516,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 831cc8c3a2e259f67d318257e72bcab84e36b8c1..97d88f7dc5a7bd60db5f05db2483bec15de4723b 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -357,6 +358,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -536,7 +538,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 6377afeb522bbb9a235d1bd57f36cc14ca36ac5b..be25ef208f0f6237f055dea3b6cef375e59ea0a9 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -390,6 +391,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -616,7 +618,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 4304b3d56262bc677383ba4389b65d5e9a7625cd..a008344360c9161183afc063795d16bef1a945ff 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -339,6 +340,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 074bda4094ffd5b0913d4411371e2597774faab2..6735a25f36d446f389a0620faede9d3f37c42c8e 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -340,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 07b9fa8d7f2ea71dd09a26c8d9abe4a519baa07c..780c6e9f6cf9113df4c75ab3a2a8bd3220f741a4 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -346,6 +347,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -527,7 +529,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 36e6fae02d458e2dcb1a96c0caee68a5301f0341..44693cf361e5df9c2600f26d36e033e00e7bf2b2 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -337,6 +338,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -506,7 +508,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 903acf929511e5066403bfdd1c4d78bbc4084c35..ef0071d6115834657ee43e674dbb1b0fd8422482 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -337,6 +338,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -506,7 +508,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 78cb60f5bb4dda18b28276c4b8b79c787e948590..9bbffebe3eb504833ed0937670bd4168751d61a4 100644 (file)
@@ -10191,7 +10191,7 @@ xdnrm_con:
 xdnrm_sd:
        mov.l           %a1,-(%sp)
        tst.b           LOCAL_EX(%a0)           # is denorm pos or neg?
-       smi.b           %d1                     # set d0 accodingly
+       smi.b           %d1                     # set d1 accordingly
        bsr.l           unf_sub
        mov.l           (%sp)+,%a1
 xdnrm_exit:
@@ -10990,7 +10990,7 @@ src_qnan_m:
 # routines where an instruction is selected by an index into
 # a large jump table corresponding to a given instruction which
 # has been decoded. Flow continues here where we now decode
-# further accoding to the source operand type.
+# further according to the source operand type.
 #
 
        global          fsinh
@@ -23196,14 +23196,14 @@ m_sign:
 #
 #  1. Branch on the sign of the adjusted exponent.
 #  2p.(positive exp)
-#   2. Check M16 and the digits in lwords 2 and 3 in decending order.
+#   2. Check M16 and the digits in lwords 2 and 3 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Subtract the count from the exp.
 #   5. Check if the exp has crossed zero in #3 above; make the exp abs
 #         and set SE.
 #      6. Multiply the mantissa by 10**count.
 #  2n.(negative exp)
-#   2. Check the digits in lwords 3 and 2 in decending order.
+#   2. Check the digits in lwords 3 and 2 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Add the count to the exp.
 #   5. Check if the exp has crossed zero in #3 above; clear SE.
index 4aedef973cf6fbd02cc67ea65bcbcede0da21c68..3535e6c87eec611bbe03c965c0fc5aeeaeff5343 100644 (file)
@@ -13156,14 +13156,14 @@ m_sign:
 #
 #  1. Branch on the sign of the adjusted exponent.
 #  2p.(positive exp)
-#   2. Check M16 and the digits in lwords 2 and 3 in decending order.
+#   2. Check M16 and the digits in lwords 2 and 3 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Subtract the count from the exp.
 #   5. Check if the exp has crossed zero in #3 above; make the exp abs
 #         and set SE.
 #      6. Multiply the mantissa by 10**count.
 #  2n.(negative exp)
-#   2. Check the digits in lwords 3 and 2 in decending order.
+#   2. Check the digits in lwords 3 and 2 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Add the count to the exp.
 #   5. Check if the exp has crossed zero in #3 above; clear SE.
index 429fe26e320c9813afdf88011195552498a90032..208b4daa14b334f4ce4365e8eb88ca91ab30b818 100644 (file)
@@ -18,7 +18,7 @@
  * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
  *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  *
- * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
+ * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
  *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  *
  * APR/18/2002 : added proper support for MCF5272 DMA controller.
index f186459072e9b0042fbf413d9224367350adcfa6..699f20c8a0fee4fbe6e093ccf0cee8cf042f3cfb 100644 (file)
 /*
  *     I2C module.
  */
-#define MCFI2C_BASE0           (MCF_MBAR + 0x280)      /* Base addreess I2C0 */
+#define MCFI2C_BASE0           (MCF_MBAR + 0x280)      /* Base address I2C0 */
 #define MCFI2C_SIZE0           0x20                    /* Register set size */
 
-#define MCFI2C_BASE1           (MCF_MBAR2 + 0x440)     /* Base addreess I2C1 */
+#define MCFI2C_BASE1           (MCF_MBAR2 + 0x440)     /* Base address I2C1 */
 #define MCFI2C_SIZE1           0x20                    /* Register set size */
 
 /*
index f9924fbcfe42b8f279d83758e4fc987cb32da409..fb95aed5f428110ef54a43ef355c12ab0bf59389 100644 (file)
@@ -14,7 +14,7 @@ extern const char bad_pmd_string[];
 extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
        unsigned long address)
 {
-       unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+       unsigned long page = __get_free_page(GFP_DMA);
 
        if (!page)
                return NULL;
@@ -51,7 +51,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
        unsigned long address)
 {
-       struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+       struct page *page = alloc_pages(GFP_DMA, 0);
        pte_t *pte;
 
        if (!page)
index 26cc3d5a63f82f2de7021a8a3da28e554f1c815e..8824236e303fe815fe99a24008ba4a7b125fb1a5 100644 (file)
@@ -38,7 +38,7 @@
 /*
  *     MMU Operation register.
  */
-#define        MMUOR_UAA       0x00000001              /* Update allocatiom address */
+#define        MMUOR_UAA       0x00000001              /* Update allocation address */
 #define        MMUOR_ACC       0x00000002              /* TLB access */
 #define        MMUOR_RD        0x00000004              /* TLB access read */
 #define        MMUOR_WR        0x00000000              /* TLB access write */
index 24bcba496c7505e94d5c45512c399e773106b029..c895b987202cd07fbc9f38ad727bf79b53587a29 100644 (file)
@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
 {
        pte_t *pte;
 
-       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (pte) {
                __flush_page_to_ram(pte);
                flush_tlb_kernel_page(pte);
@@ -32,7 +32,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
        struct page *page;
        pte_t *pte;
 
-       page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+       page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
        if(!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
index fc5b36278d040b0620778cbf1f67499f8db5eb6f..c48d21b68f0485fbce8dc5a85bf553b51ac05642 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Q40 master Chip Control
- * RTC stuff merged for compactnes..
+ * RTC stuff merged for compactness.
 */
 
 #ifndef _Q40_MASTER_H
index 0931388de47f600d75bf52a4fd8a60b8117fe1b2..1901f61f926f5ec5b855d51c9b3fea7f835b29da 100644 (file)
@@ -37,7 +37,7 @@ do {                                                  \
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       unsigned long page = __get_free_page(GFP_KERNEL);
 
        if (!page)
                return NULL;
@@ -49,7 +49,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                        unsigned long address)
 {
-        struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        struct page *page = alloc_pages(GFP_KERNEL, 0);
 
        if (page == NULL)
                return NULL;
index 4d2adfb32a2ab4f61951357114a8d053bf74de82..7990b6f50105b19b48417843343778b00c7dc03e 100644 (file)
@@ -60,7 +60,7 @@
  *
  * The host talks to the IOPs using a rather simple message-passing scheme via
  * a shared memory area in the IOP RAM. Each IOP has seven "channels"; each
- * channel is conneced to a specific software driver on the IOP. For example
+ * channel is connected to a specific software driver on the IOP. For example
  * on the SCC IOP there is one channel for each serial port. Each channel has
  * an incoming and an outgoing message queue with a depth of one.
  *
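
The comment above describes per-channel message passing through depth-one queues in shared RAM. A toy sketch of such a single-slot mailbox, with invented field names, shows why a second send must wait for the receiver:

#include <stdio.h>

/* Toy depth-one mailbox, one per channel direction, as the comment
 * above describes. Field names are invented for illustration. */
struct mailbox {
	int full;		/* 1 while a message is pending */
	unsigned char msg;
};

static int mbox_send(struct mailbox *mb, unsigned char m)
{
	if (mb->full)
		return -1;	/* depth is one: sender must wait */
	mb->msg = m;
	mb->full = 1;
	return 0;
}

static int mbox_recv(struct mailbox *mb, unsigned char *m)
{
	if (!mb->full)
		return -1;
	*m = mb->msg;
	mb->full = 0;
	return 0;
}

int main(void)
{
	struct mailbox chan = { 0, 0 };
	unsigned char m;

	mbox_send(&chan, 0x42);
	printf("second send: %s\n",
	       mbox_send(&chan, 0x43) ? "blocked" : "ok");
	mbox_recv(&chan, &m);
	printf("received 0x%02x\n", m);
	return 0;
}
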
index 759679d9ab96610bec00ec8935063e25f2cfae4d..6d1e760e2a0e21825a4ff54ee83ec7ccca0fa5a3 100644 (file)
@@ -130,7 +130,7 @@ do_fscc=0
        bfextu  %d2{#13,#3},%d0
 .endm
 
-| decode the 8bit diplacement from the brief extension word
+| decode the 8bit displacement from the brief extension word
 .macro fp_decode_disp8
        move.b  %d2,%d0
        ext.w   %d0
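
The move.b/ext.w pair in this macro is an 8-bit sign extension of the brief-extension-word displacement. The equivalent operation in C, for reference:

#include <stdio.h>
#include <stdint.h>

/* Sign-extend the low 8 bits of a word, as move.b/ext.w do on m68k. */
static int sext8(uint32_t raw)
{
	return (int8_t)(raw & 0xff);
}

int main(void)
{
	printf("0x7f -> %d\n", sext8(0x7f));	/* +127 */
	printf("0x80 -> %d\n", sext8(0x80));	/* -128 */
	printf("0xff -> %d\n", sext8(0xff));	/*   -1 */
	return 0;
}
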
index 3104df0a4822fdf766aa8821e3dc17165e5bbc0c..c2caa1ee436057368753bfea9fb839078d41b636 100644 (file)
@@ -42,8 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
-                                             __GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        return pte;
 }
 
@@ -51,7 +50,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                      unsigned long address)
 {
        struct page *pte;
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+       pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
index 61436d69775ca216096cd95169e2bd89eb34b526..7c89390c0c13dc8bfafd35c30bd9fd7372ffa385 100644 (file)
@@ -116,9 +116,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
        struct page *ptepage;
 
 #ifdef CONFIG_HIGHPTE
-       int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+       int flags = GFP_KERNEL | __GFP_HIGHMEM;
 #else
-       int flags = GFP_KERNEL | __GFP_REPEAT;
+       int flags = GFP_KERNEL;
 #endif
 
        ptepage = alloc_pages(flags, 0);
index 4f4520e779a5bf06ccfc69c7b3671961c1967dff..eb99fcc760884e4ee01f1e6bf73a6802a1277169 100644 (file)
@@ -239,8 +239,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
        if (mem_init_done) {
-               pte = (pte_t *)__get_free_page(GFP_KERNEL |
-                                       __GFP_REPEAT | __GFP_ZERO);
+               pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        } else {
                pte = (pte_t *)early_get_page();
                if (pte)
index 6733ac575da4fd13d7cee0e06e4e45e0c2d9b0a6..36a391d289aa09494b4892f995a3c2f591ed837a 100644 (file)
@@ -74,7 +74,7 @@
 #define KVM_GUEST_KUSEG                        0x00000000UL
 #define KVM_GUEST_KSEG0                        0x40000000UL
 #define KVM_GUEST_KSEG23               0x60000000UL
-#define KVM_GUEST_KSEGX(a)             ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_KSEGX(a)             ((_ACAST32_(a)) & 0xe0000000)
 #define KVM_GUEST_CPHYSADDR(a)         ((_ACAST32_(a)) & 0x1fffffff)
 
 #define KVM_GUEST_CKSEG0ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
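
The mask widening above matters because 0x60000000 ignores bit 31, so an address such as 0xC0000000 compared equal to KVM_GUEST_KSEG0. A quick stand-alone check using the file's own constants:

#include <stdio.h>

#define KVM_GUEST_KSEG0	0x40000000UL
#define OLD_MASK	0x60000000UL	/* dropped bit 31 */
#define NEW_MASK	0xe0000000UL

int main(void)
{
	unsigned long addr = 0xC0000000UL;	/* not a guest kseg0 address */

	printf("old mask: %s\n", (addr & OLD_MASK) == KVM_GUEST_KSEG0 ?
	       "misclassified as KSEG0" : "ok");
	printf("new mask: %s\n", (addr & NEW_MASK) == KVM_GUEST_KSEG0 ?
	       "misclassified as KSEG0" : "ok");
	return 0;
}
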
@@ -338,6 +338,7 @@ struct kvm_mips_tlb {
 #define KVM_MIPS_GUEST_TLB_SIZE        64
 struct kvm_vcpu_arch {
        void *host_ebase, *guest_ebase;
+       int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        unsigned long host_stack;
        unsigned long host_gp;
 
index b336037e8768cfd50e45ff877f759a0039bbeb5e..93c079a1cfc8e1f13366590e939c47eade8db789 100644 (file)
@@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
        return pte;
 }
@@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+       pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
        if (!pte)
                return NULL;
        clear_highpage(pte);
@@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        pmd_t *pmd;
 
-       pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+       pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
        if (pmd)
                pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
        return pmd;
index a6b611f1da4390356cd3412672cc0b35398bb0e8..7d44e888134f9621d8056ac56fdf6916394619b7 100644 (file)
@@ -24,7 +24,7 @@ struct mm_struct;
 struct vm_area_struct;
 
 #define PAGE_NONE      __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
-                                _CACHE_CACHABLE_NONCOHERENT)
+                                _page_cachable_default)
 #define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
                                 _page_cachable_default)
 #define PAGE_COPY      __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
@@ -476,7 +476,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
        pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
        pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
        pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
-       pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+       pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
        return pte;
 }
 #elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
@@ -491,7 +491,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #else
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+                    (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
 }
 #endif
 
@@ -632,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-       pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+       pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
+                      (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
        return pmd;
 }
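
Both pte_modify() fixes above apply the same rule: mask the incoming pgprot with ~_PAGE_CHG_MASK so protection bits can never clobber the PFN and dirty/accessed state the function must preserve. A worked example with an invented 32-bit layout:

#include <stdio.h>

/* Invented 32-bit layout: high half is the PFN plus change-tracked
 * bits that pte_modify() must preserve (_PAGE_CHG_MASK). */
#define PAGE_CHG_MASK	0xffff0000u

static unsigned int pte_modify_old(unsigned int pte, unsigned int prot)
{
	return (pte & PAGE_CHG_MASK) | prot;	/* prot can clobber the PFN */
}

static unsigned int pte_modify_new(unsigned int pte, unsigned int prot)
{
	return (pte & PAGE_CHG_MASK) | (prot & ~PAGE_CHG_MASK);
}

int main(void)
{
	unsigned int pte  = 0x12340003;	/* PFN 0x1234, some low flags */
	unsigned int prot = 0x00010005;	/* prot with a stray high bit */

	printf("old: %08x (PFN corrupted)\n", pte_modify_old(pte, prot));
	printf("new: %08x (PFN preserved)\n", pte_modify_new(pte, prot));
	return 0;
}
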
 
index 396df6eb0a12d9bdb9a886f00be348c4b8698ec2..645c8a1982a7b2ab828f83d1cd34b5bdf2550cb7 100644 (file)
@@ -1636,6 +1636,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                if (index < 0) {
                        vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
                        vcpu->arch.host_cp0_badvaddr = va;
+                       vcpu->arch.pc = curr_pc;
                        er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
                                                         vcpu);
                        preempt_enable();
@@ -1647,6 +1648,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                         * invalid exception to the guest
                         */
                        if (!TLB_IS_VALID(*tlb, va)) {
+                               vcpu->arch.host_cp0_badvaddr = va;
+                               vcpu->arch.pc = curr_pc;
                                er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
                                                                run, vcpu);
                                preempt_enable();
@@ -1666,7 +1669,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                        cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
                preempt_enable();
-               goto dont_update_pc;
+               goto done;
 
        }
 
@@ -1694,16 +1697,20 @@ skip_fault:
                kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
                        cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
-               preempt_enable();
-               goto dont_update_pc;
        }
 
        preempt_enable();
+done:
+       /* Rollback PC only if emulation was unsuccessful */
+       if (er == EMULATE_FAIL)
+               vcpu->arch.pc = curr_pc;
 
 dont_update_pc:
-       /* Rollback PC */
-       vcpu->arch.pc = curr_pc;
-done:
+       /*
+        * This is for exceptions whose emulation updates the PC, so do not
+        * overwrite the PC under any circumstances
+        */
+
        return er;
 }
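
The restructured exit paths above enforce one rule: restore curr_pc only when emulation failed outright, and never when an emulated exception has already repointed the PC. The control flow in miniature; the vector address and 4-byte step are illustrative:

#include <stdio.h>

enum { EMULATE_DONE, EMULATE_FAIL };

static unsigned long pc;

static int emulate(int fail, int raises_exception)
{
	unsigned long curr_pc = pc;	/* snapshot before stepping */

	pc += 4;			/* provisionally advance past the insn */
	if (raises_exception) {
		pc = 0x80000180;	/* emulated exception owns the PC now */
		return EMULATE_DONE;	/* so never roll back over it */
	}
	if (fail)
		pc = curr_pc;		/* roll back only on failure */
	return fail ? EMULATE_FAIL : EMULATE_DONE;
}

int main(void)
{
	pc = 0x1000; emulate(0, 0); printf("ok:   pc=%#lx\n", pc);
	pc = 0x1000; emulate(1, 0); printf("fail: pc=%#lx\n", pc);
	pc = 0x1000; emulate(0, 1); printf("exc:  pc=%#lx\n", pc);
	return 0;
}
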
 
index 4ab4bdfad703e5f19ed1f2db2c10bbece179f259..2143884709e4761ca440cab2944709f1e85661f5 100644 (file)
@@ -28,6 +28,7 @@
 #define MIPS_EXC_MAX                12
 /* XXXSL More to follow */
 
+extern char __kvm_mips_vcpu_run_end[];
 extern char mips32_exception[], mips32_exceptionEnd[];
 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
 
index 3ef03009de5f65fa4807cac71bb9749baaa63ab7..828fcfc1cd7fe9037dc70c1828d4630ac9839c33 100644 (file)
@@ -202,6 +202,7 @@ FEXPORT(__kvm_mips_load_k0k1)
 
        /* Jump to guest */
        eret
+EXPORT(__kvm_mips_vcpu_run_end)
 
 VECTOR(MIPSX(exception), unknown)
 /* Find out what mode we came from and jump to the proper handler. */
index dc052fb5c7a2f80cdc20b7bf282b0cb09fc30a2e..44da5259f39027e9bd8d0e6584c864997d3388e9 100644 (file)
@@ -315,6 +315,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
        memcpy(gebase + offset, mips32_GuestException,
               mips32_GuestExceptionEnd - mips32_GuestException);
 
+#ifdef MODULE
+       offset += mips32_GuestExceptionEnd - mips32_GuestException;
+       memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+              __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+       vcpu->arch.vcpu_run = gebase + offset;
+#else
+       vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
        /* Invalidate the icache for these ranges */
        local_flush_icache_range((unsigned long)gebase,
                                (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
@@ -404,7 +413,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        /* Disable hardware page table walking while in guest */
        htw_stop();
 
-       r = __kvm_mips_vcpu_run(run, vcpu);
+       r = vcpu->arch.vcpu_run(run, vcpu);
 
        /* Re-enable HTW before enabling interrupts */
        htw_start();
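
When KVM is built as a module its text can be unreachable from guest context, so the hunks above copy __kvm_mips_vcpu_run into the exception base and dispatch through a per-vcpu pointer. A user-space sketch of the dispatch half; the actual code relocation is ISA-specific and omitted:

#include <stdio.h>

struct vcpu {
	/* chosen at vcpu creation: either the built-in entry code or
	 * the copy relocated into the exception base */
	int (*vcpu_run)(struct vcpu *v);
};

static int builtin_run(struct vcpu *v)   { (void)v; return 1; }
static int relocated_run(struct vcpu *v) { (void)v; return 2; }

static int vcpu_ioctl_run(struct vcpu *v)
{
	return v->vcpu_run(v);	/* callers never hardcode the symbol */
}

int main(void)
{
	struct vcpu a = { builtin_run }, b = { relocated_run };

	printf("%d %d\n", vcpu_ioctl_run(&a), vcpu_ioctl_run(&b));
	return 0;
}
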
index 4861a78c7160f7b1912804439307b77599317619..f5f90bbf019d6842c0cf2cb2b4ae11fcdc362b07 100644 (file)
@@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
 #endif
 #define get_thread_info(ti)    get_task_struct((ti)->task)
 #define put_thread_info(ti)    put_task_struct((ti)->task)
index 99770823451a7e8b1fc2552c948e8f651951e334..2d7986c386fe5964a51d5eddec5cfac578fbb0e4 100644 (file)
@@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
  * single-step state is cleared.  At this point the breakpoints should have
  * been removed by __switch_to().
  */
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
 {
+       struct thread_info *ti = (void *)stack;
        if (kgdb_sstep_thread == ti) {
                kgdb_sstep_thread = NULL;
 
index e77a7c728081125233ebf569dac465960a997aff..9577cf768875c8bd207b1ceb6f1ba8ddb8d943a3 100644 (file)
@@ -63,7 +63,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
        if (pte)
                clear_page(pte);
        return pte;
@@ -74,9 +74,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
        struct page *pte;
 
 #ifdef CONFIG_HIGHPTE
-       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL, 0);
 #endif
        if (!pte)
                return NULL;
index 6e2985e0a7b90284641d41a839b34df22d8864fd..bb47d08c8ef76f085676fe9ce7d26810a90fffa7 100644 (file)
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-                                       PTE_ORDER);
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
        return pte;
 }
@@ -53,7 +52,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+       pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
        if (pte) {
                if (!pgtable_page_ctor(pte)) {
                        __free_page(pte);
index 21484e5b9e9af1cec95e2d786d87816caccb13e4..87eebd18508953733ddc5cd9ed1f387b7abfb88d 100644 (file)
@@ -77,7 +77,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
                                         unsigned long address)
 {
        struct page *pte;
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL, 0);
        if (!pte)
                return NULL;
        clear_page(page_address(pte));
index 62b08ef392be2c202f5ed73326fe708b9eda8e41..5b2a95116e8fa0e3d3fc95d3094bfdcc682b47d9 100644 (file)
@@ -122,7 +122,7 @@ pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
        pte_t *pte;
 
        if (likely(mem_init_done)) {
-               pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+               pte = (pte_t *) __get_free_page(GFP_KERNEL);
        } else {
                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 #if 0
index f2fd327dce2edc14db5717211fe27943e957fcf0..f08dda3f0995e41841c0555655a51d16bdc91b97 100644 (file)
@@ -63,8 +63,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
-                                              PMD_ORDER);
+       pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
        if (pmd)
                memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
        return pmd;
@@ -124,7 +123,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline pgtable_t
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
@@ -137,7 +136,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
index 4736020ba5eabeb05841e5a9dc4834026f61a92d..5e953ab4530dbd2a5cb1b9cff2257e0bee8dba55 100644 (file)
@@ -8,6 +8,8 @@ struct pt_regs;
 void parisc_terminate(char *msg, struct pt_regs *regs,
                int code, unsigned long offset) __noreturn __cold;
 
+void die_if_kernel(char *str, struct pt_regs *regs, long err);
+
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *regs, unsigned long code,
                unsigned long address);
index e81ccf1716e913e21af99a6c7ff24f8a7148b38b..5adc339eb7c8dab935fbbc933a64e811f6800ccc 100644 (file)
@@ -324,8 +324,9 @@ int init_per_cpu(int cpunum)
                per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
                per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
-               printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
-                       cpunum, coproc_cfg.revision, coproc_cfg.model);
+               if (cpunum == 0)
+                       printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
+                               cpunum, coproc_cfg.revision, coproc_cfg.model);
 
                /*
                ** store status register to stack (hopefully aligned)
index 58dd6801f5bece511f16603b7db47906637582b0..31ec99a5f1196bbad6ce28aabcba8d111a63644f 100644 (file)
@@ -309,11 +309,6 @@ void __init time_init(void)
        clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
                                NSEC_PER_MSEC, 0);
 
-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
-       /* At bootup only one 64bit CPU is online and cr16 is "stable" */
-       set_sched_clock_stable();
-#endif
-
        start_cpu_itimer();     /* get CPU 0 started */
 
        /* register at clocksource framework */
index d7c0acb35ec248c51329a5cb4594189bd5f5040f..2b65c01777789f922d13a1e165d937e5e464e87a 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/ratelimit.h>
 #include <asm/uaccess.h>
 #include <asm/hardirq.h>
+#include <asm/traps.h>
 
 /* #define DEBUG_UNALIGNED 1 */
 
 
 int unaligned_enabled __read_mostly = 1;
 
-void die_if_kernel (char *str, struct pt_regs *regs, long err);
-
 static int emulate_ldh(struct pt_regs *regs, int toreg)
 {
        unsigned long saddr = regs->ior;
@@ -666,7 +665,7 @@ void handle_unaligned(struct pt_regs *regs)
                break;
        }
 
-       if (modify && R1(regs->iir))
+       if (ret == 0 && modify && R1(regs->iir))
                regs->gr[R1(regs->iir)] = newbase;
 
 
@@ -677,6 +676,14 @@ void handle_unaligned(struct pt_regs *regs)
 
        if (ret)
        {
+               /*
+                * The unaligned handler failed.
+                * If we were called by __get_user() or __put_user() jump
+                * to it's exception fixup handler instead of crashing.
+                */
+               if (!user_mode(regs) && fixup_exception(regs))
+                       return;
+
                printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
                die_if_kernel("Unaligned data reference", regs, 28);
 
index ddd988b267a9ddde0e9d2433f87e036f41e9d2a1..e278a87f43ccb467245533afb781c0117675fd07 100644 (file)
@@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr)
        if (addr >= kernel_unwind_table.start && 
            addr <= kernel_unwind_table.end)
                e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
-       else 
+       else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&unwind_lock, flags);
                list_for_each_entry(table, &unwind_tables, list) {
                        if (addr >= table->start && 
                            addr <= table->end)
@@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr)
                                break;
                        }
                }
+               spin_unlock_irqrestore(&unwind_lock, flags);
+       }
 
        return e;
 }
@@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
 
                        insn = *(unsigned int *)npc;
 
-                       if ((insn & 0xffffc000) == 0x37de0000 ||
-                           (insn & 0xffe00000) == 0x6fc00000) {
+                       if ((insn & 0xffffc001) == 0x37de0000 ||
+                           (insn & 0xffe00001) == 0x6fc00000) {
                                /* ldo X(sp), sp, or stwm X,D(sp) */
-                               frame_size += (insn & 0x1 ? -1 << 13 : 0) | 
-                                       ((insn & 0x3fff) >> 1);
+                               frame_size += (insn & 0x3fff) >> 1;
                                dbg("analyzing func @ %lx, insn=%08x @ "
                                    "%lx, frame_size = %ld\n", info->ip,
                                    insn, npc, frame_size);
-                       } else if ((insn & 0xffe00008) == 0x73c00008) {
+                       } else if ((insn & 0xffe00009) == 0x73c00008) {
                                /* std,ma X,D(sp) */
-                               frame_size += (insn & 0x1 ? -1 << 13 : 0) | 
-                                       (((insn >> 4) & 0x3ff) << 3);
+                               frame_size += ((insn >> 4) & 0x3ff) << 3;
                                dbg("analyzing func @ %lx, insn=%08x @ "
                                    "%lx, frame_size = %ld\n", info->ip,
                                    insn, npc, frame_size);
@@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
                        }
                }
 
+               if (frame_size > e->Total_frame_size << 3)
+                       frame_size = e->Total_frame_size << 3;
+
                if (!unwind_special(info, e->region_start, frame_size)) {
                        info->prev_sp = info->sp - frame_size;
                        if (e->Millicode)
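
Two hardenings land in this unwinder: module tables are now walked under unwind_lock, since modules can unregister them concurrently, and the decoded frame size is clamped to the table's Total_frame_size so one misdecoded instruction cannot push the walk arbitrarily far. A pthread sketch of the locked-lookup half, with invented types:

#include <pthread.h>
#include <stdio.h>

struct unwind_table {		/* invented stand-in */
	unsigned long start, end;
	struct unwind_table *next;
};

static pthread_mutex_t unwind_lock = PTHREAD_MUTEX_INITIALIZER;
static struct unwind_table *tables;	/* modules add and remove entries */

static struct unwind_table *find_table(unsigned long addr)
{
	struct unwind_table *t, *found = NULL;

	pthread_mutex_lock(&unwind_lock);	/* list may mutate under us */
	for (t = tables; t; t = t->next) {
		if (addr >= t->start && addr <= t->end) {
			found = t;
			break;
		}
	}
	pthread_mutex_unlock(&unwind_lock);
	return found;
}

int main(void)
{
	struct unwind_table mod = { 0x1000, 0x2000, NULL };

	tables = &mod;
	printf("%s\n", find_table(0x1800) ? "hit" : "miss");
	return 0;
}
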
index 01f7464d9fea68033b7ef8876e22e0352e365597..0a9d439bcda6c1ade0025d5e894e14d2db4b24be 100644 (file)
@@ -128,7 +128,7 @@ config PPC
        select IRQ_FORCED_THREADING
        select HAVE_RCU_TABLE_FREE if SMP
        select HAVE_SYSCALL_TRACEPOINTS
-       select HAVE_CBPF_JIT
+       select HAVE_CBPF_JIT if CPU_BIG_ENDIAN
        select HAVE_ARCH_JUMP_LABEL
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_HAS_GCOV_PROFILE_ALL
index a2350194fc7640bf96a3ec42f12085efc63a5711..8e21bb492dca038358f6ca7a10d69a783cd6364c 100644 (file)
@@ -102,7 +102,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       tlb_flush_pgtable(tlb, address);
        pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
index 290157e8d5b2498bce2d6ffb14261124d47a25a7..74839f24f4124614eb23ed8dd2c7098ad5f6eb29 100644 (file)
@@ -88,6 +88,7 @@
 #define HPTE_R_RPN_SHIFT       12
 #define HPTE_R_RPN             ASM_CONST(0x0ffffffffffff000)
 #define HPTE_R_PP              ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP             ASM_CONST(0x8000000000000003)
 #define HPTE_R_N               ASM_CONST(0x0000000000000004)
 #define HPTE_R_G               ASM_CONST(0x0000000000000008)
 #define HPTE_R_M               ASM_CONST(0x0000000000000010)
index 488279edb1f045e00cb8bf2261367d319ad862ea..cd5e7aa8cc3489e9f44540e66cafc852bb67d1b7 100644 (file)
@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
                        pgtable_cache[(shift) - 1];     \
                })
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
        return (pgd_t *)__get_free_page(PGALLOC_GFP);
 #else
        struct page *page;
-       page = alloc_pages(PGALLOC_GFP, 4);
+       page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
        if (!page)
                return NULL;
        return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -110,13 +109,17 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long address)
 {
+       /*
+        * By now all the pud entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
         pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -127,6 +130,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long address)
 {
+       /*
+        * By now all the pmd entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
         return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
 }
 
@@ -151,7 +159,7 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -198,7 +206,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       tlb_flush_pgtable(tlb, address);
+       /*
+        * By now all the pud entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, table, 0);
 }
 
index 88a5ecaa157b5a2fd474f23759e11e2764f973e5..ab84c89c9e982129c3cde79792548c6078d42065 100644 (file)
@@ -230,6 +230,7 @@ extern unsigned long __kernel_virt_size;
 #define KERN_VIRT_SIZE  __kernel_virt_size
 extern struct page *vmemmap;
 extern unsigned long ioremap_bot;
+extern unsigned long pci_io_base;
 #endif /* __ASSEMBLY__ */
 
 #include <asm/book3s/64/hash.h>
index 937d4e247ac31aeb614de3ba59c7e21900dac1cc..df294224e28020cfd438155c6733132b8c906a07 100644 (file)
@@ -228,5 +228,20 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
 
 extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                                 pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+       unsigned long rts_field;
+       /*
+        * we support 52 bits, hence 52-31 = 21, 0b10101
+        * RTS encoding details
+        * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
+        * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
+        */
+       rts_field = (0x5UL << 5); /* 6 - 8 bits */
+       rts_field |= (0x2UL << 61);
+
+       return rts_field;
+}
 #endif /* __ASSEMBLY__ */
 #endif
index 13ef38828dfee2fec57551bf2154aabbac3422a4..3fa94fcac6284e20e51566367b58c027ef2e3716 100644 (file)
@@ -18,16 +18,19 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                                    unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                              unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 #else
 #define radix__flush_tlb_mm(mm)                radix__local_flush_tlb_mm(mm)
 #define radix__flush_tlb_page(vma,addr)        radix__local_flush_tlb_page(vma,addr)
 #define radix___flush_tlb_page(mm,addr,p,i)    radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr)        radix__local_flush_tlb_pwc(tlb, addr)
 #endif
 
 #endif
index d98424ae356c49356c459334468c488b786b9be8..96e5769b18b00fe8fbcd7d788c3c94457d15a98a 100644 (file)
@@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 #define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
 #define flush_tlb_page(vma, addr)      local_flush_tlb_page(vma, addr)
 #endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+       /*
+        * Flush the page table walk cache on freeing a page table. We already
+        * have marked the upper/higher level page table entry none by now.
+        * So it is safe to flush PWC here.
+        */
+       if (!radix_enabled())
+               return;
 
+       radix__flush_tlb_pwc(tlb, address);
+}
 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
index 54f591e9572eeb4ea6c3ad99d4f397034e6acbe9..c0a69ae922567b5fc819069e7e7d150992417866 100644 (file)
@@ -4,11 +4,6 @@
 #include <linux/mm.h>
 
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
-                                    unsigned long address)
-{
-
-}
 
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/pgalloc.h>
index 0c12a3bfe2ab10d49f76e24c4d1dbaa59096b2b0..897d2e1c8a9b84ffa96cc283963e9d322d276ad0 100644 (file)
@@ -57,8 +57,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -88,7 +87,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -172,7 +171,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-       pte_fragment_fre((unsigned long *)pte, 1);
+       pte_fragment_free((unsigned long *)pte, 1);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
@@ -190,8 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
index c1e82e968506302dbbf81ddf4345fc4bf26b15c5..a0948f40bc7bc96d60cf15d93fd252ef6d3d01f4 100644 (file)
 #define   MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
 #define   MMCR0_FCHV   0x00000001UL /* freeze conditions in hypervisor mode */
 #define SPRN_MMCR1     798
-#define SPRN_MMCR2     769
+#define SPRN_MMCR2     785
 #define SPRN_MMCRA     0x312
 #define   MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
 #define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
 #define SPRN_PMC6      792
 #define SPRN_PMC7      793
 #define SPRN_PMC8      794
-#define SPRN_SIAR      780
-#define SPRN_SDAR      781
 #define SPRN_SIER      784
 #define   SIER_SIPR            0x2000000       /* Sampled MSR_PR */
 #define   SIER_SIHV            0x1000000       /* Sampled MSR_HV */
 #define   SIER_SIAR_VALID      0x0400000       /* SIAR contents valid */
 #define   SIER_SDAR_VALID      0x0200000       /* SDAR contents valid */
+#define SPRN_SIAR      796
+#define SPRN_SDAR      797
 #define SPRN_TACR      888
 #define SPRN_TCSCR     889
 #define SPRN_CSIGR     890
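
The corrected numbers above (MMCR2=785, SIAR=796, SDAR=797) end up baked into mfspr/mtspr instruction encodings, so a wrong SPRN_* silently accesses a different register with no runtime error. As a reminder that the constant becomes the instruction's SPR field, a small sketch; the half-swap shown is my reading of the ISA encoding, so verify against the manual:

#include <stdio.h>

#define SPRN_MMCR2	785
#define SPRN_SIAR	796
#define SPRN_SDAR	797

/* mfspr/mtspr carry the 10-bit SPR number with its 5-bit halves
 * swapped; shown only to illustrate that the constant is baked into
 * the instruction, so a wrong SPRN_* touches a different register. */
static unsigned int spr_field(unsigned int spr)
{
	return ((spr & 0x1f) << 5) | ((spr >> 5) & 0x1f);
}

int main(void)
{
	printf("MMCR2 -> SPR field %#x\n", spr_field(SPRN_MMCR2));
	printf("SIAR  -> SPR field %#x\n", spr_field(SPRN_SIAR));
	printf("SDAR  -> SPR field %#x\n", spr_field(SPRN_SDAR));
	return 0;
}
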
index 2714a3b81d24476ad07d651c64720f42ab96bcec..d70101e1e25c1ac902996b2ba29bb8aa6f3f335e 100644 (file)
@@ -642,13 +642,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
                if (pe->type & EEH_PE_VF) {
                        eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
                } else {
-                       eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                        pci_lock_rescan_remove();
                        pci_hp_remove_devices(bus);
                        pci_unlock_rescan_remove();
                }
        } else if (frozen_bus) {
-               eeh_pe_dev_traverse(pe, eeh_rmv_device, &rmv_data);
+               eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
        }
 
        /*
@@ -692,10 +691,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
                 */
                edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
                eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
-               if (pe->type & EEH_PE_VF)
+               if (pe->type & EEH_PE_VF) {
                        eeh_add_virt_device(edev, NULL);
-               else
+               } else {
+                       eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                        pci_hp_add_devices(bus);
+               }
        } else if (frozen_bus && rmv_data->removed) {
                pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
                ssleep(5);
index 4c9440629128ccd94dbc35d5eb0b6e060d4d970f..8bcc1b457115867e93ace2317f203e1cda100352 100644 (file)
@@ -1399,11 +1399,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
 
        mtlr    r10
-BEGIN_MMU_FTR_SECTION
-       b       2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
        andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
+BEGIN_MMU_FTR_SECTION
        beq-    2f
+FTR_SECTION_ELSE
+       b       2f
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
 
 .machine       push
 .machine       "power4"
index 3759df52bd671d883c38aec23fe4b133e6c0d0f7..a5ae49a2dcc47a12163e6ec6ae50ff85d3bd2bb4 100644 (file)
@@ -47,7 +47,6 @@ static int __init pcibios_init(void)
 
        printk(KERN_INFO "PCI: Probing PCI hardware\n");
 
-       pci_io_base = ISA_IO_BASE;
        /* For now, override phys_mem_access_prot. If we need it
         * later, we may move that initialization to each ppc_md
         */
index e2f12cbcade9a49287c370204bcc71a4fb23b33a..0b93893424f5b2fe8246694e5e5e31c7b5324a0f 100644 (file)
@@ -1505,6 +1505,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
                current->thread.regs = regs - 1;
        }
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+       /*
+        * Clear any transactional state; we're exec()ing. The cause is
+        * not important as there will never be a recheckpoint so it's not
+        * user visible.
+        */
+       if (MSR_TM_SUSPENDED(mfmsr()))
+               tm_reclaim_current(0);
+#endif
+
        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
index da5192590c445f89c2a8466128b570c87a133470..6ee4b72cda4201840cf2b85f38661831c5e37233 100644 (file)
@@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = {
        W(0xffff0000), W(0x003e0000),   /* POWER6 */
        W(0xffff0000), W(0x003f0000),   /* POWER7 */
        W(0xffff0000), W(0x004b0000),   /* POWER8E */
+       W(0xffff0000), W(0x004c0000),   /* POWER8NVL */
        W(0xffff0000), W(0x004d0000),   /* POWER8 */
        W(0xffffffff), W(0x0f000004),   /* all 2.07-compliant */
        W(0xffffffff), W(0x0f000003),   /* all 2.06-compliant */
@@ -718,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
         * must match by the macro below. Update the definition if
         * the structure layout changes.
         */
-#define IBM_ARCH_VEC_NRCORES_OFFSET    125
+#define IBM_ARCH_VEC_NRCORES_OFFSET    133
        W(NR_CPUS),                     /* number of cores supported */
        0,
        0,
index 30a03c03fe734a8c80828fc4260fe3679ab63e85..060b140f03c69de424e40e4dcdb5064d832ce889 100644 (file)
@@ -377,7 +377,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 
 #else
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-                    offsetof(struct thread_fp_state, fpr[32][0]));
+                    offsetof(struct thread_fp_state, fpr[32]));
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fp_state, 0, -1);
@@ -405,7 +405,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
        return 0;
 #else
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-                    offsetof(struct thread_fp_state, fpr[32][0]));
+                    offsetof(struct thread_fp_state, fpr[32]));
 
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fp_state, 0, -1);
index bf8f34a5867088d1a2346d17b89e2daaabe365ab..b7019b559ddbffd817563e6f0d3ae5f3d134b2ca 100644 (file)
@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
        std     r3, STK_PARAM(R3)(r1)
        SAVE_NVGPRS(r1)
 
-       /* We need to setup MSR for VSX register save instructions.  Here we
-        * also clear the MSR RI since when we do the treclaim, we won't have a
-        * valid kernel pointer for a while.  We clear RI here as it avoids
-        * adding another mtmsr closer to the treclaim.  This makes the region
-        * maked as non-recoverable wider than it needs to be but it saves on
-        * inserting another mtmsrd later.
-        */
+       /* We need to set up MSR for VSX register save instructions. */
        mfmsr   r14
        mr      r15, r14
        ori     r15, r15, MSR_FP
-       li      r16, MSR_RI
+       li      r16, 0
        ori     r16, r16, MSR_EE /* IRQs hard off */
        andc    r15, r15, r16
        oris    r15, r15, MSR_VEC@h
@@ -176,7 +170,17 @@ dont_backup_fp:
 1:     tdeqi   r6, 0
        EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
 
-       /* The moment we treclaim, ALL of our GPRs will switch
+       /* Clear MSR RI since we are about to change r1; EE is already off. */
+       li      r4, 0
+       mtmsrd  r4, 1
+
+       /*
+        * BE CAREFUL HERE:
+        * At this point we can't take an SLB miss since we have MSR_RI
+        * off. Load only to/from the stack/paca which are in SLB bolted regions
+        * until we turn MSR RI back on.
+        *
+        * The moment we treclaim, ALL of our GPRs will switch
         * to user register state.  (FPRs, CCR etc. also!)
         * Use an sprg and a tm_scratch in the PACA to shuffle.
         */
@@ -197,6 +201,11 @@ dont_backup_fp:
 
        /* Store the PPR in r11 and reset to decent value */
        std     r11, GPR11(r1)                  /* Temporary stash */
+
+       /* Reset MSR RI so we can take SLB faults again */
+       li      r11, MSR_RI
+       mtmsrd  r11, 1
+
        mfspr   r11, SPRN_PPR
        HMT_MEDIUM
 
@@ -397,11 +406,6 @@ restore_gprs:
        ld      r5, THREAD_TM_DSCR(r3)
        ld      r6, THREAD_TM_PPR(r3)
 
-       /* Clear the MSR RI since we are about to change R1.  EE is already off
-        */
-       li      r4, 0
-       mtmsrd  r4, 1
-
        REST_GPR(0, r7)                         /* GPR0 */
        REST_2GPRS(2, r7)                       /* GPR2-3 */
        REST_GPR(4, r7)                         /* GPR4 */
@@ -439,10 +443,33 @@ restore_gprs:
        ld      r6, _CCR(r7)
        mtcr    r6
 
-       REST_GPR(1, r7)                         /* GPR1 */
-       REST_GPR(5, r7)                         /* GPR5-7 */
        REST_GPR(6, r7)
-       ld      r7, GPR7(r7)
+
+       /*
+        * Store r1 and r5 on the stack so that we can access them
+        * after we clear MSR RI.
+        */
+
+       REST_GPR(5, r7)
+       std     r5, -8(r1)
+       ld      r5, GPR1(r7)
+       std     r5, -16(r1)
+
+       REST_GPR(7, r7)
+
+       /* Clear MSR RI since we are about to change r1. EE is already off */
+       li      r5, 0
+       mtmsrd  r5, 1
+
+       /*
+        * BE CAREFUL HERE:
+        * At this point we can't take an SLB miss since we have MSR_RI
+        * off. Load only to/from the stack/paca which are in SLB bolted regions
+        * until we turn MSR RI back on.
+        */
+
+       ld      r5, -8(r1)
+       ld      r1, -16(r1)
 
        /* Commit register state as checkpointed state: */
        TRECHKPT
index d873f6507f7210fe4b9a7caa774861b9648ba5aa..f8a871a72985c1d758d21c7f406210ce491db3b5 100644 (file)
@@ -316,8 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                        DBG_LOW(" -> hit\n");
                        /* Update the HPTE */
                        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-                                               ~(HPTE_R_PP | HPTE_R_N)) |
-                                              (newpp & (HPTE_R_PP | HPTE_R_N |
+                                               ~(HPTE_R_PPP | HPTE_R_N)) |
+                                              (newpp & (HPTE_R_PPP | HPTE_R_N |
                                                         HPTE_R_C)));
                }
                native_unlock_hpte(hptep);
@@ -385,8 +385,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 
        /* Update the HPTE */
        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-                       ~(HPTE_R_PP | HPTE_R_N)) |
-               (newpp & (HPTE_R_PP | HPTE_R_N)));
+                               ~(HPTE_R_PPP | HPTE_R_N)) |
+                              (newpp & (HPTE_R_PPP | HPTE_R_N)));
        /*
         * Ensure it is out of the tlb too. Bolted entries base and
         * actual page size will be same.
@@ -550,7 +550,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                }
        }
        /* This works for all page sizes, and for 256M and 1T segments */
-       *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+       if (cpu_has_feature(CPU_FTR_ARCH_300))
+               *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
+       else
+               *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
+
        shift = mmu_psize_defs[size].shift;
 
        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
index 59268969a0bc7b0d02e8c10b28f5b214f6209006..2971ea18c768bcda67c36874b4fba274f607abc5 100644 (file)
@@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        },
 };
 
+/*
+ * 'R' and 'C' update notes:
+ *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
+ *     create writeable HPTEs without C set, because the hcall H_PROTECT
+ *     that we use in that case will not update C
+ *  - The above is however not a problem, because we also don't do that
+ *     fancy "no flush" variant of eviction and we use H_REMOVE which will
+ *     do the right thing and thus we don't have the race I described earlier
+ *
+ *  - Under bare metal, we do have the race, so we need R and C set
+ *  - We make sure R is always set and never lost
+ *  - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
+ */
 unsigned long htab_convert_pte_flags(unsigned long pteflags)
 {
        unsigned long rflags = 0;
@@ -186,19 +199,28 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
                        rflags |= 0x1;
        }
        /*
-        * Always add "C" bit for perf. Memory coherence is always enabled
+        * We can't allow hardware to update hpte bits. Hence always
+        * set 'R' bit and set 'C' if it is a write fault
         */
-       rflags |=  HPTE_R_C | HPTE_R_M;
+       rflags |=  HPTE_R_R;
+
+       if (pteflags & _PAGE_DIRTY)
+               rflags |= HPTE_R_C;
        /*
         * Add in WIG bits
         */
 
        if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
                rflags |= HPTE_R_I;
-       if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
+       else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
                rflags |= (HPTE_R_I | HPTE_R_G);
-       if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
-               rflags |= (HPTE_R_I | HPTE_R_W);
+       else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+               rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
+       else
+               /*
+                * Add memory coherence if cache inhibited is not set
+                */
+               rflags |= HPTE_R_M;
 
        return rflags;
 }
@@ -900,6 +922,10 @@ void __init hash__early_init_mmu(void)
        vmemmap = (struct page *)H_VMEMMAP_BASE;
        ioremap_bot = IOREMAP_BASE;
 
+#ifdef CONFIG_PCI
+       pci_io_base = ISA_IO_BASE;
+#endif
+
        /* Initialize the MMU Hash table and create the linear mapping
         * of memory. Has to be done before SLB initialization as this is
         * currently where the page size encoding is obtained.
index 5aac1a3f86cde4471ba3460c3b08d543a43fd0e4..119d18611500f4ccde99916b1012bfba0b9f1039 100644 (file)
@@ -73,7 +73,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
        cachep = PGT_CACHE(pdshift - pshift);
 #endif
 
-       new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
+       new = kmem_cache_zalloc(cachep, GFP_KERNEL);
 
        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
index 227b2a6c4544b1739813bf192e3a156bed7e1ffd..196222227e8207ca7b71ae28e2f8e8a0ffb1565e 100644 (file)
@@ -65,7 +65,7 @@ static int radix__init_new_context(struct mm_struct *mm, int index)
        /*
         * set the process table entry,
         */
-       rts_field = 3ull << PPC_BITLSHIFT(2);
+       rts_field = radix__get_tree_size();
        process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
        return 0;
 }
index eb44511447462e41c36ce25b03be1026edca4a56..670318766545c6fb029e4acfb7711be5ca7fe90d 100644 (file)
@@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
-               /*
-                * Since we are not supporting SW TLB systems, we don't
-                * have any thing similar to flush_tlb_page_nohash()
-                */
+               flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
 }
index 18b2c11604fa7804f7e7101a9a4dee9204f5f62e..7931e1496f0d59d2ab5c015a48824d15e5fb3f05 100644 (file)
@@ -160,9 +160,8 @@ redo:
        process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
        /*
         * Fill in the process table.
-        * we support 52 bits, hence 52-28 = 24, 11000
         */
-       rts_field = 3ull << PPC_BITLSHIFT(2);
+       rts_field = radix__get_tree_size();
        process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
        /*
         * Fill in the partition table. We are supposed to use effective address
@@ -176,10 +175,8 @@ redo:
 static void __init radix_init_partition_table(void)
 {
        unsigned long rts_field;
-       /*
-        * we support 52 bits, hence 52-28 = 24, 11000
-        */
-       rts_field = 3ull << PPC_BITLSHIFT(2);
+
+       rts_field = radix__get_tree_size();
 
        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
        partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
@@ -296,11 +293,6 @@ found:
 void __init radix__early_init_mmu(void)
 {
        unsigned long lpcr;
-       /*
-        * setup LPCR UPRT based on mmu_features
-        */
-       lpcr = mfspr(SPRN_LPCR);
-       mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
 
 #ifdef CONFIG_PPC_64K_PAGES
        /* PAGE_SIZE mappings */
@@ -336,6 +328,11 @@ void __init radix__early_init_mmu(void)
        __vmalloc_end = RADIX_VMALLOC_END;
        vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
        ioremap_bot = IOREMAP_BASE;
+
+#ifdef CONFIG_PCI
+       pci_io_base = ISA_IO_BASE;
+#endif
+
        /*
         * For now radix also use the same frag size
         */
@@ -343,8 +340,11 @@ void __init radix__early_init_mmu(void)
        __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
 
        radix_init_page_sizes();
-       if (!firmware_has_feature(FW_FEATURE_LPAR))
+       if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+               lpcr = mfspr(SPRN_LPCR);
+               mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
                radix_init_partition_table();
+       }
 
        radix_init_pgtable();
 }
@@ -353,16 +353,15 @@ void radix__early_init_mmu_secondary(void)
 {
        unsigned long lpcr;
        /*
-        * setup LPCR UPRT based on mmu_features
-        */
-       lpcr = mfspr(SPRN_LPCR);
-       mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
-       /*
-        * update partition table control register, 64 K size.
+        * update partition table control register and UPRT
         */
-       if (!firmware_has_feature(FW_FEATURE_LPAR))
+       if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+               lpcr = mfspr(SPRN_LPCR);
+               mtspr(SPRN_LPCR, lpcr | LPCR_UPRT);
+
                mtspr(SPRN_PTCR,
                      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+       }
 }
 
 void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
index bf7bf32b54f8a7ca025c06ca7e74e3f0d78f1d70..7f922f5579365f2a3b3b9404cc7b22d855f587bf 100644 (file)
@@ -84,7 +84,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
        pte_t *pte;
 
        if (slab_is_available()) {
-               pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+               pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        } else {
                pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
                if (pte)
@@ -97,7 +97,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *ptepage;
 
-       gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+       gfp_t flags = GFP_KERNEL | __GFP_ZERO;
 
        ptepage = alloc_pages(flags, 0);
        if (!ptepage)
index e009e0604a8ab76f7b9b1efd2004c8c45f210c6c..f5e8d4edb808f8748d8eaf630f027f0d5796b344 100644 (file)
@@ -350,8 +350,7 @@ static pte_t *get_from_cache(struct mm_struct *mm)
 static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
 {
        void *ret = NULL;
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-                                      __GFP_REPEAT | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        if (!page)
                return NULL;
        if (!kernel && !pgtable_page_ctor(page)) {
index 0fdaf93a3e091be64d827bd376f651a6c3bab8f0..ab2f60e812e2826badd47b0f03fd0eee12ed0051 100644 (file)
 
 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbiel_pid(unsigned long pid, int set)
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
+static inline void __tlbiel_pid(unsigned long pid, int set,
+                               unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = PPC_BIT(53); /* IS = 1 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 2;  /* invalidate all the caches */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
 /*
  * We use 128 set in radix mode and 256 set in hpt mode.
  */
-static inline void _tlbiel_pid(unsigned long pid)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 {
        int set;
 
        for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
-               __tlbiel_pid(pid, set);
+               __tlbiel_pid(pid, set, ric);
        }
        return;
 }
 
-static inline void _tlbie_pid(unsigned long pid)
+static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = PPC_BIT(53); /* IS = 1 */
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 2;  /* invalidate all the caches */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid)
 }
 
 static inline void _tlbiel_va(unsigned long va, unsigned long pid,
-                             unsigned long ap)
+                             unsigned long ap, unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 0;  /* no cluster flush yet */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
-                            unsigned long ap)
+                            unsigned long ap, unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 0;  /* no cluster flush yet */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -117,25 +118,40 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
  */
 void radix__local_flush_tlb_mm(struct mm_struct *mm)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
-               _tlbiel_pid(pid);
+               _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_mm);
 
+void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+       unsigned long pid;
+       struct mm_struct *mm = tlb->mm;
+
+       preempt_disable();
+
+       pid = mm->context.id;
+       if (pid != MMU_NO_CONTEXT)
+               _tlbiel_pid(pid, RIC_FLUSH_PWC);
+
+       preempt_enable();
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
+
 void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                            unsigned long ap, int nid)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (pid != MMU_NO_CONTEXT)
-               _tlbiel_va(vmaddr, pid, ap);
+               _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
        preempt_enable();
 }
 
@@ -160,7 +176,7 @@ static int mm_is_core_local(struct mm_struct *mm)
 
 void radix__flush_tlb_mm(struct mm_struct *mm)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm->context.id;
@@ -172,20 +188,46 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 
                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
-               _tlbie_pid(pid);
+               _tlbie_pid(pid, RIC_FLUSH_ALL);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
-               _tlbiel_pid(pid);
+               _tlbiel_pid(pid, RIC_FLUSH_ALL);
 no_context:
        preempt_enable();
 }
 EXPORT_SYMBOL(radix__flush_tlb_mm);
 
+void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+       unsigned long pid;
+       struct mm_struct *mm = tlb->mm;
+
+       preempt_disable();
+
+       pid = mm->context.id;
+       if (unlikely(pid == MMU_NO_CONTEXT))
+               goto no_context;
+
+       if (!mm_is_core_local(mm)) {
+               int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+               if (lock_tlbie)
+                       raw_spin_lock(&native_tlbie_lock);
+               _tlbie_pid(pid, RIC_FLUSH_PWC);
+               if (lock_tlbie)
+                       raw_spin_unlock(&native_tlbie_lock);
+       } else
+               _tlbiel_pid(pid, RIC_FLUSH_PWC);
+no_context:
+       preempt_enable();
+}
+EXPORT_SYMBOL(radix__flush_tlb_pwc);
+
 void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                       unsigned long ap, int nid)
 {
-       unsigned int pid;
+       unsigned long pid;
 
        preempt_disable();
        pid = mm ? mm->context.id : 0;
@@ -196,11 +238,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 
                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
-               _tlbie_va(vmaddr, pid, ap);
+               _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
-               _tlbiel_va(vmaddr, pid, ap);
+               _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 bail:
        preempt_enable();
 }
@@ -224,7 +266,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
        if (lock_tlbie)
                raw_spin_lock(&native_tlbie_lock);
-       _tlbie_pid(0);
+       _tlbie_pid(0, RIC_FLUSH_ALL);
        if (lock_tlbie)
                raw_spin_unlock(&native_tlbie_lock);
 }
index c50ea76ba66ceb95c99555f7391b75c0744a1b96..6081fbd75330b8a936a6e590b7fcb9604c4a23cb 100644 (file)
@@ -221,7 +221,7 @@ static bool soc_has_mclk_mux0_canin(void)
 /* convenience wrappers around the common clk API */
 static inline struct clk *mpc512x_clk_fixed(const char *name, int rate)
 {
-       return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+       return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
 }
 
 static inline struct clk *mpc512x_clk_factor(
index 84fb984f29c130f89cffbbaaf1aae9b8b1d6c843..85c85eb3e245d7a7a9600a8fce95a592789f38bc 100644 (file)
@@ -172,7 +172,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
        if (rc < 0)
                goto out;
 
-       skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos;
+       skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
        if (!dump_skip(cprm, skip))
                goto Eio;
 out:
index ac3ffd97e0596626a1c72c930d76ac994c683b82..3998e0f9a03bc89e51ab041d87836a4db587933b 100644 (file)
@@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2;
 static int ibm_slot_error_detail;
 static int ibm_get_config_addr_info;
 static int ibm_get_config_addr_info2;
-static int ibm_configure_bridge;
 static int ibm_configure_pe;
 
 /*
@@ -81,7 +80,14 @@ static int pseries_eeh_init(void)
        ibm_get_config_addr_info2       = rtas_token("ibm,get-config-addr-info2");
        ibm_get_config_addr_info        = rtas_token("ibm,get-config-addr-info");
        ibm_configure_pe                = rtas_token("ibm,configure-pe");
-       ibm_configure_bridge            = rtas_token("ibm,configure-bridge");
+
+       /*
+        * ibm,configure-pe and ibm,configure-bridge have the same semantics,
+        * however ibm,configure-pe can be faster.  If we can't find
+        * ibm,configure-pe then fall back to using ibm,configure-bridge.
+        */
+       if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
+               ibm_configure_pe        = rtas_token("ibm,configure-bridge");
 
        /*
         * Necessary sanity check. We needn't check "get-config-addr-info"
@@ -93,8 +99,7 @@ static int pseries_eeh_init(void)
            (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
             ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
            ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE       ||
-           (ibm_configure_pe == RTAS_UNKNOWN_SERVICE           &&
-            ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) {
+           ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
                pr_info("EEH functionality not supported\n");
                return -EINVAL;
        }
@@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
 {
        int config_addr;
        int ret;
+       /* Wait at most 0.2s before skipping configuration */
+       int max_wait = 200;
 
        /* Figure out the PE address */
        config_addr = pe->config_addr;
        if (pe->addr)
                config_addr = pe->addr;
 
-       /* Use new configure-pe function, if supported */
-       if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+       while (max_wait > 0) {
                ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
                                config_addr, BUID_HI(pe->phb->buid),
                                BUID_LO(pe->phb->buid));
-       } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
-               ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
-                               config_addr, BUID_HI(pe->phb->buid),
-                               BUID_LO(pe->phb->buid));
-       } else {
-               return -EFAULT;
-       }
 
-       if (ret)
-               pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
-                       __func__, pe->phb->global_number, pe->addr, ret);
+               if (!ret)
+                       return ret;
+
+               /*
+                * If RTAS returns a delay value that's above 100ms, cut it
+                * down to 100ms in case firmware made a mistake.  For more
+                * on how these delay values work, see rtas_busy_delay_time().
+                */
+               if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+                   ret <= RTAS_EXTENDED_DELAY_MAX)
+                       ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+               max_wait -= rtas_busy_delay_time(ret);
+
+               if (max_wait < 0)
+                       break;
+
+               rtas_busy_delay(ret);
+       }
 
+       pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+               __func__, pe->phb->global_number, pe->addr, ret);
        return ret;
 }
 
index b7dfc1359d0113d570fe9ea7f702c728814dc66f..3e8865b187de22187a39e1bc66b662e464009f14 100644 (file)
@@ -927,7 +927,7 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
        dn = pci_device_to_OF_node(dev);
        pdn = PCI_DN(dn);
        buid = pdn->phb->buid;
-       cfg_addr = (pdn->busno << 8) | pdn->devfn;
+       cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
 
        ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
                  cfg_addr, BUID_HI(buid), BUID_LO(buid));
@@ -956,7 +956,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
        dn = pci_device_to_OF_node(dev);
        pdn = PCI_DN(dn);
        buid = pdn->phb->buid;
-       cfg_addr = (pdn->busno << 8) | pdn->devfn;
+       cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
 
        do {
                /* extra outputs are LIOBN and dma-addr (hi, lo) */
index 0ac42cc4f880766624cd21de501ed673615ed649..d5ec71b2ed02cd1a7f5ddcecc2257d79347963c7 100644 (file)
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
@@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_ZPOOL=m
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
@@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -537,6 +545,8 @@ CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
 CONFIG_READABLE_ASM=y
 CONFIG_UNUSED_SYMBOLS=y
@@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y
 CONFIG_SLUB_STATS=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_VMACACHE=y
 CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_PGFLAGS=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_DETECT_HUNG_TASK=y
+CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
@@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y
 CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
@@ -664,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
index a31dcd56f7c0613819cc084195d07da2bdbb5ab7..f46a35115d2da8202e70621639958da91b3891c7 100644 (file)
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
@@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y
 CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_RBTREE_TEST=m
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
@@ -610,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
index 7b73bf3533454636100c03850b5d377525090cbe..ba0f2a58b8cdb735247de80b7772bafd68bda053 100644 (file)
@@ -1,8 +1,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
@@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
@@ -607,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
index 1719843a55a2abf7ecf40646aa3ad6176b9a380d..4366a3e3e7548cd7315492a5a3670c2fc7febb97 100644 (file)
@@ -1,5 +1,5 @@
 # CONFIG_SWAP is not set
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 # CONFIG_COMPAT is not set
 CONFIG_NR_CPUS=2
@@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 # CONFIG_FTRACE is not set
-# CONFIG_STRICT_DEVMEM is not set
 # CONFIG_PFAULT is not set
 # CONFIG_S390_HYPFS_FS is not set
 # CONFIG_VIRTUALIZATION is not set
index e24f2af4c73b5bc3ec24a2e4fdad20f0473ed84e..3f571ea8950947dac8ecf4c0e2c32b4b0b28f277 100644 (file)
@@ -1,8 +1,8 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
+CONFIG_USELIB=y
 CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_TASKSTATS=y
 CONFIG_TASK_DELAY_ACCT=y
@@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
@@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
 CONFIG_NR_CPUS=256
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
@@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
@@ -61,7 +68,6 @@ CONFIG_UNIX=y
 CONFIG_NET_KEY=y
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
-# CONFIG_INET_LRO is not set
 CONFIG_L2TP=m
 CONFIG_L2TP_DEBUGFS=m
 CONFIG_VLAN_8021Q=y
@@ -144,6 +150,9 @@ CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_HUGETLBFS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
 CONFIG_UNUSED_SYMBOLS=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
@@ -158,20 +167,21 @@ CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
 CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
-CONFIG_TRACER_SNAPSHOT=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
-# CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
@@ -212,8 +222,6 @@ CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_ZLIB=m
-CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
index 5e04f3cbd320d78e963c7ae18639c94d4b83952c..8ae236b0f80b35b127d3b87d64e1abff23d437c8 100644 (file)
@@ -22,7 +22,7 @@ static inline int test_fp_ctl(u32 fpc)
                "       la      %0,0\n"
                "1:\n"
                EX_TABLE(0b,1b)
-               : "=d" (rc), "=d" (orig_fpc)
+               : "=d" (rc), "=&d" (orig_fpc)
                : "d" (fpc), "0" (-EINVAL));
        return rc;
 }
index 37b9017c6a96b70d5a9460be9923a44663268ff8..ac82e8eb936dc673e8079f4c91e170adf1075fc9 100644 (file)
@@ -245,6 +245,7 @@ struct kvm_vcpu_stat {
        u32 exit_stop_request;
        u32 exit_validity;
        u32 exit_instruction;
+       u32 exit_pei;
        u32 halt_successful_poll;
        u32 halt_attempted_poll;
        u32 halt_poll_invalid;
index f20abdb5630adceaa75690907ddf7cdedc098146..d14069d4b88dcd1a38fe2a3ca309cf8723909bf3 100644 (file)
@@ -2064,12 +2064,5 @@ void s390_reset_system(void)
        S390_lowcore.program_new_psw.addr =
                (unsigned long) s390_base_pgm_handler;
 
-       /*
-        * Clear subchannel ID and number to signal new kernel that no CCW or
-        * SCSI IPL has been done (for kexec and kdump)
-        */
-       S390_lowcore.subchannel_id = 0;
-       S390_lowcore.subchannel_nr = 0;
-
        do_reset_calls();
 }
index 59215c518f379d06479536653b6448a7d556fe83..7ec63b1d920d29aa75420a63ea4b4f8e868975a6 100644 (file)
@@ -649,6 +649,8 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
 
 /* Performance monitoring unit for s390x */
 static struct pmu cpumf_pmu = {
+       .task_ctx_nr  = perf_sw_context,
+       .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
        .pmu_enable   = cpumf_pmu_enable,
        .pmu_disable  = cpumf_pmu_disable,
        .event_init   = cpumf_pmu_event_init,
@@ -708,12 +710,6 @@ static int __init cpumf_pmu_init(void)
                goto out;
        }
 
-       /* The CPU measurement counter facility does not have overflow
-        * interrupts to do sampling.  Sampling must be provided by
-        * external means, for example, by timers.
-        */
-       cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
        cpumf_pmu.attr_groups = cpumf_cf_event_group();
        rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
        if (rc) {
index 2e6b54e4d3f955d1c7971fce4ad02551eeac3846..252157181302e43edb4dac6e675108002009b30a 100644 (file)
@@ -341,6 +341,8 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 
 static int handle_partial_execution(struct kvm_vcpu *vcpu)
 {
+       vcpu->stat.exit_pei++;
+
        if (vcpu->arch.sie_block->ipa == 0xb254)        /* MVPG */
                return handle_mvpg_pei(vcpu);
        if (vcpu->arch.sie_block->ipa >> 8 == 0xae)     /* SIGP */
index 6d8ec3ac9dd8ec675d4d138a83888fa371138bcb..43f2a2b80490ce44d6d425efdeab9fbe7491b5bc 100644 (file)
@@ -61,6 +61,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
+       { "exit_pei", VCPU_STAT(exit_pei) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
@@ -657,7 +658,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
                kvm->arch.model.cpuid = proc->cpuid;
                lowest_ibc = sclp.ibc >> 16 & 0xfff;
                unblocked_ibc = sclp.ibc & 0xfff;
-               if (lowest_ibc) {
+               if (lowest_ibc && proc->ibc) {
                        if (proc->ibc > unblocked_ibc)
                                kvm->arch.model.ibc = unblocked_ibc;
                        else if (proc->ibc < lowest_ibc)
index 7a31440173016e8cf0281deda2f81774975d80d1..19288c1b36d32bfed0013e6a90385d8d4c82d08c 100644 (file)
@@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
 
        report_user_fault(regs, SIGSEGV, 1);
        si.si_signo = SIGSEGV;
+       si.si_errno = 0;
        si.si_code = si_code;
        si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
        force_sig_info(SIGSEGV, &si, current);
index e8b5962ac12ab8035797829f6940b9c1c7cfe227..e2565d2d0c32edeea01c61de92127c8666e7a305 100644 (file)
@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                        return table;
        }
        /* Allocate a fresh page */
-       page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+       page = alloc_page(GFP_KERNEL);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
index 4324b87f93982f08552ecb86bb9c893431507427..9f0ce0e6eeb484c2fad96f560a3b5ca851188ea8 100644 (file)
@@ -437,7 +437,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
        pgste = pgste_get_lock(ptep);
        pgstev = pgste_val(pgste);
        pte = *ptep;
-       if (pte_swap(pte) &&
+       if (!reset && pte_swap(pte) &&
            ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
             (pgstev & _PGSTE_GPS_ZERO))) {
                ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
index f010c93a88b16c1d909069c14237ff389fa759df..fda605dbc1b44b08e62f30b15b1d4d6b6283cbb3 100644 (file)
@@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  *           |               |     |
  *           +---------------+     |
  *           | 8 byte skbp   |     |
- * R15+170 -> +---------------+     |
+ * R15+176 -> +---------------+     |
  *           | 8 byte hlen   |     |
  * R15+168 -> +---------------+     |
  *           | 4 byte align  |     |
@@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
 #define STK_OFF                (STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP    160     /* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN   168     /* Offset of SKB header length on stack */
-#define STK_OFF_SKBP   170     /* Offset of SKB pointer on stack */
+#define STK_OFF_SKBP   176     /* Offset of SKB pointer on stack */
 
 #define STK_OFF_R6     (160 - 11 * 8)  /* Offset of r6 on stack */
 #define STK_OFF_TCCNT  (160 - 12 * 8)  /* Offset of tail_call_cnt on stack */
index 9133b0ec000b82037729ac8ff7e4912b252b13a2..bee281f3163d039e2fa203bcdaa02b8bf87d6ced 100644 (file)
@@ -45,7 +45,7 @@ struct bpf_jit {
        int labels[1];          /* Labels for local jumps */
 };
 
-#define BPF_SIZE_MAX   0x7ffff /* Max size for program (20 bit signed displ) */
+#define BPF_SIZE_MAX   0xffff  /* Max size for program (16 bit branches) */
 
 #define SEEN_SKB       1       /* skb access */
 #define SEEN_MEM       2       /* use mem[] for temporary storage */
@@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
                emit_load_skb_data_hlen(jit);
        if (jit->seen & SEEN_SKB_CHANGE)
                /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
-               EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
+               EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
                              STK_OFF_SKBP);
 }
 
index 2e067657db988d66d8fbd01912fbb4729d934ad3..49b012d78c1ac9a6868a1a08f88c54e415c76028 100644 (file)
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-                                       PTE_ORDER);
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
        return pte;
 }
@@ -53,7 +52,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+       pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
        if (!pte)
                return NULL;
        clear_highpage(pte);
index a33673b3687df2fa1e76022635cc3961a60810c5..f3f42c84c40fd3a2b81ea523ee841c99d1e82f4b 100644 (file)
@@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -43,7 +43,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
        struct page *page;
        void *pg;
 
-       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
        if (!pg)
                return NULL;
        page = virt_to_page(pg);
index 26e03a1f7ca4b6d602e9cc80d2b04ff9bb28600f..a62bd869677976bf4759d83b8e5646420d0f6826 100644 (file)
@@ -1,7 +1,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
 
 static struct kmem_cache *pgd_cachep;
 #if PAGETABLE_LEVELS > 2
index 10e9dabc4c41337c9f4a1dfc970aa22e9e24436f..f0700cfeedd7b270fa874aae03c8df6bf862152d 100644 (file)
 
 #define        PTREGS_OFF      (STACK_BIAS + STACKFRAME_SZ)
 
+#define        RTRAP_PSTATE            (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+#define        RTRAP_PSTATE_IRQOFF     (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+
 #define __CHEETAH_ID   0x003e0014
 #define __JALAPENO_ID  0x003e0016
 #define __SERRANO_ID   0x003e0022
index 5e3187185b4a8b9c3f22d4a67ea538a12fdcd9e7..3529f1378cd8d6230a0ad3c76cec74b874a94b2d 100644 (file)
@@ -41,8 +41,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(pgtable_cache,
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -52,8 +51,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(pgtable_cache,
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
index 71b5a67522abb2bb0d8457819612aa26e948fdc8..781b9f1dbdc2d24b54fe533c0961d8eab8efa44b 100644 (file)
@@ -589,8 +589,8 @@ user_rtt_fill_64bit:                                        \
         restored;                                      \
        nop; nop; nop; nop; nop; nop;                   \
        nop; nop; nop; nop; nop;                        \
-       ba,a,pt %xcc, user_rtt_fill_fixup;              \
-       ba,a,pt %xcc, user_rtt_fill_fixup;              \
+       ba,a,pt %xcc, user_rtt_fill_fixup_dax;          \
+       ba,a,pt %xcc, user_rtt_fill_fixup_mna;          \
        ba,a,pt %xcc, user_rtt_fill_fixup;
 
 
@@ -652,8 +652,8 @@ user_rtt_fill_32bit:                                        \
         restored;                                      \
        nop; nop; nop; nop; nop;                        \
        nop; nop; nop;                                  \
-       ba,a,pt %xcc, user_rtt_fill_fixup;              \
-       ba,a,pt %xcc, user_rtt_fill_fixup;              \
+       ba,a,pt %xcc, user_rtt_fill_fixup_dax;          \
+       ba,a,pt %xcc, user_rtt_fill_fixup_mna;          \
        ba,a,pt %xcc, user_rtt_fill_fixup;
 
 
index 7cf9c6ea3f1f210c0856351e47d1a4252913667b..fdb13327fded36a313b054783adf1654725d237f 100644 (file)
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
 CFLAGS_REMOVE_pcr.o := -pg
 endif
 
+obj-$(CONFIG_SPARC64)   += urtt_fill.o
 obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
 obj-$(CONFIG_SPARC32)   += etrap_32.o
 obj-$(CONFIG_SPARC32)   += rtrap_32.o
index d08bdaffdbfccbafc9b3d67c30267fe397eb917f..216948ca43829d0c0a5be837ff183f86d03dfb49 100644 (file)
 #include <asm/visasm.h>
 #include <asm/processor.h>
 
-#define                RTRAP_PSTATE            (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
-#define                RTRAP_PSTATE_IRQOFF     (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
-#define                RTRAP_PSTATE_AG_IRQOFF  (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
-
 #ifdef CONFIG_CONTEXT_TRACKING
 # define SCHEDULE_USER schedule_user
 #else
@@ -242,52 +238,17 @@ rt_continue:      ldx                     [%sp + PTREGS_OFF + PT_V9_G1], %g1
                 wrpr                   %g1, %cwp
                ba,a,pt                 %xcc, user_rtt_fill_64bit
 
-user_rtt_fill_fixup:
-               rdpr    %cwp, %g1
-               add     %g1, 1, %g1
-               wrpr    %g1, 0x0, %cwp
-
-               rdpr    %wstate, %g2
-               sll     %g2, 3, %g2
-               wrpr    %g2, 0x0, %wstate
-
-               /* We know %canrestore and %otherwin are both zero.  */
-
-               sethi   %hi(sparc64_kern_pri_context), %g2
-               ldx     [%g2 + %lo(sparc64_kern_pri_context)], %g2
-               mov     PRIMARY_CONTEXT, %g1
-
-661:           stxa    %g2, [%g1] ASI_DMMU
-               .section .sun4v_1insn_patch, "ax"
-               .word   661b
-               stxa    %g2, [%g1] ASI_MMU
-               .previous
-
-               sethi   %hi(KERNBASE), %g1
-               flush   %g1
+user_rtt_fill_fixup_dax:
+               ba,pt   %xcc, user_rtt_fill_fixup_common
+                mov    1, %g3
 
-               or      %g4, FAULT_CODE_WINFIXUP, %g4
-               stb     %g4, [%g6 + TI_FAULT_CODE]
-               stx     %g5, [%g6 + TI_FAULT_ADDR]
+user_rtt_fill_fixup_mna:
+               ba,pt   %xcc, user_rtt_fill_fixup_common
+                mov    2, %g3
 
-               mov     %g6, %l1
-               wrpr    %g0, 0x0, %tl
-
-661:           nop
-               .section                .sun4v_1insn_patch, "ax"
-               .word                   661b
-               SET_GL(0)
-               .previous
-
-               wrpr    %g0, RTRAP_PSTATE, %pstate
-
-               mov     %l1, %g6
-               ldx     [%g6 + TI_TASK], %g4
-               LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
-               call    do_sparc64_fault
-                add    %sp, PTREGS_OFF, %o0
-               ba,pt   %xcc, rtrap
-                nop
+user_rtt_fill_fixup:
+               ba,pt   %xcc, user_rtt_fill_fixup_common
+                clr    %g3
 
 user_rtt_pre_restore:
                add                     %g1, 1, %g1
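The three trampolines patched in above differ only in the selector they load into %g3 on the way to the shared fixup path (user_rtt_fill_fixup_common, added in urtt_fill.S further down). A C-level mirror of that encoding, with hypothetical names, for illustration only:

        /* Hypothetical enum mirroring the %g3 selector; the real
         * dispatch is the assembly in user_rtt_fill_fixup_common. */
        enum rtt_fixup {
                RTT_FIXUP_WINFIXUP = 0, /* user_rtt_fill_fixup */
                RTT_FIXUP_DAX      = 1, /* user_rtt_fill_fixup_dax */
                RTT_FIXUP_MNA      = 2, /* user_rtt_fill_fixup_mna */
        };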
index 3c25241fa5cbd416ca5f95bff873c72c609a6a10..91cc2f4ae4d98ba67ae74e5e31cf58ee06d57db4 100644 (file)
@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
        return 0;
 }
 
+/* Checks if the fp is valid.  We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+       if ((((unsigned long) fp) & 15) ||
+           ((unsigned long)fp) > 0x100000000ULL - fplen)
+               return true;
+       return false;
+}
+
 void do_sigreturn32(struct pt_regs *regs)
 {
        struct signal_frame32 __user *sf;
        compat_uptr_t fpu_save;
        compat_uptr_t rwin_save;
-       unsigned int psr;
+       unsigned int psr, ufp;
        unsigned int pc, npc;
        sigset_t set;
        compat_sigset_t seta;
@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
        sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
 
        /* 1. Make sure we are not getting garbage from the user */
-       if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-           (((unsigned long) sf) & 3))
+       if (invalid_frame_pointer(sf, sizeof(*sf)))
+               goto segv;
+
+       if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+               goto segv;
+
+       if (ufp & 0x7)
                goto segv;
 
-       if (get_user(pc, &sf->info.si_regs.pc) ||
+       if (__get_user(pc, &sf->info.si_regs.pc) ||
            __get_user(npc, &sf->info.si_regs.npc))
                goto segv;
 
@@ -227,7 +244,7 @@ segv:
 asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
 {
        struct rt_signal_frame32 __user *sf;
-       unsigned int psr, pc, npc;
+       unsigned int psr, pc, npc, ufp;
        compat_uptr_t fpu_save;
        compat_uptr_t rwin_save;
        sigset_t set;
@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
        sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
 
        /* 1. Make sure we are not getting garbage from the user */
-       if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-           (((unsigned long) sf) & 3))
+       if (invalid_frame_pointer(sf, sizeof(*sf)))
                goto segv;
 
-       if (get_user(pc, &sf->regs.pc) || 
+       if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+               goto segv;
+
+       if (ufp & 0x7)
+               goto segv;
+
+       if (__get_user(pc, &sf->regs.pc) || 
            __get_user(npc, &sf->regs.npc))
                goto segv;
 
@@ -307,14 +329,6 @@ segv:
        force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp, int fplen)
-{
-       if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
-               return 1;
-       return 0;
-}
-
 static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
        unsigned long sp;
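The hunks above all follow one validation pattern: reject a misaligned or out-of-range frame pointer before touching the frame, do the first read with the checking get_user() (which performs the access_ok() range check), and only then use the unchecked __get_user() for the remaining fields. A minimal sketch of the compat-frame check, assuming the 4 GB compat address limit; the helper name is hypothetical:

        static bool frame_ok(void __user *fp, int fplen)
        {
                /* 16-byte aligned and entirely below 4 GB */
                if (((unsigned long)fp) & 15)
                        return false;
                return (unsigned long)fp <= 0x100000000ULL - fplen;
        }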
index 52aa5e4ce5e77c0716e083dd41372c16e5ac8967..c3c12efe0bc004053fea6b49c2f34e51e1ffc32f 100644 (file)
@@ -60,10 +60,22 @@ struct rt_signal_frame {
 #define SF_ALIGNEDSZ  (((sizeof(struct signal_frame) + 7) & (~7)))
 #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
 
+/* Checks if the fp is valid.  We always build signal frames which are
+ * 16-byte aligned, therefore we can always enforce that the restore
+ * frame has that property as well.
+ */
+static inline bool invalid_frame_pointer(void __user *fp, int fplen)
+{
+       if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
+               return true;
+
+       return false;
+}
+
 asmlinkage void do_sigreturn(struct pt_regs *regs)
 {
+       unsigned long up_psr, pc, npc, ufp;
        struct signal_frame __user *sf;
-       unsigned long up_psr, pc, npc;
        sigset_t set;
        __siginfo_fpu_t __user *fpu_save;
        __siginfo_rwin_t __user *rwin_save;
@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
        sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
 
        /* 1. Make sure we are not getting garbage from the user */
-       if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+       if (invalid_frame_pointer(sf, sizeof(*sf)))
+               goto segv_and_exit;
+
+       if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
                goto segv_and_exit;
 
-       if (((unsigned long) sf) & 3)
+       if (ufp & 0x7)
                goto segv_and_exit;
 
        err = __get_user(pc,  &sf->info.si_regs.pc);
@@ -127,7 +142,7 @@ segv_and_exit:
 asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 {
        struct rt_signal_frame __user *sf;
-       unsigned int psr, pc, npc;
+       unsigned int psr, pc, npc, ufp;
        __siginfo_fpu_t __user *fpu_save;
        __siginfo_rwin_t __user *rwin_save;
        sigset_t set;
@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 
        synchronize_user_stack();
        sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
-       if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
-           (((unsigned long) sf) & 0x03))
+       if (invalid_frame_pointer(sf, sizeof(*sf)))
+               goto segv;
+
+       if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+               goto segv;
+
+       if (ufp & 0x7)
                goto segv;
 
        err = __get_user(pc, &sf->regs.pc);
@@ -178,15 +198,6 @@ segv:
        force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static inline int invalid_frame_pointer(void __user *fp, int fplen)
-{
-       if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
-               return 1;
-
-       return 0;
-}
-
 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
        unsigned long sp = regs->u_regs[UREG_FP];
index 39aaec173f66ebce88fe7953c99b412f5b07cc3d..5ee930c48f4c952953a9ea584f08915901f3c83a 100644 (file)
@@ -234,6 +234,17 @@ do_sigsegv:
        goto out;
 }
 
+/* Checks if the fp is valid.  We always build rt signal frames which
+ * are 16-byte aligned, therefore we can always enforce that the
+ * restore frame has that property as well.
+ */
+static bool invalid_frame_pointer(void __user *fp)
+{
+       if (((unsigned long) fp) & 15)
+               return true;
+       return false;
+}
+
 struct rt_signal_frame {
        struct sparc_stackf     ss;
        siginfo_t               info;
@@ -246,8 +257,8 @@ struct rt_signal_frame {
 
 void do_rt_sigreturn(struct pt_regs *regs)
 {
+       unsigned long tpc, tnpc, tstate, ufp;
        struct rt_signal_frame __user *sf;
-       unsigned long tpc, tnpc, tstate;
        __siginfo_fpu_t __user *fpu_save;
        __siginfo_rwin_t __user *rwin_save;
        sigset_t set;
@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
                (regs->u_regs [UREG_FP] + STACK_BIAS);
 
        /* 1. Make sure we are not getting garbage from the user */
-       if (((unsigned long) sf) & 3)
+       if (invalid_frame_pointer(sf))
+               goto segv;
+
+       if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
                goto segv;
 
-       err = get_user(tpc, &sf->regs.tpc);
+       if ((ufp + STACK_BIAS) & 0x7)
+               goto segv;
+
+       err = __get_user(tpc, &sf->regs.tpc);
        err |= __get_user(tnpc, &sf->regs.tnpc);
        if (test_thread_flag(TIF_32BIT)) {
                tpc &= 0xffffffff;
@@ -308,14 +325,6 @@ segv:
        force_sig(SIGSEGV, current);
 }
 
-/* Checks if the fp is valid */
-static int invalid_frame_pointer(void __user *fp)
-{
-       if (((unsigned long) fp) & 15)
-               return 1;
-       return 0;
-}
-
 static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
 {
        unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
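On sparc64 the saved %fp is stored with STACK_BIAS (2047) subtracted, which is why the hunk above tests (ufp + STACK_BIAS) & 0x7 rather than ufp itself. A sketch of the idea, as a hypothetical helper:

        /* The real stack address is the biased value plus STACK_BIAS,
         * and it is that address which must be 8-byte aligned. */
        static bool saved_fp_misaligned(unsigned long ufp)
        {
                return ((ufp + STACK_BIAS) & 0x7) != 0;
        }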
index 0f6eebe71e6c91560585580a31621f0fb59a6f81..e5fe8cef9a699a0ee645b879abd3a5bbc6fc712b 100644 (file)
@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 {
        int err;
+
+       if (((unsigned long) fpu) & 3)
+               return -EFAULT;
+
 #ifdef CONFIG_SMP
        if (test_tsk_thread_flag(current, TIF_USEDFPU))
                regs->psr &= ~PSR_EF;
@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
        struct thread_info *t = current_thread_info();
        int i, wsaved, err;
 
-       __get_user(wsaved, &rp->wsaved);
+       if (((unsigned long) rp) & 3)
+               return -EFAULT;
+
+       get_user(wsaved, &rp->wsaved);
        if (wsaved > NSWINS)
                return -EFAULT;
 
index 387834a9c56a5143a0a28b4e49018e43f4d95212..36aadcbeac694667b78199d2dc37607e09b63233 100644 (file)
@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        unsigned long fprs;
        int err;
 
-       err = __get_user(fprs, &fpu->si_fprs);
+       if (((unsigned long) fpu) & 7)
+               return -EFAULT;
+
+       err = get_user(fprs, &fpu->si_fprs);
        fprs_write(0);
        regs->tstate &= ~TSTATE_PEF;
        if (fprs & FPRS_DL)
@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
        struct thread_info *t = current_thread_info();
        int i, wsaved, err;
 
-       __get_user(wsaved, &rp->wsaved);
+       if (((unsigned long) rp) & 7)
+               return -EFAULT;
+
+       get_user(wsaved, &rp->wsaved);
        if (wsaved > NSWINS)
                return -EFAULT;
 
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
new file mode 100644 (file)
index 0000000..5604a2b
--- /dev/null
@@ -0,0 +1,98 @@
+#include <asm/thread_info.h>
+#include <asm/trap_block.h>
+#include <asm/spitfire.h>
+#include <asm/ptrace.h>
+#include <asm/head.h>
+
+               .text
+               .align  8
+               .globl  user_rtt_fill_fixup_common
+user_rtt_fill_fixup_common:
+               rdpr    %cwp, %g1
+               add     %g1, 1, %g1
+               wrpr    %g1, 0x0, %cwp
+
+               rdpr    %wstate, %g2
+               sll     %g2, 3, %g2
+               wrpr    %g2, 0x0, %wstate
+
+               /* We know %canrestore and %otherwin are both zero.  */
+
+               sethi   %hi(sparc64_kern_pri_context), %g2
+               ldx     [%g2 + %lo(sparc64_kern_pri_context)], %g2
+               mov     PRIMARY_CONTEXT, %g1
+
+661:           stxa    %g2, [%g1] ASI_DMMU
+               .section .sun4v_1insn_patch, "ax"
+               .word   661b
+               stxa    %g2, [%g1] ASI_MMU
+               .previous
+
+               sethi   %hi(KERNBASE), %g1
+               flush   %g1
+
+               mov     %g4, %l4
+               mov     %g5, %l5
+               brnz,pn %g3, 1f
+                mov    %g3, %l3
+
+               or      %g4, FAULT_CODE_WINFIXUP, %g4
+               stb     %g4, [%g6 + TI_FAULT_CODE]
+               stx     %g5, [%g6 + TI_FAULT_ADDR]
+1:
+               mov     %g6, %l1
+               wrpr    %g0, 0x0, %tl
+
+661:           nop
+               .section                .sun4v_1insn_patch, "ax"
+               .word                   661b
+               SET_GL(0)
+               .previous
+
+               wrpr    %g0, RTRAP_PSTATE, %pstate
+
+               mov     %l1, %g6
+               ldx     [%g6 + TI_TASK], %g4
+               LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+
+               brnz,pn %l3, 1f
+                nop
+
+               call    do_sparc64_fault
+                add    %sp, PTREGS_OFF, %o0
+               ba,pt   %xcc, rtrap
+                nop
+
+1:             cmp     %g3, 2
+               bne,pn  %xcc, 2f
+                nop
+
+               sethi   %hi(tlb_type), %g1
+               lduw    [%g1 + %lo(tlb_type)], %g1
+               cmp     %g1, 3
+               bne,pt  %icc, 1f
+                add    %sp, PTREGS_OFF, %o0
+               mov     %l4, %o2
+               call    sun4v_do_mna
+                mov    %l5, %o1
+               ba,a,pt %xcc, rtrap
+1:             mov     %l4, %o1
+               mov     %l5, %o2
+               call    mem_address_unaligned
+                nop
+               ba,a,pt %xcc, rtrap
+
+2:             sethi   %hi(tlb_type), %g1
+               mov     %l4, %o1
+               lduw    [%g1 + %lo(tlb_type)], %g1
+               mov     %l5, %o2
+               cmp     %g1, 3
+               bne,pt  %icc, 1f
+                add    %sp, PTREGS_OFF, %o0
+               call    sun4v_data_access_exception
+                nop
+               ba,a,pt %xcc, rtrap
+
+1:             call    spitfire_data_access_exception
+                nop
+               ba,a,pt %xcc, rtrap
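A simplified C view of what user_rtt_fill_fixup_common does with the %g3 selector. This is a sketch only: the register shuffling, trap-level and %pstate handling are elided, and the exact argument order of the handlers is assumed rather than taken from their prototypes.

        void rtt_fixup_dispatch(int kind, struct pt_regs *regs,
                                unsigned long addr, unsigned long ctx)
        {
                if (kind == 0) {                /* window-fill fault */
                        do_sparc64_fault(regs);
                } else if (kind == 2) {         /* unaligned access */
                        if (tlb_type == hypervisor)
                                sun4v_do_mna(regs, addr, ctx);
                        else
                                mem_address_unaligned(regs, addr, ctx);
                } else {                        /* data access exception */
                        if (tlb_type == hypervisor)
                                sun4v_data_access_exception(regs, addr, ctx);
                        else
                                spitfire_data_access_exception(regs, addr, ctx);
                }
        }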
index 652683cb4b4bcc53f663dd424ed81249c7130e1f..aec508e374906ba0b1860f3669a609407c080125 100644 (file)
@@ -2704,8 +2704,7 @@ void __flush_tlb_all(void)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                            unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-                                      __GFP_REPEAT | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        pte_t *pte = NULL;
 
        if (page)
@@ -2717,8 +2716,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
                        unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-                                      __GFP_REPEAT | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
@@ -2824,9 +2822,10 @@ void hugetlb_setup(struct pt_regs *regs)
         * the Data-TLB for huge pages.
         */
        if (tlb_type == cheetah_plus) {
+               bool need_context_reload = false;
                unsigned long ctx;
 
-               spin_lock(&ctx_alloc_lock);
+               spin_lock_irq(&ctx_alloc_lock);
                ctx = mm->context.sparc64_ctx_val;
                ctx &= ~CTX_PGSZ_MASK;
                ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
@@ -2845,9 +2844,12 @@ void hugetlb_setup(struct pt_regs *regs)
                         * also executing in this address space.
                         */
                        mm->context.sparc64_ctx_val = ctx;
-                       on_each_cpu(context_reload, mm, 0);
+                       need_context_reload = true;
                }
-               spin_unlock(&ctx_alloc_lock);
+               spin_unlock_irq(&ctx_alloc_lock);
+
+               if (need_context_reload)
+                       on_each_cpu(context_reload, mm, 0);
        }
 }
 #endif
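The reordering above avoids a classic deadlock: on_each_cpu() waits for every CPU to service an IPI, but a CPU spinning on ctx_alloc_lock with interrupts disabled can never take that IPI. A sketch of the safe pattern, with context_changed standing in for the real update logic:

        bool need_reload = false;

        spin_lock_irq(&ctx_alloc_lock);
        /* ... update mm->context under the lock ... */
        if (context_changed)
                need_reload = true;
        spin_unlock_irq(&ctx_alloc_lock);

        /* Cross-call only after the lock is dropped and IRQs are on. */
        if (need_reload)
                on_each_cpu(context_reload, mm, 0);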
index 4b7cef9e94e05917476335cb8248de28a6f25696..c1467ac59ce63e150b571708df8b950c8eb4dfc1 100644 (file)
@@ -78,7 +78,7 @@ struct thread_info {
 
 #ifndef __ASSEMBLY__
 
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
 
 /* How to get the thread information struct from C. */
 register unsigned long stack_pointer __asm__("sp");
index 6b705ccc9cc1ec57abc481e0a296d105faf8f250..a465d8372edda17448b8b9fa3c3920da9d1047b2 100644 (file)
@@ -73,8 +73,9 @@ void arch_cpu_idle(void)
 /*
  * Release a thread_info structure
  */
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
 {
+       struct thread_info *info = (void *)stack;
        struct single_step_state *step_state = info->step_state;
 
        if (step_state) {
index 7bf2491a9c1f6ce8b4f2c1fc31b4dc8f46addaab..c4d5bf841a7f19bc727abbff423ed12b20666f29 100644 (file)
@@ -231,7 +231,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
                               int order)
 {
-       gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
+       gfp_t flags = GFP_KERNEL|__GFP_ZERO;
        struct page *p;
        int i;
 
index b2a2dff50b4e1983a89b9257de79417333c1f2b6..e7437ec627108a263f105ad042f6529201d1775c 100644 (file)
@@ -204,7 +204,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
        pte_t *pte;
 
-       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
@@ -212,7 +212,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *pte;
 
-       pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
index 2e02d1356fdfffd531305693399c378df7f1735d..26775793c204e77d09f6ed1e62a3edab97aa76a5 100644 (file)
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)                  get_pgd_slow(mm)
 #define pgd_free(mm, pgd)              free_pgd_slow(mm, pgd)
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /*
  * Allocate one PTE table.
index 0a7b885964baccd540f2c9e600dce014398bd987..d9a94da0c29fd72b048080681ab1660928a7d3f0 100644 (file)
@@ -2439,6 +2439,15 @@ config PCI_CNB20LE_QUIRK
 
 source "drivers/pci/Kconfig"
 
+config ISA_BUS
+       bool "ISA-style bus support on modern systems" if EXPERT
+       select ISA_BUS_API
+       help
+         Enables ISA-style drivers on modern systems. This is necessary to
+         support PC/104 devices on X86_64 platforms.
+
+         If unsure, say N.
+
 # x86_64 has no ISA slots, but can have ISA-style DMA.
 config ISA_DMA_API
        bool "ISA-style DMA support" if (X86_64 && EXPERT)
index 700a9c6e6159362d9b62631d124d6703a6554ba5..be8e688fa0d48b1fe4aa5180c6afa2c2fb4e84f1 100644 (file)
@@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
        for i in lib lib64 share end ; do \
                if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
                        cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+                       if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
+                               cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
+                       fi ; \
                        break ; \
                fi ; \
                if [ $$i = end ] ; then exit 1 ; fi ; \
index 33787ee817f0cdaad78814849b0aad2dc7e2b407..91eac39625beb1b85fb3651a1b1758d296779c9a 100644 (file)
@@ -263,7 +263,7 @@ static bool check_hw_exists(void)
 
 msr_fail:
        pr_cont("Broken PMU hardware detected, using software events only.\n");
-       pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+       printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
                boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
                reg, val_new);
 
@@ -2319,7 +2319,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        struct stack_frame frame;
-       const void __user *fp;
+       const unsigned long __user *fp;
 
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
@@ -2332,7 +2332,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
        if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
                return;
 
-       fp = (void __user *)regs->bp;
+       fp = (unsigned long __user *)regs->bp;
 
        perf_callchain_store(entry, regs->ip);
 
@@ -2345,16 +2345,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
        pagefault_disable();
        while (entry->nr < entry->max_stack) {
                unsigned long bytes;
+
                frame.next_frame             = NULL;
                frame.return_address = 0;
 
-               if (!access_ok(VERIFY_READ, fp, 16))
+               if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
                        break;
 
-               bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+               bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
                if (bytes != 0)
                        break;
-               bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+               bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
                if (bytes != 0)
                        break;
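Typing fp as unsigned long __user * lets the same walk work on either word size: fp + 1 advances by sizeof(unsigned long) and lands on the return-address slot of the saved frame, whereas the old fp + 8 on a void pointer relied on a GCC extension and hardcoded the 64-bit layout. A standalone illustration of the pointer arithmetic (not kernel code):

        #include <stdio.h>

        int main(void)
        {
                unsigned long frame[2] = { 0x1000, 0x401234 }; /* next, ret */
                unsigned long *fp = frame;

                /* fp + 1 is the return-address slot whether
                 * unsigned long is 4 or 8 bytes wide. */
                printf("ret = %#lx\n", *(fp + 1));
                return 0;
        }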
 
index 3660b2cf245ad7d22eaf0f9d1cf2602d270b62c2..06c2baa518144336133f775299b086d0ac4306ec 100644 (file)
@@ -1,8 +1,8 @@
 obj-$(CONFIG_CPU_SUP_INTEL)            += core.o bts.o cqm.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += ds.o knc.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += lbr.o p4.o p6.o pt.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)   += intel-rapl.o
-intel-rapl-objs                                := rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)   += intel-rapl-perf.o
+intel-rapl-perf-objs                   := rapl.o
 obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
 intel-uncore-objs                      := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
 obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
index 7c666958a6250354aa204d24e73f94670264ffee..9b4f9d3ce4650cc7c38632b73c10e5020c0695a3 100644 (file)
@@ -115,6 +115,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 
+       /*
+        * When HT is off these events can only run on the bottom 4 counters
+        * When HT is on, they are impacted by the HT bug and require EXCL access
+        */
        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -139,6 +143,10 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 
+       /*
+        * When HT is off these events can only run on the bottom 4 counters
+        * When HT is on, they are impacted by the HT bug and require EXCL access
+        */
        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -182,6 +190,16 @@ struct event_constraint intel_skl_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),    /* INST_RETIRED.PREC_DIST */
+
+       /*
+        * when HT is off, these can only run on the bottom 4 counters
+        */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xc6, 0xf),      /* FRONTEND_RETIRED.* */
+
        EVENT_CONSTRAINT_END
 };
 
@@ -250,6 +268,10 @@ static struct event_constraint intel_hsw_event_constraints[] = {
        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 
+       /*
+        * When HT is off these events can only run on the bottom 4 counters
+        * When HT is on, they are impacted by the HT bug and require EXCL access
+        */
        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -264,6 +286,13 @@ struct event_constraint intel_bdw_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
        INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),        /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
+       /*
+        * when HT is off, these can only run on the bottom 4 counters
+        */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
        EVENT_CONSTRAINT_END
 };
 
index 99c4bab123cdae71b14ab92241d81dbf15616fe1..e30eef4f29a6f6034e46beab4a559f9994311c34 100644 (file)
@@ -714,7 +714,7 @@ static void cleanup_rapl_pmus(void)
        int i;
 
        for (i = 0; i < rapl_pmus->maxpkg; i++)
-               kfree(rapl_pmus->pmus + i);
+               kfree(rapl_pmus->pmus[i]);
        kfree(rapl_pmus);
 }
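The one-character change above fixes a real wrong-address kfree: rapl_pmus->pmus is an array of pointers, so each element must be freed, not the address of the array slot. A sketch of the distinction, with a hypothetical struct layout:

        struct holder {
                int maxpkg;
                struct rapl_pmu *pmus[];  /* array of heap pointers */
        };

        for (i = 0; i < h->maxpkg; i++) {
                /* kfree(h->pmus + i) would hand the allocator a
                 * pointer into the middle of 'h' itself. */
                kfree(h->pmus[i]);
        }
        kfree(h);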
 
index b2625867ebd17543401cf6dbc5bf9e76fcd6ee6c..874e8bd64d1d54cdf662c5ba348f8872a0265e24 100644 (file)
@@ -2868,27 +2868,10 @@ static struct intel_uncore_type bdx_uncore_cbox = {
        .format_group           = &hswep_uncore_cbox_format_group,
 };
 
-static struct intel_uncore_type bdx_uncore_sbox = {
-       .name                   = "sbox",
-       .num_counters           = 4,
-       .num_boxes              = 4,
-       .perf_ctr_bits          = 48,
-       .event_ctl              = HSWEP_S0_MSR_PMON_CTL0,
-       .perf_ctr               = HSWEP_S0_MSR_PMON_CTR0,
-       .event_mask             = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
-       .box_ctl                = HSWEP_S0_MSR_PMON_BOX_CTL,
-       .msr_offset             = HSWEP_SBOX_MSR_OFFSET,
-       .ops                    = &hswep_uncore_sbox_msr_ops,
-       .format_group           = &hswep_uncore_sbox_format_group,
-};
-
-#define BDX_MSR_UNCORE_SBOX    3
-
 static struct intel_uncore_type *bdx_msr_uncores[] = {
        &bdx_uncore_ubox,
        &bdx_uncore_cbox,
        &hswep_uncore_pcu,
-       &bdx_uncore_sbox,
        NULL,
 };
 
@@ -2897,10 +2880,6 @@ void bdx_uncore_cpu_init(void)
        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
        uncore_msr_uncores = bdx_msr_uncores;
-
-       /* BDX-DE doesn't have SBOX */
-       if (boot_cpu_data.x86_model == 86)
-               uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
 }
 
 static struct intel_uncore_type bdx_uncore_ha = {
index 4a413485f9eb8ef58ec71c77ff2594f4300c8ea6..c64b1e9c5d1a30d916be2d944a3e94134b240fb0 100644 (file)
 #define X86_BUG_FXSAVE_LEAK    X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_BUG_CLFLUSH_MONITOR        X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
 #define X86_BUG_SYSRET_SS_ATTRS        X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-#define X86_BUG_NULL_SEG       X86_BUG(9) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE   X86_BUG(10) /* SWAPGS without input dep on GS */
-
-
 #ifdef CONFIG_X86_32
 /*
  * 64-bit kernels don't use X86_BUG_ESPFIX.  Make the define conditional
  */
 #define X86_BUG_ESPFIX         X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
 #endif
+#define X86_BUG_NULL_SEG       X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE   X86_BUG(11) /* SWAPGS without input dep on GS */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
new file mode 100644 (file)
index 0000000..6999f7d
--- /dev/null
@@ -0,0 +1,68 @@
+#ifndef _ASM_X86_INTEL_FAMILY_H
+#define _ASM_X86_INTEL_FAMILY_H
+
+/*
+ * "Big Core" Processors (Branded as Core, Xeon, etc...)
+ *
+ * The "_X" parts are generally the EP and EX Xeons, or the
+ * "Extreme" ones, like Broadwell-E.
+ *
+ * Things ending in "2" are usually because we have no better
+ * name for them.  There's no processor called "WESTMERE2".
+ */
+
+#define INTEL_FAM6_CORE_YONAH          0x0E
+#define INTEL_FAM6_CORE2_MEROM         0x0F
+#define INTEL_FAM6_CORE2_MEROM_L       0x16
+#define INTEL_FAM6_CORE2_PENRYN                0x17
+#define INTEL_FAM6_CORE2_DUNNINGTON    0x1D
+
+#define INTEL_FAM6_NEHALEM             0x1E
+#define INTEL_FAM6_NEHALEM_EP          0x1A
+#define INTEL_FAM6_NEHALEM_EX          0x2E
+#define INTEL_FAM6_WESTMERE            0x25
+#define INTEL_FAM6_WESTMERE2           0x1F
+#define INTEL_FAM6_WESTMERE_EP         0x2C
+#define INTEL_FAM6_WESTMERE_EX         0x2F
+
+#define INTEL_FAM6_SANDYBRIDGE         0x2A
+#define INTEL_FAM6_SANDYBRIDGE_X       0x2D
+#define INTEL_FAM6_IVYBRIDGE           0x3A
+#define INTEL_FAM6_IVYBRIDGE_X         0x3E
+
+#define INTEL_FAM6_HASWELL_CORE                0x3C
+#define INTEL_FAM6_HASWELL_X           0x3F
+#define INTEL_FAM6_HASWELL_ULT         0x45
+#define INTEL_FAM6_HASWELL_GT3E                0x46
+
+#define INTEL_FAM6_BROADWELL_CORE      0x3D
+#define INTEL_FAM6_BROADWELL_XEON_D    0x56
+#define INTEL_FAM6_BROADWELL_GT3E      0x47
+#define INTEL_FAM6_BROADWELL_X         0x4F
+
+#define INTEL_FAM6_SKYLAKE_MOBILE      0x4E
+#define INTEL_FAM6_SKYLAKE_DESKTOP     0x5E
+#define INTEL_FAM6_SKYLAKE_X           0x55
+#define INTEL_FAM6_KABYLAKE_MOBILE     0x8E
+#define INTEL_FAM6_KABYLAKE_DESKTOP    0x9E
+
+/* "Small Core" Processors (Atom) */
+
+#define INTEL_FAM6_ATOM_PINEVIEW       0x1C
+#define INTEL_FAM6_ATOM_LINCROFT       0x26
+#define INTEL_FAM6_ATOM_PENWELL                0x27
+#define INTEL_FAM6_ATOM_CLOVERVIEW     0x35
+#define INTEL_FAM6_ATOM_CEDARVIEW      0x36
+#define INTEL_FAM6_ATOM_SILVERMONT1    0x37 /* BayTrail/BYT / Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT2    0x4D /* Avaton/Rangely */
+#define INTEL_FAM6_ATOM_AIRMONT                0x4C /* CherryTrail / Braswell */
+#define INTEL_FAM6_ATOM_MERRIFIELD1    0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MERRIFIELD2    0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_GOLDMONT       0x5C
+#define INTEL_FAM6_ATOM_DENVERTON      0x5F /* Goldmont Microserver */
+
+/* Xeon Phi */
+
+#define INTEL_FAM6_XEON_PHI_KNL                0x57 /* Knights Landing */
+
+#endif /* _ASM_X86_INTEL_FAMILY_H */
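A hypothetical consumer of the new header, comparing against a symbolic model name instead of a bare hex constant (sketch only; the helper is not part of this series):

        #include <asm/intel-family.h>

        static bool is_skylake_server(void)
        {
                return boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                       boot_cpu_data.x86 == 6 &&
                       boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X;
        }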
index 4421b5da409d6424d91eac6fc91231d8e92d3658..d1d1e5094c2844d31ff7d2028c950bd42e396312 100644 (file)
@@ -38,12 +38,11 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
 #define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR)                                          \
-       (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
-                             THREAD_SIZE - (unsigned long)(ADDR)))    \
-        ? (MAX_STACK_SIZE)                                            \
-        : (((unsigned long)current_thread_info()) +                   \
-           THREAD_SIZE - (unsigned long)(ADDR)))
+#define CUR_STACK_SIZE(ADDR) \
+       (current_top_of_stack() - (unsigned long)(ADDR))
+#define MIN_STACK_SIZE(ADDR)                           \
+       (MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ?        \
+        MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
 
 #define flush_insn_slot(p)     do { } while (0)
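What the rewritten macros compute, written out as a function for clarity (a sketch, not the kernel's code): the room between ADDR and the top of the current stack, clamped to MAX_STACK_SIZE. Using current_top_of_stack() drops the old assumption that thread_info sits at the base of the stack.

        static inline unsigned long min_stack_size(unsigned long addr)
        {
                unsigned long room = current_top_of_stack() - addr;

                return room < MAX_STACK_SIZE ? room : MAX_STACK_SIZE;
        }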
 
index e0fbe7e70dc19396ddebf638f6b126b3863b4442..69e62862b6222e4f3ec3a90f205cdf176ec983b3 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/irqbypass.h>
 #include <linux/hyperv.h>
 
+#include <asm/apic.h>
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
 #include <asm/mtrr.h>
@@ -1368,4 +1369,14 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
+static inline int kvm_cpu_get_apicid(int mps_cpu)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+       return __default_cpu_present_to_apicid(mps_cpu);
+#else
+       WARN_ON_ONCE(1);
+       return BAD_APICID;
+#endif
+}
+
 #endif /* _ASM_X86_KVM_HOST_H */
index 7dc1d8fef7fdec6a633f5cd3f7ada4d2ba09e8fe..b5fee97813cdf8d1408aa7ac874b21593eef8121 100644 (file)
@@ -122,7 +122,7 @@ notrace static inline void native_write_msr(unsigned int msr,
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
                     : : "c" (msr), "a"(low), "d" (high) : "memory");
-       if (msr_tracepoint_active(__tracepoint_read_msr))
+       if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
@@ -141,7 +141,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
                     : "c" (msr), "0" (low), "d" (high),
                       [fault] "i" (-EIO)
                     : "memory");
-       if (msr_tracepoint_active(__tracepoint_read_msr))
+       if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), err);
        return err;
 }
index bf7f8b55b0f9d8940d839b695466d32999078c10..574c23cf761ac97b50f6042cd86cc58ef1575add 100644 (file)
@@ -81,7 +81,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
        struct page *page;
-       page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+       page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
        if (!page)
                return NULL;
        if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +125,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+       return (pud_t *)get_zeroed_page(GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
index fdcc04020636e33269d7a756162cfe852f509823..7c1c89598688bab2edc26dce70ab722e5f79b123 100644 (file)
@@ -68,30 +68,23 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
        return product;
 }
 
-static __always_inline
-u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
-{
-       u64 delta = rdtsc_ordered() - src->tsc_timestamp;
-       return pvclock_scale_delta(delta, src->tsc_to_system_mul,
-                                  src->tsc_shift);
-}
-
 static __always_inline
 unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
                               cycle_t *cycles, u8 *flags)
 {
        unsigned version;
-       cycle_t ret, offset;
-       u8 ret_flags;
+       cycle_t offset;
+       u64 delta;
 
        version = src->version;
+       /* Make the latest version visible */
+       smp_rmb();
 
-       offset = pvclock_get_nsec_offset(src);
-       ret = src->system_time + offset;
-       ret_flags = src->flags;
-
-       *cycles = ret;
-       *flags = ret_flags;
+       delta = rdtsc_ordered() - src->tsc_timestamp;
+       offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+                                  src->tsc_shift);
+       *cycles = src->system_time + offset;
+       *flags = src->flags;
        return version;
 }
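The new smp_rmb() orders the version read before the data reads. A sketch of the retry protocol a caller is expected to build around __pvclock_read_cycles, simplified: the hypervisor makes 'version' odd while it updates the record, so the reader loops until it sees the same even version before and after the reads.

        do {
                version = src->version;
                smp_rmb();              /* version before data */
                delta  = rdtsc_ordered() - src->tsc_timestamp;
                cycles = src->system_time +
                         pvclock_scale_delta(delta, src->tsc_to_system_mul,
                                             src->tsc_shift);
                flags  = src->flags;
                smp_rmb();              /* data before re-check */
        } while ((src->version & 1) || version != src->version);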
 
index 7c247e7404be778e06ebff15f020dbd792fb7be0..0944218af9e279d631af9f03506a9672c4443c6d 100644 (file)
@@ -14,7 +14,7 @@ extern int kstack_depth_to_print;
 struct thread_info;
 struct stacktrace_ops;
 
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+typedef unsigned long (*walk_stack_t)(struct task_struct *task,
                                      unsigned long *stack,
                                      unsigned long bp,
                                      const struct stacktrace_ops *ops,
@@ -23,13 +23,13 @@ typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
                                      int *graph);
 
 extern unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
                    unsigned long *stack, unsigned long bp,
                    const struct stacktrace_ops *ops, void *data,
                    unsigned long *end, int *graph);
 
 extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
                       unsigned long *stack, unsigned long bp,
                       const struct stacktrace_ops *ops, void *data,
                       unsigned long *end, int *graph);
index a147e676fc7b3439d156534472d63be5c2d10f4a..e991d5c8bb3a1b1d21c08f198a6d38c97f45effb 100644 (file)
@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
                i++;
 
-       if (i == 0)
-               return 0;
+       if (!i)
+               return -ENODEV;
 
        nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
index 84e33ff5a6d595693c3718477f168f20d5546316..446702ed99dca3edf5a8a48cccb6c2475ae9e85f 100644 (file)
@@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void)
                res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
                mem += IOAPIC_RESOURCE_NAME_SIZE;
+               ioapics[i].iomem_res = &res[num];
                num++;
-               ioapics[i].iomem_res = res;
        }
 
        ioapic_resources = res;
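The fix above captures the current slot before num is incremented; previously every I/O APIC's back-pointer aliased the first entry. The shape of the corrected loop, condensed to a sketch:

        for (i = 0; i < nr_ioapics; i++) {
                /* ... fill in res[num] for ioapic i ... */
                ioapics[i].iomem_res = &res[num]; /* this ioapic's entry */
                num++;
        }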
index c343a54bed396d2f924d4ed6d8e5ec2188c636b0..f5c69d8974e176e44a995bd6f91512c7d030b001 100644 (file)
@@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
        u64 value;
 
        /* re-enable TopologyExtensions if switched off by BIOS */
-       if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+       if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
                if (msr_set_bit(0xc0011005, 54) > 0) {
                        rdmsrl(0xc0011005, value);
                        if (value & BIT_64(54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-                               pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+                               pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
                        }
                }
        }
index 2bb25c3fe2e8e9c6933ab37c99a532f8a36e47fa..ef8017ca5ba9dab6c59e51ae91b8c353ddaa00da 100644 (file)
@@ -42,16 +42,14 @@ void printk_address(unsigned long address)
 static void
 print_ftrace_graph_addr(unsigned long addr, void *data,
                        const struct stacktrace_ops *ops,
-                       struct thread_info *tinfo, int *graph)
+                       struct task_struct *task, int *graph)
 {
-       struct task_struct *task;
        unsigned long ret_addr;
        int index;
 
        if (addr != (unsigned long)return_to_handler)
                return;
 
-       task = tinfo->task;
        index = task->curr_ret_stack;
 
        if (!task->ret_stack || index < *graph)
@@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
 static inline void
 print_ftrace_graph_addr(unsigned long addr, void *data,
                        const struct stacktrace_ops *ops,
-                       struct thread_info *tinfo, int *graph)
+                       struct task_struct *task, int *graph)
 { }
 #endif
 
@@ -79,10 +77,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
                        void *p, unsigned int size, void *end)
 {
-       void *t = tinfo;
+       void *t = task_stack_page(task);
        if (end) {
                if (p < end && p >= (end-THREAD_SIZE))
                        return 1;
@@ -93,14 +91,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
 }
 
 unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data,
                unsigned long *end, int *graph)
 {
        struct stack_frame *frame = (struct stack_frame *)bp;
 
-       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+       while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
                unsigned long addr;
 
                addr = *stack;
@@ -112,7 +110,7 @@ print_context_stack(struct thread_info *tinfo,
                        } else {
                                ops->address(data, addr, 0);
                        }
-                       print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+                       print_ftrace_graph_addr(addr, data, ops, task, graph);
                }
                stack++;
        }
@@ -121,7 +119,7 @@ print_context_stack(struct thread_info *tinfo,
 EXPORT_SYMBOL_GPL(print_context_stack);
 
 unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
                       unsigned long *stack, unsigned long bp,
                       const struct stacktrace_ops *ops, void *data,
                       unsigned long *end, int *graph)
@@ -129,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo,
        struct stack_frame *frame = (struct stack_frame *)bp;
        unsigned long *ret_addr = &frame->return_address;
 
-       while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+       while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
                unsigned long addr = *ret_addr;
 
                if (!__kernel_text_address(addr))
@@ -139,7 +137,7 @@ print_context_stack_bp(struct thread_info *tinfo,
 
                if (ops->address(data, addr, 1))
                        break;
                frame = frame->next_frame;
                ret_addr = &frame->return_address;
-               print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+               print_ftrace_graph_addr(addr, data, ops, task, graph);
        }
 
 
        return (unsigned long)frame;
index 464ffd69b92e9ef376b9c534aec3c12973d6ad7a..fef917e79b9d614eeaca609c9c03e980bae2272f 100644 (file)
@@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                bp = stack_frame(task, regs);
 
        for (;;) {
-               struct thread_info *context;
                void *end_stack;
 
                end_stack = is_hardirq_stack(stack, cpu);
                if (!end_stack)
                        end_stack = is_softirq_stack(stack, cpu);
 
-               context = task_thread_info(task);
-               bp = ops->walk_stack(context, stack, bp, ops, data,
+               bp = ops->walk_stack(task, stack, bp, ops, data,
                                     end_stack, &graph);
 
                /* Stop if not on irq stack */
index 5f1c6266eb3028579f5036161dc240387dcb124f..d558a8a49016b2c6eccb50f3685cc136fdc621ad 100644 (file)
@@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                const struct stacktrace_ops *ops, void *data)
 {
        const unsigned cpu = get_cpu();
-       struct thread_info *tinfo;
        unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
        unsigned long dummy;
        unsigned used = 0;
@@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
         * current stack address. If the stacks consist of nested
         * exceptions
         */
-       tinfo = task_thread_info(task);
        while (!done) {
                unsigned long *stack_end;
                enum stack_type stype;
@@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                        if (ops->stack(data, id) < 0)
                                break;
 
-                       bp = ops->walk_stack(tinfo, stack, bp, ops,
+                       bp = ops->walk_stack(task, stack, bp, ops,
                                             data, stack_end, &graph);
                        ops->stack(data, "<EOE>");
                        /*
@@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
                        if (ops->stack(data, "IRQ") < 0)
                                break;
-                       bp = ops->walk_stack(tinfo, stack, bp,
+                       bp = ops->walk_stack(task, stack, bp,
                                     ops, data, stack_end, &graph);
                        /*
                         * We link to the next stack (which would be
@@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
        /*
         * This handles the process stack:
         */
-       bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+       bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
        put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
index bca14c899137bfd754395772a37fbce01f721119..de7501edb21ce5de48db107191cd87a7c2515c65 100644 (file)
 
 #include <linux/pci.h>
 #include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/pci_ids.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
 #include <drm/i915_drm.h>
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
@@ -21,6 +25,9 @@
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/irq_remapping.h>
+#include <asm/early_ioremap.h>
+
+#define dev_err(msg)  pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -75,6 +82,13 @@ static void __init nvidia_bugs(int num, int slot, int func)
 {
 #ifdef CONFIG_ACPI
 #ifdef CONFIG_X86_IO_APIC
+       /*
+        * Only applies to Nvidia root ports (bus 0) and not to
+        * Nvidia graphics cards with PCI ports on secondary buses.
+        */
+       if (num)
+               return;
+
        /*
         * All timer overrides on Nvidia are
         * wrong unless HPET is enabled.
@@ -223,36 +237,19 @@ static void __init intel_remapping_check(int num, int slot, int func)
  * despite the efforts of the "RAM buffer" approach, which simply rounds
  * memory boundaries up to 64M to try to catch space that may decode
  * as RAM and so is not suitable for MMIO.
- *
- * And yes, so far on current devices the base addr is always under 4G.
  */
-static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
-{
-       u32 base;
-
-       /*
-        * For the PCI IDs in this quirk, the stolen base is always
-        * in 0x5c, aka the BDSM register (yes that's really what
-        * it's called).
-        */
-       base = read_pci_config(num, slot, func, 0x5c);
-       base &= ~((1<<20) - 1);
-
-       return base;
-}
 
 #define KB(x)  ((x) * 1024UL)
 #define MB(x)  (KB (KB (x)))
-#define GB(x)  (MB (KB (x)))
 
 static size_t __init i830_tseg_size(void)
 {
-       u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
+       u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
 
-       if (!(tmp & TSEG_ENABLE))
+       if (!(esmramc & TSEG_ENABLE))
                return 0;
 
-       if (tmp & I830_TSEG_SIZE_1M)
+       if (esmramc & I830_TSEG_SIZE_1M)
                return MB(1);
        else
                return KB(512);
@@ -260,27 +257,26 @@ static size_t __init i830_tseg_size(void)
 
 static size_t __init i845_tseg_size(void)
 {
-       u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+       u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+       u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK;
 
-       if (!(tmp & TSEG_ENABLE))
+       if (!(esmramc & TSEG_ENABLE))
                return 0;
 
-       switch (tmp & I845_TSEG_SIZE_MASK) {
-       case I845_TSEG_SIZE_512K:
-               return KB(512);
-       case I845_TSEG_SIZE_1M:
-               return MB(1);
+       switch (tseg_size) {
+       case I845_TSEG_SIZE_512K:       return KB(512);
+       case I845_TSEG_SIZE_1M:         return MB(1);
        default:
-               WARN_ON(1);
-               return 0;
+               WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc);
        }
+       return 0;
 }
 
 static size_t __init i85x_tseg_size(void)
 {
-       u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
+       u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
 
-       if (!(tmp & TSEG_ENABLE))
+       if (!(esmramc & TSEG_ENABLE))
                return 0;
 
        return MB(1);
@@ -300,285 +296,287 @@ static size_t __init i85x_mem_size(void)
  * On 830/845/85x the stolen memory base isn't available in any
  * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
  */
-static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i830_stolen_base(int num, int slot, int func,
+                                          size_t stolen_size)
 {
-       return i830_mem_size() - i830_tseg_size() - stolen_size;
+       return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size;
 }
 
-static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i845_stolen_base(int num, int slot, int func,
+                                          size_t stolen_size)
 {
-       return i830_mem_size() - i845_tseg_size() - stolen_size;
+       return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size;
 }
 
-static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i85x_stolen_base(int num, int slot, int func,
+                                          size_t stolen_size)
 {
-       return i85x_mem_size() - i85x_tseg_size() - stolen_size;
+       return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size;
 }
 
-static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
+static phys_addr_t __init i865_stolen_base(int num, int slot, int func,
+                                          size_t stolen_size)
 {
+       u16 toud;
+
        /*
         * FIXME is the graphics stolen memory region
         * always at TOUD? Ie. is it always the last
         * one to be allocated by the BIOS?
         */
-       return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
+       toud = read_pci_config_16(0, 0, 0, I865_TOUD);
+
+       return (phys_addr_t)toud << 16;
+}
+
+static phys_addr_t __init gen3_stolen_base(int num, int slot, int func,
+                                          size_t stolen_size)
+{
+       u32 bsm;
+
+       /* Almost universally we can find the Graphics Base of Stolen Memory
+        * at register BSM (0x5c) in the igfx configuration space. On a few
+        * (desktop) machines this is also mirrored in the bridge device at
+        * different locations, or in the MCHBAR.
+        */
+       bsm = read_pci_config(num, slot, func, INTEL_BSM);
+
+       return (phys_addr_t)bsm & INTEL_BSM_MASK;
 }
 
 static size_t __init i830_stolen_size(int num, int slot, int func)
 {
-       size_t stolen_size;
        u16 gmch_ctrl;
+       u16 gms;
 
        gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
-
-       switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
-       case I830_GMCH_GMS_STOLEN_512:
-               stolen_size = KB(512);
-               break;
-       case I830_GMCH_GMS_STOLEN_1024:
-               stolen_size = MB(1);
-               break;
-       case I830_GMCH_GMS_STOLEN_8192:
-               stolen_size = MB(8);
-               break;
-       case I830_GMCH_GMS_LOCAL:
-               /* local memory isn't part of the normal address space */
-               stolen_size = 0;
-               break;
+       gms = gmch_ctrl & I830_GMCH_GMS_MASK;
+
+       switch (gms) {
+       case I830_GMCH_GMS_STOLEN_512:  return KB(512);
+       case I830_GMCH_GMS_STOLEN_1024: return MB(1);
+       case I830_GMCH_GMS_STOLEN_8192: return MB(8);
+       /* local memory isn't part of the normal address space */
+       case I830_GMCH_GMS_LOCAL:       return 0;
        default:
-               return 0;
+               WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
        }
 
-       return stolen_size;
+       return 0;
 }
 
 static size_t __init gen3_stolen_size(int num, int slot, int func)
 {
-       size_t stolen_size;
        u16 gmch_ctrl;
+       u16 gms;
 
        gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
-
-       switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
-       case I855_GMCH_GMS_STOLEN_1M:
-               stolen_size = MB(1);
-               break;
-       case I855_GMCH_GMS_STOLEN_4M:
-               stolen_size = MB(4);
-               break;
-       case I855_GMCH_GMS_STOLEN_8M:
-               stolen_size = MB(8);
-               break;
-       case I855_GMCH_GMS_STOLEN_16M:
-               stolen_size = MB(16);
-               break;
-       case I855_GMCH_GMS_STOLEN_32M:
-               stolen_size = MB(32);
-               break;
-       case I915_GMCH_GMS_STOLEN_48M:
-               stolen_size = MB(48);
-               break;
-       case I915_GMCH_GMS_STOLEN_64M:
-               stolen_size = MB(64);
-               break;
-       case G33_GMCH_GMS_STOLEN_128M:
-               stolen_size = MB(128);
-               break;
-       case G33_GMCH_GMS_STOLEN_256M:
-               stolen_size = MB(256);
-               break;
-       case INTEL_GMCH_GMS_STOLEN_96M:
-               stolen_size = MB(96);
-               break;
-       case INTEL_GMCH_GMS_STOLEN_160M:
-               stolen_size = MB(160);
-               break;
-       case INTEL_GMCH_GMS_STOLEN_224M:
-               stolen_size = MB(224);
-               break;
-       case INTEL_GMCH_GMS_STOLEN_352M:
-               stolen_size = MB(352);
-               break;
+       gms = gmch_ctrl & I855_GMCH_GMS_MASK;
+
+       switch (gms) {
+       case I855_GMCH_GMS_STOLEN_1M:   return MB(1);
+       case I855_GMCH_GMS_STOLEN_4M:   return MB(4);
+       case I855_GMCH_GMS_STOLEN_8M:   return MB(8);
+       case I855_GMCH_GMS_STOLEN_16M:  return MB(16);
+       case I855_GMCH_GMS_STOLEN_32M:  return MB(32);
+       case I915_GMCH_GMS_STOLEN_48M:  return MB(48);
+       case I915_GMCH_GMS_STOLEN_64M:  return MB(64);
+       case G33_GMCH_GMS_STOLEN_128M:  return MB(128);
+       case G33_GMCH_GMS_STOLEN_256M:  return MB(256);
+       case INTEL_GMCH_GMS_STOLEN_96M: return MB(96);
+       case INTEL_GMCH_GMS_STOLEN_160M: return MB(160);
+       case INTEL_GMCH_GMS_STOLEN_224M: return MB(224);
+       case INTEL_GMCH_GMS_STOLEN_352M: return MB(352);
        default:
-               stolen_size = 0;
-               break;
+               WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl);
        }
 
-       return stolen_size;
+       return 0;
 }
 
 static size_t __init gen6_stolen_size(int num, int slot, int func)
 {
        u16 gmch_ctrl;
+       u16 gms;
 
        gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-       gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
-       gmch_ctrl &= SNB_GMCH_GMS_MASK;
+       gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
 
-       return gmch_ctrl << 25; /* 32 MB units */
+       return (size_t)gms * MB(32);
 }
 
 static size_t __init gen8_stolen_size(int num, int slot, int func)
 {
        u16 gmch_ctrl;
+       u16 gms;
 
        gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-       gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
-       gmch_ctrl &= BDW_GMCH_GMS_MASK;
-       return gmch_ctrl << 25; /* 32 MB units */
+       gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
+
+       return (size_t)gms * MB(32);
 }
 
 static size_t __init chv_stolen_size(int num, int slot, int func)
 {
        u16 gmch_ctrl;
+       u16 gms;
 
        gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-       gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
-       gmch_ctrl &= SNB_GMCH_GMS_MASK;
+       gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK;
 
        /*
         * 0x0  to 0x10: 32MB increments starting at 0MB
         * 0x11 to 0x16: 4MB increments starting at 8MB
         * 0x17 to 0x1d: 4MB increments start at 36MB
         */
-       if (gmch_ctrl < 0x11)
-               return gmch_ctrl << 25;
-       else if (gmch_ctrl < 0x17)
-               return (gmch_ctrl - 0x11 + 2) << 22;
+       if (gms < 0x11)
+               return (size_t)gms * MB(32);
+       else if (gms < 0x17)
+               return (size_t)(gms - 0x11 + 2) * MB(4);
        else
-               return (gmch_ctrl - 0x17 + 9) << 22;
+               return (size_t)(gms - 0x17 + 9) * MB(4);
 }
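
A worked check of the three Cherryview ranges decoded above; chv_decode
is a hypothetical stand-in that mirrors the kernel logic:

    #include <assert.h>
    #include <stddef.h>

    #define MB(x) ((size_t)(x) << 20)       /* assumed definition */

    static size_t chv_decode(unsigned gms)  /* mirrors chv_stolen_size() */
    {
            if (gms < 0x11)
                    return (size_t)gms * MB(32);
            else if (gms < 0x17)
                    return (size_t)(gms - 0x11 + 2) * MB(4);
            else
                    return (size_t)(gms - 0x17 + 9) * MB(4);
    }

    int main(void)
    {
            assert(chv_decode(0x10) == MB(512)); /* top of the 32MB range */
            assert(chv_decode(0x11) == MB(8));   /* 4MB steps from 8MB */
            assert(chv_decode(0x17) == MB(36));  /* 4MB steps from 36MB */
            return 0;
    }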
 
-struct intel_stolen_funcs {
-       size_t (*size)(int num, int slot, int func);
-       u32 (*base)(int num, int slot, int func, size_t size);
-};
-
 static size_t __init gen9_stolen_size(int num, int slot, int func)
 {
        u16 gmch_ctrl;
+       u16 gms;
 
        gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
-       gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
-       gmch_ctrl &= BDW_GMCH_GMS_MASK;
+       gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK;
 
-       if (gmch_ctrl < 0xf0)
-               return gmch_ctrl << 25; /* 32 MB units */
+       /* 0x0  to 0xef: 32MB increments starting at 0MB */
+       /* 0xf0 to 0xfe: 4MB increments starting at 4MB */
+       if (gms < 0xf0)
+               return (size_t)gms * MB(32);
        else
-               /* 4MB increments starting at 0xf0 for 4MB */
-               return (gmch_ctrl - 0xf0 + 1) << 22;
+               return (size_t)(gms - 0xf0 + 1) * MB(4);
 }
 
-typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+struct intel_early_ops {
+       size_t (*stolen_size)(int num, int slot, int func);
+       phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size);
+};
 
-static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
-       .base = i830_stolen_base,
-       .size = i830_stolen_size,
+static const struct intel_early_ops i830_early_ops __initconst = {
+       .stolen_base = i830_stolen_base,
+       .stolen_size = i830_stolen_size,
 };
 
-static const struct intel_stolen_funcs i845_stolen_funcs __initconst = {
-       .base = i845_stolen_base,
-       .size = i830_stolen_size,
+static const struct intel_early_ops i845_early_ops __initconst = {
+       .stolen_base = i845_stolen_base,
+       .stolen_size = i830_stolen_size,
 };
 
-static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = {
-       .base = i85x_stolen_base,
-       .size = gen3_stolen_size,
+static const struct intel_early_ops i85x_early_ops __initconst = {
+       .stolen_base = i85x_stolen_base,
+       .stolen_size = gen3_stolen_size,
 };
 
-static const struct intel_stolen_funcs i865_stolen_funcs __initconst = {
-       .base = i865_stolen_base,
-       .size = gen3_stolen_size,
+static const struct intel_early_ops i865_early_ops __initconst = {
+       .stolen_base = i865_stolen_base,
+       .stolen_size = gen3_stolen_size,
 };
 
-static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = {
-       .base = intel_stolen_base,
-       .size = gen3_stolen_size,
+static const struct intel_early_ops gen3_early_ops __initconst = {
+       .stolen_base = gen3_stolen_base,
+       .stolen_size = gen3_stolen_size,
 };
 
-static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = {
-       .base = intel_stolen_base,
-       .size = gen6_stolen_size,
+static const struct intel_early_ops gen6_early_ops __initconst = {
+       .stolen_base = gen3_stolen_base,
+       .stolen_size = gen6_stolen_size,
 };
 
-static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
-       .base = intel_stolen_base,
-       .size = gen8_stolen_size,
+static const struct intel_early_ops gen8_early_ops __initconst = {
+       .stolen_base = gen3_stolen_base,
+       .stolen_size = gen8_stolen_size,
 };
 
-static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
-       .base = intel_stolen_base,
-       .size = gen9_stolen_size,
+static const struct intel_early_ops gen9_early_ops __initconst = {
+       .stolen_base = gen3_stolen_base,
+       .stolen_size = gen9_stolen_size,
 };
 
-static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
-       .base = intel_stolen_base,
-       .size = chv_stolen_size,
+static const struct intel_early_ops chv_early_ops __initconst = {
+       .stolen_base = gen3_stolen_base,
+       .stolen_size = chv_stolen_size,
 };
 
-static const struct pci_device_id intel_stolen_ids[] __initconst = {
-       INTEL_I830_IDS(&i830_stolen_funcs),
-       INTEL_I845G_IDS(&i845_stolen_funcs),
-       INTEL_I85X_IDS(&i85x_stolen_funcs),
-       INTEL_I865G_IDS(&i865_stolen_funcs),
-       INTEL_I915G_IDS(&gen3_stolen_funcs),
-       INTEL_I915GM_IDS(&gen3_stolen_funcs),
-       INTEL_I945G_IDS(&gen3_stolen_funcs),
-       INTEL_I945GM_IDS(&gen3_stolen_funcs),
-       INTEL_VLV_M_IDS(&gen6_stolen_funcs),
-       INTEL_VLV_D_IDS(&gen6_stolen_funcs),
-       INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
-       INTEL_I965G_IDS(&gen3_stolen_funcs),
-       INTEL_G33_IDS(&gen3_stolen_funcs),
-       INTEL_I965GM_IDS(&gen3_stolen_funcs),
-       INTEL_GM45_IDS(&gen3_stolen_funcs),
-       INTEL_G45_IDS(&gen3_stolen_funcs),
-       INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
-       INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
-       INTEL_SNB_D_IDS(&gen6_stolen_funcs),
-       INTEL_SNB_M_IDS(&gen6_stolen_funcs),
-       INTEL_IVB_M_IDS(&gen6_stolen_funcs),
-       INTEL_IVB_D_IDS(&gen6_stolen_funcs),
-       INTEL_HSW_D_IDS(&gen6_stolen_funcs),
-       INTEL_HSW_M_IDS(&gen6_stolen_funcs),
-       INTEL_BDW_M_IDS(&gen8_stolen_funcs),
-       INTEL_BDW_D_IDS(&gen8_stolen_funcs),
-       INTEL_CHV_IDS(&chv_stolen_funcs),
-       INTEL_SKL_IDS(&gen9_stolen_funcs),
-       INTEL_BXT_IDS(&gen9_stolen_funcs),
-       INTEL_KBL_IDS(&gen9_stolen_funcs),
+static const struct pci_device_id intel_early_ids[] __initconst = {
+       INTEL_I830_IDS(&i830_early_ops),
+       INTEL_I845G_IDS(&i845_early_ops),
+       INTEL_I85X_IDS(&i85x_early_ops),
+       INTEL_I865G_IDS(&i865_early_ops),
+       INTEL_I915G_IDS(&gen3_early_ops),
+       INTEL_I915GM_IDS(&gen3_early_ops),
+       INTEL_I945G_IDS(&gen3_early_ops),
+       INTEL_I945GM_IDS(&gen3_early_ops),
+       INTEL_VLV_M_IDS(&gen6_early_ops),
+       INTEL_VLV_D_IDS(&gen6_early_ops),
+       INTEL_PINEVIEW_IDS(&gen3_early_ops),
+       INTEL_I965G_IDS(&gen3_early_ops),
+       INTEL_G33_IDS(&gen3_early_ops),
+       INTEL_I965GM_IDS(&gen3_early_ops),
+       INTEL_GM45_IDS(&gen3_early_ops),
+       INTEL_G45_IDS(&gen3_early_ops),
+       INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
+       INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
+       INTEL_SNB_D_IDS(&gen6_early_ops),
+       INTEL_SNB_M_IDS(&gen6_early_ops),
+       INTEL_IVB_M_IDS(&gen6_early_ops),
+       INTEL_IVB_D_IDS(&gen6_early_ops),
+       INTEL_HSW_D_IDS(&gen6_early_ops),
+       INTEL_HSW_M_IDS(&gen6_early_ops),
+       INTEL_BDW_M_IDS(&gen8_early_ops),
+       INTEL_BDW_D_IDS(&gen8_early_ops),
+       INTEL_CHV_IDS(&chv_early_ops),
+       INTEL_SKL_IDS(&gen9_early_ops),
+       INTEL_BXT_IDS(&gen9_early_ops),
+       INTEL_KBL_IDS(&gen9_early_ops),
 };
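
Each INTEL_*_IDS() entry stashes the ops pointer in the pci_device_id
driver_data field, which intel_graphics_quirks() below casts back. A
sketch of roughly how the macros expand, based on the era's
include/drm/i915_pciids.h (not part of this patch):

    #define INTEL_VGA_DEVICE(id, info) {            \
            0x8086, id,                             \
            ~0, ~0,                                 \
            0x030000, 0xff0000,                     \
            (unsigned long) info }                  /* -> .driver_data */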
 
-static void __init intel_graphics_stolen(int num, int slot, int func)
+static void __init
+intel_graphics_stolen(int num, int slot, int func,
+                     const struct intel_early_ops *early_ops)
 {
+       phys_addr_t base, end;
        size_t size;
+
+       size = early_ops->stolen_size(num, slot, func);
+       base = early_ops->stolen_base(num, slot, func, size);
+
+       if (!size || !base)
+               return;
+
+       end = base + size - 1;
+       printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
+              &base, &end);
+
+       /* Mark this space as reserved */
+       e820_add_region(base, size, E820_RESERVED);
+       sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+}
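
The %pa specifier prints a phys_addr_t at its native width and takes its
argument by reference, hence the &base and &end above. A minimal usage
sketch with made-up addresses:

    phys_addr_t base = 0xad000000, end = 0xaf7fffff;    /* hypothetical */

    /* %pa dereferences its argument; passing base by value would be wrong */
    printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n",
           &base, &end);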
+
+static void __init intel_graphics_quirks(int num, int slot, int func)
+{
+       const struct intel_early_ops *early_ops;
+       u16 device;
        int i;
-       u32 start;
-       u16 device, subvendor, subdevice;
 
        device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
-       subvendor = read_pci_config_16(num, slot, func,
-                                      PCI_SUBSYSTEM_VENDOR_ID);
-       subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID);
-
-       for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
-               if (intel_stolen_ids[i].device == device) {
-                       const struct intel_stolen_funcs *stolen_funcs =
-                               (const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data;
-                       size = stolen_funcs->size(num, slot, func);
-                       start = stolen_funcs->base(num, slot, func, size);
-                       if (size && start) {
-                               printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
-                                      start, start + (u32)size - 1);
-                               /* Mark this space as reserved */
-                               e820_add_region(start, size, E820_RESERVED);
-                               sanitize_e820_map(e820.map,
-                                                 ARRAY_SIZE(e820.map),
-                                                 &e820.nr_map);
-                       }
-                       return;
-               }
+
+       for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
+               kernel_ulong_t driver_data = intel_early_ids[i].driver_data;
+
+               if (intel_early_ids[i].device != device)
+                       continue;
+
+               early_ops = (typeof(early_ops))driver_data;
+
+               intel_graphics_stolen(num, slot, func, early_ops);
+
+               return;
        }
 }
 
@@ -590,6 +588,61 @@ static void __init force_disable_hpet(int num, int slot, int func)
 #endif
 }
 
+#define BCM4331_MMIO_SIZE      16384
+#define BCM4331_PM_CAP         0x40
+#define bcma_aread32(reg)      ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
+#define bcma_awrite32(reg, val)        iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)
+
+static void __init apple_airport_reset(int bus, int slot, int func)
+{
+       void __iomem *mmio;
+       u16 pmcsr;
+       u64 addr;
+       int i;
+
+       if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
+               return;
+
+       /* Card may have been put into PCI_D3hot by grub quirk */
+       pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+
+       if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+               pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+               write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
+               mdelay(10);
+
+               pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+               if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+                       pr_err("Cannot power up Apple AirPort card\n");
+                       return;
+               }
+       }
+
+       addr  =      read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
+       addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
+       addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+       mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
+       if (!mmio) {
+               pr_err("Cannot iomap Apple AirPort card\n");
+               return;
+       }
+
+       pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");
+
+       for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
+               udelay(10);
+
+       bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+       bcma_aread32(BCMA_RESET_CTL);
+       udelay(1);
+
+       bcma_awrite32(BCMA_RESET_CTL, 0);
+       bcma_aread32(BCMA_RESET_CTL);
+       udelay(10);
+
+       early_iounmap(mmio, BCM4331_MMIO_SIZE);
+}
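
How the 64-bit BAR is assembled from the two config dwords read above,
shown with hypothetical register values:

    u32 bar0 = 0xa0000004;              /* BAR0 low dword, 64-bit mem BAR */
    u32 bar1 = 0x00000001;              /* BAR1 carries the high dword */
    u64 addr = bar0 | ((u64)bar1 << 32);

    addr &= PCI_BASE_ADDRESS_MEM_MASK;  /* strip the low type/flag bits */
    /* addr == 0x1a0000000 */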
 
 #define QFLAG_APPLY_ONCE       0x1
 #define QFLAG_APPLIED          0x2
@@ -603,12 +656,6 @@ struct chipset {
        void (*f)(int num, int slot, int func);
 };
 
-/*
- * Only works for devices on the root bus. If you add any devices
- * not on bus 0 readd another loop level in early_quirks(). But
- * be careful because at least the Nvidia quirk here relies on
- * only matching on bus 0.
- */
 static struct chipset early_qrk[] __initdata = {
        { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
          PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
@@ -627,7 +674,7 @@ static struct chipset early_qrk[] __initdata = {
        { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
          PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
        { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
-         QFLAG_APPLY_ONCE, intel_graphics_stolen },
+         QFLAG_APPLY_ONCE, intel_graphics_quirks },
        /*
         * HPET on the current version of the Baytrail platform has accuracy
         * problems: it will halt in deep idle state - so we disable it.
@@ -638,9 +685,13 @@ static struct chipset early_qrk[] __initdata = {
         */
        { PCI_VENDOR_ID_INTEL, 0x0f00,
                PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+       { PCI_VENDOR_ID_BROADCOM, 0x4331,
+         PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
        {}
 };
 
+static void __init early_pci_scan_bus(int bus);
+
 /**
  * check_dev_quirk - apply early quirks to a given PCI device
  * @num: bus number
@@ -649,7 +700,7 @@ static struct chipset early_qrk[] __initdata = {
  *
  * Check the vendor & device ID against the early quirks table.
  *
- * If the device is single function, let early_quirks() know so we don't
+ * If the device is single function, let early_pci_scan_bus() know so we don't
  * poke at this device again.
  */
 static int __init check_dev_quirk(int num, int slot, int func)
@@ -658,6 +709,7 @@ static int __init check_dev_quirk(int num, int slot, int func)
        u16 vendor;
        u16 device;
        u8 type;
+       u8 sec;
        int i;
 
        class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
@@ -685,25 +737,36 @@ static int __init check_dev_quirk(int num, int slot, int func)
 
        type = read_pci_config_byte(num, slot, func,
                                    PCI_HEADER_TYPE);
+
+       if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
+               sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
+               if (sec > num)
+                       early_pci_scan_bus(sec);
+       }
+
        if (!(type & 0x80))
                return -1;
 
        return 0;
 }
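
The header-type test above is what lets the quirk scan descend into
bridges; the "sec > num" guard keeps the recursion finite because buses
are only ever scanned in increasing order. A sketch of the bit layout,
with values as in <linux/pci_regs.h>:

    #include <assert.h>

    #define PCI_HEADER_TYPE_BRIDGE 1    /* bits 0-6: header layout */

    int main(void)
    {
            unsigned char type = 0x81;  /* hypothetical multifunction bridge */

            assert((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE); /* descend */
            assert(type & 0x80);        /* bit 7 set: probe all 8 functions */
            return 0;
    }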
 
-void __init early_quirks(void)
+static void __init early_pci_scan_bus(int bus)
 {
        int slot, func;
 
-       if (!early_pci_allowed())
-               return;
-
        /* Poor man's PCI discovery */
-       /* Only scan the root bus */
        for (slot = 0; slot < 32; slot++)
                for (func = 0; func < 8; func++) {
                        /* Only probe function 0 on single fn devices */
-                       if (check_dev_quirk(0, slot, func))
+                       if (check_dev_quirk(bus, slot, func))
                                break;
                }
 }
+
+void __init early_quirks(void)
+{
+       if (!early_pci_allowed())
+               return;
+
+       early_pci_scan_bus(0);
+}
index 4d38416e2a7fd0625488d635dc5452ffb651c115..04f89caef9c4926c40092aed725db848adc2379e 100644 (file)
@@ -57,7 +57,7 @@
 # error "Need more than one PGD for the ESPFIX hack"
 #endif
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
index 38da8f29a9c81f075d4b91fc6982e28ae2f28eee..c627bf8d98adf8377a351f1cbbe98aa8e29fe7f1 100644 (file)
@@ -130,11 +130,9 @@ void irq_ctx_init(int cpu)
 
 void do_softirq_own_stack(void)
 {
-       struct thread_info *curstk;
        struct irq_stack *irqstk;
        u32 *isp, *prev_esp;
 
-       curstk = current_stack();
        irqstk = __this_cpu_read(softirq_stack);
 
        /* build the stack frame on the softirq stack */
index 38cf7a7412503f513bb1aae1c1acc5b76e7d2e1d..7847e5c0e0b5d0ce9af2cd47274b52bab7f2597a 100644 (file)
@@ -961,7 +961,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
                 * normal page fault.
                 */
                regs->ip = (unsigned long)cur->addr;
+               /*
+                * Trap flag (TF) has been set here because this fault
+                * happened where the single stepping will be done.
+                * So clear it by resetting the current kprobe:
+                */
+               regs->flags &= ~X86_EFLAGS_TF;
+
+               /*
+                * If the TF flag was set before the kprobe hit,
+                * don't touch it:
+                */
                regs->flags |= kcb->kprobe_old_flags;
+
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
index 99bfc025111d3dd62bc7c1753d126e97fb4e48ac..06c58ce46762ed9165cd3317292bbc57efa7055c 100644 (file)
@@ -61,11 +61,16 @@ void pvclock_resume(void)
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 {
        unsigned version;
-       cycle_t ret;
        u8 flags;
 
        do {
-               version = __pvclock_read_cycles(src, &ret, &flags);
+               version = src->version;
+               /* Make the latest version visible */
+               smp_rmb();
+
+               flags = src->flags;
+               /* Make sure that the version double-check is last. */
+               smp_rmb();
        } while ((src->version & 1) || version != src->version);
 
        return flags & valid_flags;
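
The loop above is the classic seqcount reader: retry while the version
is odd (writer mid-update) or has changed across the payload reads. A
schematic user-space sketch, with volatile standing in for the kernel's
smp_rmb() pairs:

    struct snapshot {
            volatile unsigned version;  /* odd while a writer is active */
            volatile unsigned payload;
    };

    static unsigned read_snapshot(const struct snapshot *s)
    {
            unsigned v, data;

            do {
                    v = s->version;     /* sample the version first */
                    data = s->payload;  /* then the payload */
            } while ((s->version & 1) || v != s->version); /* re-check last */

            return data;
    }
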
@@ -80,6 +85,8 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 
        do {
                version = __pvclock_read_cycles(src, &ret, &flags);
+               /* Make sure that the version double-check is last. */
+               smp_rmb();
        } while ((src->version & 1) || version != src->version);
 
        if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
index d1590486204a1bb52974c27793dc66139f20c4ee..00f03d82e69acfbb7c90a7d8d859516088a43e6d 100644 (file)
@@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
                local_irq_disable();
 }
 
+/*
+ * In IST context, we explicitly disable preemption.  This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
 void ist_enter(struct pt_regs *regs)
 {
        if (user_mode(regs)) {
@@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs)
                rcu_nmi_enter();
        }
 
-       /*
-        * We are atomic because we're on the IST stack; or we're on
-        * x86_32, in which case we still shouldn't schedule; or we're
-        * on x86_64 and entered from user mode, in which case we're
-        * still atomic unless ist_begin_non_atomic is called.
-        */
-       preempt_count_add(HARDIRQ_OFFSET);
+       preempt_disable();
 
        /* This code is a bit fragile.  Test it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs)
 
 void ist_exit(struct pt_regs *regs)
 {
-       preempt_count_sub(HARDIRQ_OFFSET);
+       preempt_enable_no_resched();
 
        if (!user_mode(regs))
                rcu_nmi_exit();
@@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
        BUG_ON((unsigned long)(current_top_of_stack() -
                               current_stack_pointer()) >= THREAD_SIZE);
 
-       preempt_count_sub(HARDIRQ_OFFSET);
+       preempt_enable_no_resched();
 }
 
 /**
@@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
  */
 void ist_end_non_atomic(void)
 {
-       preempt_count_add(HARDIRQ_OFFSET);
+       preempt_disable();
 }
 
 static nokprobe_inline int
index 769af907f82485edc91bf3e4bbfcebddbe148fcc..7597b42a8a883c668ddbcf28ba03cde86cb30924 100644 (file)
@@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid_entry __user *entries)
 {
        int r, i;
-       struct kvm_cpuid_entry *cpuid_entries;
+       struct kvm_cpuid_entry *cpuid_entries = NULL;
 
        r = -E2BIG;
        if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
                goto out;
        r = -ENOMEM;
-       cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
-       if (!cpuid_entries)
-               goto out;
-       r = -EFAULT;
-       if (copy_from_user(cpuid_entries, entries,
-                          cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-               goto out_free;
+       if (cpuid->nent) {
+               cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
+                                       cpuid->nent);
+               if (!cpuid_entries)
+                       goto out;
+               r = -EFAULT;
+               if (copy_from_user(cpuid_entries, entries,
+                                  cpuid->nent * sizeof(struct kvm_cpuid_entry)))
+                       goto out;
+       }
        for (i = 0; i < cpuid->nent; i++) {
                vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
                vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
@@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
        kvm_x86_ops->cpuid_update(vcpu);
        r = kvm_update_cpuid(vcpu);
 
-out_free:
-       vfree(cpuid_entries);
 out:
+       vfree(cpuid_entries);
        return r;
 }
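
Folding out_free into out works because cpuid_entries starts out NULL
and vfree(NULL), like kfree(NULL), is a defined no-op. The same
init-to-NULL / free-unconditionally shape in plain C:

    #include <stdlib.h>

    int example(int want_buf)
    {
            char *buf = NULL;           /* makes the early goto safe */
            int r = -1;

            if (want_buf) {
                    buf = malloc(64);
                    if (!buf)
                            goto out;
            }
            r = 0;
    out:
            free(buf);                  /* free(NULL) is a no-op, same idea */
            return r;
    }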
 
index bbb5b283ff63a9285ddcedc113a91dbf22859bbb..a397200281c119aabb283fce45c4a921f3f4cd24 100644 (file)
@@ -1310,7 +1310,8 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
 
        /* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
        if (guest_tsc < tsc_deadline)
-               __delay(tsc_deadline - guest_tsc);
+               __delay(min(tsc_deadline - guest_tsc,
+                       nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
 }
 
 static void start_apic_timer(struct kvm_lapic *apic)
index 24e800116ab4a25d750dca67023633b16f648972..def97b3a392b52bd004b9b8e60d8bf70c018bc0c 100644 (file)
@@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 #ifdef CONFIG_X86_64
 static void __set_spte(u64 *sptep, u64 spte)
 {
-       *sptep = spte;
+       WRITE_ONCE(*sptep, spte);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
 {
-       *sptep = spte;
+       WRITE_ONCE(*sptep, spte);
 }
 
 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
@@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte)
         */
        smp_wmb();
 
-       ssptep->spte_low = sspte.spte_low;
+       WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 }
 
 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
@@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte)
        ssptep = (union split_spte *)sptep;
        sspte = (union split_spte)spte;
 
-       ssptep->spte_low = sspte.spte_low;
+       WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
 
        /*
         * If we map the spte from present to nonpresent, we should clear
index 1163e8173e5a71b26d77f9ffde3612bddefc76aa..16ef31b874522143e275591eb93c9705df83c873 100644 (file)
@@ -238,7 +238,9 @@ module_param(nested, int, S_IRUGO);
 
 /* enable / disable AVIC */
 static int avic;
+#ifdef CONFIG_X86_LOCAL_APIC
 module_param(avic, int, S_IRUGO);
+#endif
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
@@ -981,11 +983,14 @@ static __init int svm_hardware_setup(void)
        } else
                kvm_disable_tdp();
 
-       if (avic && (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)))
-               avic = false;
-
-       if (avic)
-               pr_info("AVIC enabled\n");
+       if (avic) {
+               if (!npt_enabled ||
+                   !boot_cpu_has(X86_FEATURE_AVIC) ||
+                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC))
+                       avic = false;
+               else
+                       pr_info("AVIC enabled\n");
+       }
 
        return 0;
 
@@ -1324,7 +1329,7 @@ free_avic:
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
 {
        u64 entry;
-       int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
+       int h_physical_id = kvm_cpu_get_apicid(vcpu->cpu);
        struct vcpu_svm *svm = to_svm(vcpu);
 
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -1349,7 +1354,7 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        u64 entry;
        /* ID = 0xff (broadcast), ID > 0xff (reserved) */
-       int h_physical_id = __default_cpu_present_to_apicid(cpu);
+       int h_physical_id = kvm_cpu_get_apicid(cpu);
        struct vcpu_svm *svm = to_svm(vcpu);
 
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -4236,7 +4241,7 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
 
        if (avic_vcpu_is_running(vcpu))
                wrmsrl(SVM_AVIC_DOORBELL,
-                      __default_cpu_present_to_apicid(vcpu->cpu));
+                      kvm_cpu_get_apicid(vcpu->cpu));
        else
                kvm_vcpu_wake_up(vcpu);
 }
index fb93010beaa4df8b61fb4b654d889b1dcaa0990d..64a79f271276a025ea76759189cddfbe967ae1ed 100644 (file)
@@ -2072,7 +2072,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
        unsigned int dest;
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
+               !kvm_vcpu_apicv_active(vcpu))
                return;
 
        do {
@@ -2180,7 +2181,8 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
+               !kvm_vcpu_apicv_active(vcpu))
                return;
 
        /* Set SN when the vCPU is preempted */
@@ -6669,7 +6671,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 
        /* Checks for #GP/#SS exceptions. */
        exn = false;
-       if (is_protmode(vcpu)) {
+       if (is_long_mode(vcpu)) {
+               /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+                * non-canonical form. This is the only check on the memory
+                * destination for long mode!
+                */
+               exn = is_noncanonical_address(*ret);
+       } else if (is_protmode(vcpu)) {
                /* Protected mode: apply checks for segment validity in the
                 * following order:
                 * - segment type check (#GP(0) may be thrown)
@@ -6686,17 +6694,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
                         * execute-only code segment
                         */
                        exn = ((s.type & 0xa) == 8);
-       }
-       if (exn) {
-               kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
-               return 1;
-       }
-       if (is_long_mode(vcpu)) {
-               /* Long mode: #GP(0)/#SS(0) if the memory address is in a
-                * non-canonical form. This is an only check for long mode.
-                */
-               exn = is_noncanonical_address(*ret);
-       } else if (is_protmode(vcpu)) {
+               if (exn) {
+                       kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+                       return 1;
+               }
                /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
                 */
                exn = (s.unusable != 0);
@@ -10714,7 +10715,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
+               !kvm_vcpu_apicv_active(vcpu))
                return 0;
 
        vcpu->pre_pcpu = vcpu->cpu;
@@ -10780,7 +10782,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
        unsigned long flags;
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
+               !kvm_vcpu_apicv_active(vcpu))
                return;
 
        do {
@@ -10833,7 +10836,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
        int idx, ret = -EINVAL;
 
        if (!kvm_arch_has_assigned_device(kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP) ||
+               !kvm_vcpu_apicv_active(kvm->vcpus[0]))
                return 0;
 
        idx = srcu_read_lock(&kvm->irq_srcu);
index c805cf494154f8e7609ab89e148e210703690d90..7da5dd2057a928fd3eebed2052a6d89ee0750924 100644 (file)
@@ -1244,12 +1244,6 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 static unsigned long max_tsc_khz;
 
-static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
-{
-       return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
-                                  vcpu->arch.virtual_tsc_shift);
-}
-
 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
 {
        u64 v = (u64)khz * (1000000 + ppm);
@@ -2314,6 +2308,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_NB_CFG:
        case MSR_FAM10H_MMIO_CONF_BASE:
        case MSR_AMD64_BU_CFG2:
+       case MSR_IA32_PERF_CTL:
                msr_info->data = 0;
                break;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
@@ -2972,6 +2967,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                              | KVM_VCPUEVENT_VALID_SMM))
                return -EINVAL;
 
+       if (events->exception.injected &&
+           (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
+               return -EINVAL;
+
        process_nmi(vcpu);
        vcpu->arch.exception.pending = events->exception.injected;
        vcpu->arch.exception.nr = events->exception.nr;
@@ -3036,6 +3035,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
        if (dbgregs->flags)
                return -EINVAL;
 
+       if (dbgregs->dr6 & ~0xffffffffull)
+               return -EINVAL;
+       if (dbgregs->dr7 & ~0xffffffffull)
+               return -EINVAL;
+
        memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
        kvm_update_dr0123(vcpu);
        vcpu->arch.dr6 = dbgregs->dr6;
@@ -7815,7 +7819,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 
        slot = id_to_memslot(slots, id);
        if (size) {
-               if (WARN_ON(slot->npages))
+               if (slot->npages)
                        return -EEXIST;
 
                /*
index 7ce3634ab5fe6189a95163d8cd9781a923d21727..a82ca466b62e862c88a64b89bb602139fff3ab42 100644 (file)
@@ -2,6 +2,7 @@
 #define ARCH_X86_KVM_X86_H
 
 #include <linux/kvm_host.h>
+#include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
 
 #define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
@@ -195,6 +196,12 @@ extern unsigned int lapic_timer_advance_ns;
 
 extern struct static_key kvm_no_apic_vcpu;
 
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
+{
+       return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
+                                  vcpu->arch.virtual_tsc_shift);
+}
+
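
pvclock_scale_delta() scales a delta by a 32.32 fixed-point multiplier;
a sketch of the assumed semantics in gcc-style C (the kernel version
uses inline asm on x86-64):

    #include <stdint.h>

    static inline uint64_t scale_delta(uint64_t delta, uint32_t mul_frac,
                                       int shift)
    {
            if (shift < 0)
                    delta >>= -shift;
            else
                    delta <<= shift;

            /* upper 64 bits of the 64x32 product, shifted down by 32 */
            return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
    }
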
 /* Same "calling convention" as do_div:
  * - divide (n << 32) by base
  * - put result in n
index 1b1110fa00570e0d242926ca8f06adc37db518a5..0493c17b8a516f4212bc21fe5e32d0bce466c24f 100644 (file)
@@ -54,8 +54,8 @@ static int kasan_die_handler(struct notifier_block *self,
                             void *data)
 {
        if (val == DIE_GPF) {
-               pr_emerg("CONFIG_KASAN_INLINE enabled");
-               pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
+               pr_emerg("CONFIG_KASAN_INLINE enabled\n");
+               pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
 }
index 4eb287e25043ed17639832a1d6b3f71b528ab69e..aa0ff4b02a96e230ed17052f9ed961950371753b 100644 (file)
@@ -6,7 +6,7 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM
index b2a4e2a61f6b8f41775c47b0aed020a6441e5d66..3cd69832d7f4c6f3743bbb3a190c39672c89461d 100644 (file)
@@ -396,6 +396,7 @@ int __init pci_acpi_init(void)
                return -ENODEV;
 
        printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
+       acpi_irq_penalty_init();
        pcibios_enable_irq = acpi_pci_irq_enable;
        pcibios_disable_irq = acpi_pci_irq_disable;
        x86_init.pci.init_irq = x86_init_noop;
index 6e7242be1c8744b89af4a96587fb79d3a387e77a..b226b3f497f1c75a465fa73f5171a695ea5c9e95 100644 (file)
@@ -139,7 +139,7 @@ int __init efi_alloc_page_tables(void)
        if (efi_enabled(EFI_OLD_MEMMAP))
                return 0;
 
-       gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+       gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
        efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
        if (!efi_pgd)
                return -ENOMEM;
index 009947d419a61ae2fc41f58ad9d85ece1555fec2..f2b5e6a5cf956102905f64462db824a8a355cca5 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/mtrr.h>
 #include <asm/sections.h>
 #include <asm/suspend.h>
+#include <asm/tlbflush.h>
 
 /* Defined in hibernate_asm_64.S */
 extern asmlinkage __visible int restore_image(void);
@@ -28,6 +29,7 @@ extern asmlinkage __visible int restore_image(void);
  * kernel's text (this value is passed in the image header).
  */
 unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
 
 /*
  * Value of the cr3 register from before the hibernation (this value is passed
@@ -37,7 +39,43 @@ unsigned long restore_cr3 __visible;
 
 pgd_t *temp_level4_pgt __visible;
 
-void *relocated_restore_code __visible;
+unsigned long relocated_restore_code __visible;
+
+static int set_up_temporary_text_mapping(void)
+{
+       pmd_t *pmd;
+       pud_t *pud;
+
+       /*
+        * The new mapping only has to cover the page containing the image
+        * kernel's entry point (jump_address_phys), because the switch over to
+        * it is carried out by relocated code running from a page allocated
+        * specifically for this purpose and covered by the identity mapping, so
+        * the temporary kernel text mapping is only needed for the final jump.
+        * Moreover, in that mapping the virtual address of the image kernel's
+        * entry point must be the same as its virtual address in the image
+        * kernel (restore_jump_address), so the image kernel's
+        * restore_registers() code doesn't find itself in a different area of
+        * the virtual address space after switching over to the original page
+        * tables used by the image kernel.
+        */
+       pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+       if (!pud)
+               return -ENOMEM;
+
+       pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+       if (!pmd)
+               return -ENOMEM;
+
+       set_pmd(pmd + pmd_index(restore_jump_address),
+               __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+       set_pud(pud + pud_index(restore_jump_address),
+               __pud(__pa(pmd) | _KERNPG_TABLE));
+       set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+               __pgd(__pa(pud) | _KERNPG_TABLE));
+
+       return 0;
+}
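
The three set_*() calls wire up a single 2MB text mapping covering the
jump address. A user-space check of the index arithmetic, assuming the
standard x86-64 4-level shifts (pgd 39, pud 30, pmd 21; 512 entries per
level):

    #include <assert.h>
    #include <stdint.h>

    #define IDX(va, shift) (((va) >> (shift)) & 511)

    int main(void)
    {
            uint64_t va = 0xffffffff81000000ull; /* typical kernel text VA */

            assert(IDX(va, 39) == 511);  /* pgd slot: top of address space */
            assert(IDX(va, 30) == 510);  /* pud slot within that pgd */
            /* one pgd, one pud and one pmd entry cover the 2MB page */
            return 0;
    }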
 
 static void *alloc_pgt_page(void *context)
 {
@@ -59,9 +97,10 @@ static int set_up_temporary_mappings(void)
        if (!temp_level4_pgt)
                return -ENOMEM;
 
-       /* It is safe to reuse the original kernel mapping */
-       set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
-               init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+       /* Prepare a temporary mapping for the kernel text */
+       result = set_up_temporary_text_mapping();
+       if (result)
+               return result;
 
        /* Set up the direct mapping from scratch */
        for (i = 0; i < nr_pfn_mapped; i++) {
@@ -78,19 +117,50 @@ static int set_up_temporary_mappings(void)
        return 0;
 }
 
+static int relocate_restore_code(void)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+
+       relocated_restore_code = get_safe_page(GFP_ATOMIC);
+       if (!relocated_restore_code)
+               return -ENOMEM;
+
+       memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
+
+       /* Make the page containing the relocated code executable */
+       pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+       pud = pud_offset(pgd, relocated_restore_code);
+       if (pud_large(*pud)) {
+               set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+       } else {
+               pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
+
+               if (pmd_large(*pmd)) {
+                       set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+               } else {
+                       pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
+
+                       set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+               }
+       }
+       __flush_tlb_all();
+
+       return 0;
+}
+
 int swsusp_arch_resume(void)
 {
        int error;
 
        /* We have got enough memory and from now on we cannot recover */
-       if ((error = set_up_temporary_mappings()))
+       error = set_up_temporary_mappings();
+       if (error)
                return error;
 
-       relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
-       if (!relocated_restore_code)
-               return -ENOMEM;
-       memcpy(relocated_restore_code, &core_restore_code,
-              &restore_registers - &core_restore_code);
+       error = relocate_restore_code();
+       if (error)
+               return error;
 
        restore_image();
        return 0;
@@ -109,11 +179,12 @@ int pfn_is_nosave(unsigned long pfn)
 
 struct restore_data_record {
        unsigned long jump_address;
+       unsigned long jump_address_phys;
        unsigned long cr3;
        unsigned long magic;
 };
 
-#define RESTORE_MAGIC  0x0123456789ABCDEFUL
+#define RESTORE_MAGIC  0x123456789ABCDEF0UL
 
 /**
  *     arch_hibernation_header_save - populate the architecture specific part
@@ -126,7 +197,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 
        if (max_size < sizeof(struct restore_data_record))
                return -EOVERFLOW;
-       rdr->jump_address = restore_jump_address;
+       rdr->jump_address = (unsigned long)&restore_registers;
+       rdr->jump_address_phys = __pa_symbol(&restore_registers);
        rdr->cr3 = restore_cr3;
        rdr->magic = RESTORE_MAGIC;
        return 0;
@@ -142,6 +214,7 @@ int arch_hibernation_header_restore(void *addr)
        struct restore_data_record *rdr = addr;
 
        restore_jump_address = rdr->jump_address;
+       jump_address_phys = rdr->jump_address_phys;
        restore_cr3 = rdr->cr3;
        return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
 }
index 4400a43b9e28f20aaac68c1a20091e683e31b799..3177c2bc26f63e9fe1bb82739be9c0c2e406f00f 100644 (file)
@@ -44,9 +44,6 @@ ENTRY(swsusp_arch_suspend)
        pushfq
        popq    pt_regs_flags(%rax)
 
-       /* save the address of restore_registers */
-       movq    $restore_registers, %rax
-       movq    %rax, restore_jump_address(%rip)
        /* save cr3 */
        movq    %cr3, %rax
        movq    %rax, restore_cr3(%rip)
@@ -57,31 +54,34 @@ ENTRY(swsusp_arch_suspend)
 ENDPROC(swsusp_arch_suspend)
 
 ENTRY(restore_image)
-       /* switch to temporary page tables */
-       movq    $__PAGE_OFFSET, %rdx
-       movq    temp_level4_pgt(%rip), %rax
-       subq    %rdx, %rax
-       movq    %rax, %cr3
-       /* Flush TLB */
-       movq    mmu_cr4_features(%rip), %rax
-       movq    %rax, %rdx
-       andq    $~(X86_CR4_PGE), %rdx
-       movq    %rdx, %cr4;  # turn off PGE
-       movq    %cr3, %rcx;  # flush TLB
-       movq    %rcx, %cr3;
-       movq    %rax, %cr4;  # turn PGE back on
-
        /* prepare to jump to the image kernel */
-       movq    restore_jump_address(%rip), %rax
-       movq    restore_cr3(%rip), %rbx
+       movq    restore_jump_address(%rip), %r8
+       movq    restore_cr3(%rip), %r9
+
+       /* prepare to switch to temporary page tables */
+       movq    temp_level4_pgt(%rip), %rax
+       movq    mmu_cr4_features(%rip), %rbx
 
        /* prepare to copy image data to their original locations */
        movq    restore_pblist(%rip), %rdx
+
+       /* jump to relocated restore code */
        movq    relocated_restore_code(%rip), %rcx
        jmpq    *%rcx
 
        /* code below has been relocated to a safe page */
 ENTRY(core_restore_code)
+       /* switch to temporary page tables */
+       movq    $__PAGE_OFFSET, %rcx
+       subq    %rcx, %rax
+       movq    %rax, %cr3
+       /* flush TLB */
+       movq    %rbx, %rcx
+       andq    $~(X86_CR4_PGE), %rcx
+       movq    %rcx, %cr4;  # turn off PGE
+       movq    %cr3, %rcx;  # flush TLB
+       movq    %rcx, %cr3;
+       movq    %rbx, %cr4;  # turn PGE back on
 .Lloop:
        testq   %rdx, %rdx
        jz      .Ldone
@@ -96,24 +96,17 @@ ENTRY(core_restore_code)
        /* progress to the next pbe */
        movq    pbe_next(%rdx), %rdx
        jmp     .Lloop
+
 .Ldone:
        /* jump to the restore_registers address from the image header */
-       jmpq    *%rax
-       /*
-        * NOTE: This assumes that the boot kernel's text mapping covers the
-        * image kernel's page containing restore_registers and the address of
-        * this page is the same as in the image kernel's text mapping (it
-        * should always be true, because the text mapping is linear, starting
-        * from 0, and is supposed to cover the entire kernel text for every
-        * kernel).
-        *
-        * code below belongs to the image kernel
-        */
+       jmpq    *%r8
 
+        /* code below belongs to the image kernel */
+       .align PAGE_SIZE
 ENTRY(restore_registers)
        FRAME_BEGIN
        /* go back to the original page tables */
-       movq    %rbx, %cr3
+       movq    %r9, %cr3
 
        /* Flush TLB, including "global" things (vmalloc) */
        movq    mmu_cr4_features(%rip), %rax
index 478a2de543a59ea797515be2f8145a503277778f..67433714b791d6f49eb954a30b092c2121cc66c5 100644 (file)
@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
 
        /* NOTE: The loop is more greedy than the cleanup_highmap variant.
         * We include the PMD passed in on _both_ boundaries. */
-       for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+       for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
                        pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
@@ -1551,41 +1551,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
-       if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-                              pte_val_ma(pte));
-
-       return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-       unsigned long pfn;
-
-       if (xen_feature(XENFEAT_writable_page_tables) ||
-           xen_feature(XENFEAT_auto_translated_physmap) ||
-           xen_start_info->mfn_list >= __START_KERNEL_map)
-               return pte;
-
-       /*
-        * Pages belonging to the initial p2m list mapped outside the default
-        * address range must be mapped read-only. This region contains the
-        * page tables for mapping the p2m list, too, and page tables MUST be
-        * mapped read-only.
-        */
-       pfn = pte_pfn(pte);
-       if (pfn >= xen_start_info->first_p2m_pfn &&
-           pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
-               pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
-       return pte;
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Init-time set_pte while constructing initial pagetables, which
  * doesn't allow RO page table pages to be remapped RW.
@@ -1600,13 +1565,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
  * so always write the PTE directly and rely on Xen trapping and
  * emulating any updates as necessary.
  */
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+__visible pte_t xen_make_pte_init(pteval_t pte)
 {
-       if (pte_mfn(pte) != INVALID_P2M_ENTRY)
-               pte = mask_rw_pte(ptep, pte);
-       else
-               pte = __pte_ma(0);
+#ifdef CONFIG_X86_64
+       unsigned long pfn;
+
+       /*
+        * Pages belonging to the initial p2m list mapped outside the default
+        * address range must be mapped read-only. This region contains the
+        * page tables for mapping the p2m list, too, and page tables MUST be
+        * mapped read-only.
+        */
+       pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+       if (xen_start_info->mfn_list < __START_KERNEL_map &&
+           pfn >= xen_start_info->first_p2m_pfn &&
+           pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+               pte &= ~_PAGE_RW;
+#endif
+       pte = pte_pfn_to_mfn(pte);
+       return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
 
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+       if (pte_mfn(pte) != INVALID_P2M_ENTRY
+           && pte_val_ma(*ptep) & _PAGE_PRESENT)
+               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+                              pte_val_ma(pte));
+#endif
        native_set_pte(ptep, pte);
 }
 
@@ -2407,6 +2396,7 @@ static void __init xen_post_allocator_init(void)
        pv_mmu_ops.alloc_pud = xen_alloc_pud;
        pv_mmu_ops.release_pud = xen_release_pud;
 #endif
+       pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
 #ifdef CONFIG_X86_64
        pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2445,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
        .pte_val = PV_CALLEE_SAVE(xen_pte_val),
        .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
 
-       .make_pte = PV_CALLEE_SAVE(xen_make_pte),
+       .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
        .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
 #ifdef CONFIG_X86_PAE
index cab9f766bb06b28b8f0f8f35583e70ae50adb30e..dd2a49a8aacc6b1dfaeac32980c94c71af991ed9 100644 (file)
@@ -182,7 +182,7 @@ static void * __ref alloc_p2m_page(void)
        if (unlikely(!slab_is_available()))
                return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
 
-       return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+       return (void *)__get_free_page(GFP_KERNEL);
 }
 
 static void __ref free_p2m_page(void *p)
index d38eb9237e645cf5affd4964d8d289abb1a2ee4f..1065bc8bcae56bba87f8d1a55b2c5155d489777d 100644 (file)
@@ -44,7 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
        pte_t *ptep;
        int i;
 
-       ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       ptep = (pte_t *)__get_free_page(GFP_KERNEL);
        if (!ptep)
                return NULL;
        for (i = 0; i < 1024; i++)
index 23d7f301a1967483ec79a383a1a317881caf3358..9e29dc35169560a223ae7eea9dffc80da15fc7f9 100644 (file)
@@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                ret = submit_bio_wait(type, bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
+               bio_put(bio);
        }
        blk_finish_plug(&plug);
 
@@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                }
        }
 
-       if (bio)
+       if (bio) {
                ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
+               bio_put(bio);
+       }
        return ret != -EOPNOTSUPP ? ret : 0;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                }
        }
 
-       if (bio)
-               return submit_bio_wait(WRITE, bio);
+       if (bio) {
+               ret = submit_bio_wait(WRITE, bio);
+               bio_put(bio);
+               return ret;
+       }
        return 0;
 }
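
All three hunks in this file fix the same leak: submit_bio_wait()
completes the I/O but leaves the caller's bio reference intact. The safe
shape, assuming the 4.7-era block API:

    struct bio *bio = bio_alloc(GFP_NOIO, 1);   /* we hold one reference */
    int ret;

    if (!bio)
            return -ENOMEM;
    /* ... set bi_bdev and bi_iter.bi_sector, add pages ... */

    ret = submit_bio_wait(WRITE, bio);          /* does not consume bio */
    bio_put(bio);                               /* so drop our ref here */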
 
index 29cbc1b5fbdba0ee89874c662ab441b2a817e177..f9b9049b1284cc8adf65c1eed611e4c09d2d6584 100644 (file)
@@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q)) {
-               if (blk_attempt_plug_merge(q, bio, &request_count,
-                                          &same_queue_rq))
-                       return BLK_QC_T_NONE;
-       } else
-               request_count = blk_plug_queued_count(q);
+       if (!is_flush_fua && !blk_queue_nomerges(q) &&
+           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+               return BLK_QC_T_NONE;
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
@@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, NULL))
-               return BLK_QC_T_NONE;
+       if (!is_flush_fua && !blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+                       return BLK_QC_T_NONE;
+       } else
+               request_count = blk_plug_queued_count(q);
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
index cc7800e9eb441e2b7737a152f0dbb60182821408..01b8116298a13b5463e7969ce66c0b037bcfccd5 100644 (file)
@@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
        if (ret)
                goto out;
        ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+       task_lock(p);
        if (p->io_context)
                ret = p->io_context->ioprio;
+       task_unlock(p);
 out:
        return ret;
 }
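
task_lock() closes the window in which the target task could exit and
free its io_context between the NULL check and the dereference;
exit_io_context() takes the same lock on the other side. The race being
fixed, sketched as a timeline:

    /*
     * get_task_ioprio()                exiting task p
     * -----------------                --------------
     * if (p->io_context)  <- non-NULL
     *                                  task_lock(p);
     *                                  p->io_context = NULL;
     *                                  task_unlock(p);
     *                                  put_io_context(ioc); <- may free it
     * ret = p->io_context->ioprio;  <- use-after-free without the lock
     */
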
index e28e912000a71ab701f61fc09faa9570f81afe09..331f6baf2df8ea39148a49f5f7408238adc2fb0f 100644 (file)
@@ -13,6 +13,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
        tristate "Asymmetric public-key crypto algorithm subtype"
        select MPILIB
        select CRYPTO_HASH_INFO
+       select CRYPTO_AKCIPHER
        help
          This option provides support for asymmetric public key type handling.
          If signature generation and/or verification are to be used,
index 6a76d5c70ef6e1ddb742bfc94671a499184c891d..9492e1c22d3891417d3e42baf2e99661041c1135 100644 (file)
@@ -124,5 +124,10 @@ int mscode_note_digest(void *context, size_t hdrlen,
        struct pefile_context *ctx = context;
 
        ctx->digest = kmemdup(value, vlen, GFP_KERNEL);
-       return ctx->digest ? 0 : -ENOMEM;
+       if (!ctx->digest)
+               return -ENOMEM;
+
+       ctx->digest_len = vlen;
+
+       return 0;
 }
index 44b746e9df1b4110e6f8da4ac5b23419cb63feac..2ffd69769466082eaf55cdfe71fb67704e0364af 100644 (file)
@@ -227,7 +227,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
                                if (asymmetric_key_id_same(p->id, auth))
                                        goto found_issuer_check_skid;
                        }
-               } else {
+               } else if (sig->auth_ids[1]) {
                        auth = sig->auth_ids[1];
                        pr_debug("- want %*phN\n", auth->len, auth->data);
                        for (p = pkcs7->certs; p; p = p->next) {
index ac4bddf669de2195bce0864a28308031602245da..19d1afb9890f660e43ee95261cf0a703e44f92c6 100644 (file)
@@ -87,7 +87,7 @@ int restrict_link_by_signature(struct key *trust_keyring,
 
        sig = payload->data[asym_auth];
        if (!sig->auth_ids[0] && !sig->auth_ids[1])
-               return 0;
+               return -ENOKEY;
 
        if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid))
                return -EPERM;
index 43fe85f20d577b4f3d1bbd6576b6d752bc578531..7097a3395b2529fd123b2c0b14bcce6992fdee49 100644 (file)
@@ -455,6 +455,7 @@ static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
        [CRYPTO_MSG_NEWALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
        [CRYPTO_MSG_DELALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
        [CRYPTO_MSG_UPDATEALG   - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
+       [CRYPTO_MSG_GETALG      - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
        [CRYPTO_MSG_DELRNG      - CRYPTO_MSG_BASE] = 0,
 };
 
index ead8dc0d084e749e35733abc164f1e209aca3f6f..8ba426635b1b39631824f7bb2b1f2e3a056bafa4 100644 (file)
@@ -102,10 +102,10 @@ struct pkcs1pad_inst_ctx {
 };
 
 struct pkcs1pad_request {
-       struct akcipher_request child_req;
-
        struct scatterlist in_sg[3], out_sg[2];
        uint8_t *in_buf, *out_buf;
+
+       struct akcipher_request child_req;
 };
 
 static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
index 1f4128487dd4be1b342df522e010de6c29c6756b..dee86925a9a107f028ef14adf110eb583500c0c7 100644 (file)
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
        crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
        ret = n;
 out:
-       acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
+       acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
        return ret;
 }
 
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
        crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
        ret = n;
 out:
-       acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
+       acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
        return n;
 }
 
index 0d92d0f915e9acb0f04dcbce9583fcd16d7c8422..c7ba948d253c456fa346452d491fab214489f0ba 100644 (file)
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
                pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
 
                pr->pblk = object.processor.pblk_address;
-
-               /*
-                * We don't care about error returns - we just try to mark
-                * these reserved so that nobody else is confused into thinking
-                * that this region might be unused..
-                *
-                * (In particular, allocating the IO range for Cardbus)
-                */
-               request_region(pr->throttling.address, 6, "ACPI CPU throttle");
        }
 
        /*
index 3d5b8a099351e3c401de7e739b4d8bb043fab86e..c1d138e128cba4aeddbc54a807a6f235142c982c 100644 (file)
@@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
 }
 
 int acpi_video_get_levels(struct acpi_device *device,
-                         struct acpi_video_device_brightness **dev_br)
+                         struct acpi_video_device_brightness **dev_br,
+                         int *pmax_level)
 {
        union acpi_object *obj = NULL;
        int i, max_level = 0, count = 0, level_ac_battery = 0;
@@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device,
 
        br->count = count;
        *dev_br = br;
+       if (pmax_level)
+               *pmax_level = max_level;
 
 out:
        kfree(obj);
@@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
        struct acpi_video_device_brightness *br = NULL;
        int result = -EINVAL;
 
-       result = acpi_video_get_levels(device->dev, &br);
+       result = acpi_video_get_levels(device->dev, &br, &max_level);
        if (result)
                return result;
        device->brightness = br;
@@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
 
        mutex_lock(&video->device_list_lock);
        list_for_each_entry(dev, &video->video_device_list, entry) {
-               if (!acpi_video_device_lcd_query_levels(dev, &levels))
+               if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
                        kfree(levels);
        }
        mutex_unlock(&video->device_list_lock);
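With the extra out-parameter, callers that only want the brightness table can pass NULL for pmax_level, while callers such as acpi_video_init_brightness() get the maximum level in the same call. A hedged usage sketch ('adev' stands for an acpi_device the caller already holds):

struct acpi_video_device_brightness *br;
int max_level, err;

err = acpi_video_get_levels(adev, &br, &max_level);
if (err)
        return err;
/* ... consume br->count levels, clamping requests to max_level ... */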
index 0f18dbc9a37fd7e80e80a65cb314ba3ac416da73..3b7fb99362b6945ecc5ebe3f192405718ecd33fb 100644 (file)
@@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value,
 static u8
 acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
 {
-       u64 address;
-
        if (!reg->access_width) {
+               if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+                       max_bit_width = 32;
+               }
+
                /*
                 * Detect old register descriptors where only the bit_width field
-                * makes senses. The target address is copied to handle possible
-                * alignment issues.
+                * makes sense.
                 */
-               ACPI_MOVE_64_TO_64(&address, &reg->address);
-               if (!reg->bit_offset && reg->bit_width &&
+               if (reg->bit_width < max_bit_width &&
+                   !reg->bit_offset && reg->bit_width &&
                    ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
-                   ACPI_IS_ALIGNED(reg->bit_width, 8) &&
-                   ACPI_IS_ALIGNED(address, reg->bit_width)) {
+                   ACPI_IS_ALIGNED(reg->bit_width, 8)) {
                        return (reg->bit_width);
-               } else {
-                       if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
-                               return (32);
-                       } else {
-                               return (max_bit_width);
-                       }
                }
+               return (max_bit_width);
        } else {
                return (1 << (reg->access_width + 2));
        }
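For reference, the else branch converts the GAS access_width field to an access size in bits via 1 << (access_width + 2):

/* access_width -> access size
 *      1       ->  1 << 3 =  8 bits
 *      2       ->  1 << 4 = 16 bits
 *      3       ->  1 << 5 = 32 bits
 *      4       ->  1 << 6 = 64 bits
 */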
@@ -311,12 +306,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
 acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
 {
        u64 address;
-       u8 access_width;
-       u32 bit_width;
-       u8 bit_offset;
-       u64 value64;
-       u32 new_value32, old_value32;
-       u8 index;
        acpi_status status;
 
        ACPI_FUNCTION_NAME(hw_write);
@@ -328,145 +317,23 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
                return (status);
        }
 
-       /* Convert access_width into number of bits based */
-
-       access_width = acpi_hw_get_access_bit_width(reg, 32);
-       bit_width = reg->bit_offset + reg->bit_width;
-       bit_offset = reg->bit_offset;
-
        /*
         * Two address spaces supported: Memory or IO. PCI_Config is
         * not supported here because the GAS structure is insufficient
         */
-       index = 0;
-       while (bit_width) {
-               /*
-                * Use offset style bit reads because "Index * AccessWidth" is
-                * ensured to be less than 32-bits by acpi_hw_validate_register().
-                */
-               new_value32 = ACPI_GET_BITS(&value, index * access_width,
-                                           ACPI_MASK_BITS_ABOVE_32
-                                           (access_width));
-
-               if (bit_offset >= access_width) {
-                       bit_offset -= access_width;
-               } else {
-                       /*
-                        * Use offset style bit masks because access_width is ensured
-                        * to be less than 32-bits by acpi_hw_validate_register() and
-                        * bit_offset/bit_width is less than access_width here.
-                        */
-                       if (bit_offset) {
-                               new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
-                       }
-                       if (bit_width < access_width) {
-                               new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
-                       }
-
-                       if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
-                               if (bit_offset || bit_width < access_width) {
-                                       /*
-                                        * Read old values in order not to modify the bits that
-                                        * are beyond the register bit_width/bit_offset setting.
-                                        */
-                                       status =
-                                           acpi_os_read_memory((acpi_physical_address)
-                                                               address +
-                                                               index *
-                                                               ACPI_DIV_8
-                                                               (access_width),
-                                                               &value64,
-                                                               access_width);
-                                       old_value32 = (u32)value64;
-
-                                       /*
-                                        * Use offset style bit masks because access_width is
-                                        * ensured to be less than 32-bits by
-                                        * acpi_hw_validate_register() and bit_offset/bit_width is
-                                        * less than access_width here.
-                                        */
-                                       if (bit_offset) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_ABOVE
-                                                   (bit_offset);
-                                               bit_offset = 0;
-                                       }
-                                       if (bit_width < access_width) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_BELOW
-                                                   (bit_width);
-                                       }
-
-                                       new_value32 |= old_value32;
-                               }
-
-                               value64 = (u64)new_value32;
-                               status =
-                                   acpi_os_write_memory((acpi_physical_address)
-                                                        address +
-                                                        index *
-                                                        ACPI_DIV_8
-                                                        (access_width),
-                                                        value64, access_width);
-                       } else {        /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
-                               if (bit_offset || bit_width < access_width) {
-                                       /*
-                                        * Read old values in order not to modify the bits that
-                                        * are beyond the register bit_width/bit_offset setting.
-                                        */
-                                       status =
-                                           acpi_hw_read_port((acpi_io_address)
-                                                             address +
-                                                             index *
-                                                             ACPI_DIV_8
-                                                             (access_width),
-                                                             &old_value32,
-                                                             access_width);
-
-                                       /*
-                                        * Use offset style bit masks because access_width is
-                                        * ensured to be less than 32-bits by
-                                        * acpi_hw_validate_register() and bit_offset/bit_width is
-                                        * less than access_width here.
-                                        */
-                                       if (bit_offset) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_ABOVE
-                                                   (bit_offset);
-                                               bit_offset = 0;
-                                       }
-                                       if (bit_width < access_width) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_BELOW
-                                                   (bit_width);
-                                       }
-
-                                       new_value32 |= old_value32;
-                               }
-
-                               status = acpi_hw_write_port((acpi_io_address)
-                                                           address +
-                                                           index *
-                                                           ACPI_DIV_8
-                                                           (access_width),
-                                                           new_value32,
-                                                           access_width);
-                       }
-               }
-
-               /*
-                * Index * access_width is ensured to be less than 32-bits by
-                * acpi_hw_validate_register().
-                */
-               bit_width -=
-                   bit_width > access_width ? access_width : bit_width;
-               index++;
+       if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+               status = acpi_os_write_memory((acpi_physical_address)
+                                             address, (u64)value,
+                                             reg->bit_width);
+       } else {                /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+               status = acpi_hw_write_port((acpi_io_address)
+                                           address, value, reg->bit_width);
        }
 
        ACPI_DEBUG_PRINT((ACPI_DB_IO,
                          "Wrote: %8.8X width %2d   to %8.8X%8.8X (%s)\n",
-                         value, access_width, ACPI_FORMAT_UINT64(address),
+                         value, reg->bit_width, ACPI_FORMAT_UINT64(address),
                          acpi_ut_get_region_name(reg->space_id)));
 
        return (status);
index 31e8da648fffb9b6db10cda340bcc394f3a95994..262ca31b86d9311c05bd840c7fb8bb0aa3b09b34 100644 (file)
@@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void)
         * Maybe EC region is required at bus_scan/acpi_get_devices. So it
         * is necessary to enable it as early as possible.
         */
-       acpi_boot_ec_enable();
+       acpi_ec_dsdt_probe();
 
        printk(KERN_INFO PREFIX "Interpreter enabled\n");
 
index 0e70181f150c9eae5e00b8f9a24a41382ee41bbc..290d6f5be44b414b7d89ec0a48be1b234330c254 100644 (file)
@@ -1331,8 +1331,6 @@ static int ec_install_handlers(struct acpi_ec *ec)
 
 static void ec_remove_handlers(struct acpi_ec *ec)
 {
-       acpi_ec_stop(ec, false);
-
        if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
                if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
                                        ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
@@ -1340,6 +1338,19 @@ static void ec_remove_handlers(struct acpi_ec *ec)
                clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
        }
 
+       /*
+        * Stop handling EC transactions after removing the operation
+        * region handler. This is required because _REG(DISCONNECT)
+        * invoked during the removal can result in new EC transactions.
+        *
+        * Flush the EC requests and thus disable the GPE before
+        * removing the GPE handler. This is required by the current
+        * ACPICA GPE core, which automatically disables a GPE that is
+        * signalled but has no handler; drivers must therefore disable
+        * their GPEs before removing the GPE handlers.
+        */
+       acpi_ec_stop(ec, false);
+
        if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
                if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
                                        &acpi_ec_gpe_handler)))
@@ -1446,10 +1457,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
        return AE_OK;
 }
 
-int __init acpi_boot_ec_enable(void)
+static const struct acpi_device_id ec_device_ids[] = {
+       {"PNP0C09", 0},
+       {"", 0},
+};
+
+int __init acpi_ec_dsdt_probe(void)
 {
-       if (!boot_ec)
+       acpi_status status;
+
+       if (boot_ec)
                return 0;
+
+       /*
+        * Find the EC from the DSDT if no ECDT EC is available. By the
+        * time this function is invoked, the ACPI tables have been fully
+        * loaded, so the namespace can be walked.
+        */
+       boot_ec = make_acpi_ec();
+       if (!boot_ec)
+               return -ENOMEM;
+       status = acpi_get_devices(ec_device_ids[0].id,
+                                 ec_parse_device, boot_ec, NULL);
+       if (ACPI_FAILURE(status) || !boot_ec->handle)
+               return -ENODEV;
        if (!ec_install_handlers(boot_ec)) {
                first_ec = boot_ec;
                return 0;
@@ -1457,11 +1488,6 @@ int __init acpi_boot_ec_enable(void)
        return -EFAULT;
 }
 
-static const struct acpi_device_id ec_device_ids[] = {
-       {"PNP0C09", 0},
-       {"", 0},
-};
-
 #if 0
 /*
  * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
index 9bb0773d39bfd14f87f0e8dbfe5a1ed31b54162d..27cc7feabfe4322dfa578c2e9af8464e4f4feaf7 100644 (file)
@@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);
 
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 void acpi_ec_unblock_transactions_early(void);
index 2215fc847fa90c572b40b426a5edc2a88f5792a2..1f0e06065ae6c5180d2e8deae426aef0966fa3dd 100644 (file)
@@ -928,7 +928,7 @@ static ssize_t format_show(struct device *dev,
 {
        struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
 
-       return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->code));
+       return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
 }
 static DEVICE_ATTR_RO(format);
 
@@ -961,8 +961,8 @@ static ssize_t format1_show(struct device *dev,
                                continue;
                        if (nfit_dcr->dcr->code == dcr->code)
                                continue;
-                       rc = sprintf(buf, "%#x\n",
-                                       be16_to_cpu(nfit_dcr->dcr->code));
+                       rc = sprintf(buf, "0x%04x\n",
+                                       le16_to_cpu(nfit_dcr->dcr->code));
                        break;
                }
                if (rc != ENXIO)
@@ -1151,9 +1151,10 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                if (disable_vendor_specific)
                        dsm_mask &= ~(1 << 8);
        } else {
-               dev_err(dev, "unknown dimm command family\n");
+               dev_dbg(dev, "unknown dimm command family\n");
                nfit_mem->family = -1;
-               return force_enable_dimms ? 0 : -ENODEV;
+               /* DSMs are optional, continue loading the driver... */
+               return 0;
        }
 
        uuid = to_nfit_uuid(nfit_mem->family);
index 11cb38348aef600cac3215985d98bc7559e37d63..02b9ea1e8d2e6cc6ad978493529aa0ec59f606d8 100644 (file)
@@ -53,12 +53,12 @@ enum nfit_uuids {
 };
 
 /*
- * Region format interface codes are stored as an array of bytes in the
- * NFIT DIMM Control Region structure
+ * Region format interface codes are stored with the interface as the
+ * LSB and the function as the MSB.
  */
-#define NFIT_FIC_BYTE cpu_to_be16(0x101) /* byte-addressable energy backed */
-#define NFIT_FIC_BLK cpu_to_be16(0x201) /* block-addressable non-energy backed */
-#define NFIT_FIC_BYTEN cpu_to_be16(0x301) /* byte-addressable non-energy backed */
+#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */
+#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */
+#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */
 
 enum {
        NFIT_BLK_READ_FLUSH = 1,
index 8fc7323ed3e847197cfdf1e6fdee728cdfdd8e7b..c983bf733ad37d7b608c9410108dcef32cdbf02b 100644 (file)
@@ -470,6 +470,7 @@ static int acpi_irq_pci_sharing_penalty(int irq)
 {
        struct acpi_pci_link *link;
        int penalty = 0;
+       int i;
 
        list_for_each_entry(link, &acpi_link_list, list) {
                /*
@@ -478,18 +479,14 @@ static int acpi_irq_pci_sharing_penalty(int irq)
                 */
                if (link->irq.active && link->irq.active == irq)
                        penalty += PIRQ_PENALTY_PCI_USING;
-               else {
-                       int i;
-
-                       /*
-                        * If a link is inactive, penalize the IRQs it
-                        * might use, but not as severely.
-                        */
-                       for (i = 0; i < link->irq.possible_count; i++)
-                               if (link->irq.possible[i] == irq)
-                                       penalty += PIRQ_PENALTY_PCI_POSSIBLE /
-                                               link->irq.possible_count;
-               }
+
+               /*
+                * penalize the IRQs PCI might use, but not as severely.
+                */
+               for (i = 0; i < link->irq.possible_count; i++)
+                       if (link->irq.possible[i] == irq)
+                               penalty += PIRQ_PENALTY_PCI_POSSIBLE /
+                                       link->irq.possible_count;
        }
 
        return penalty;
@@ -499,9 +496,6 @@ static int acpi_irq_get_penalty(int irq)
 {
        int penalty = 0;
 
-       if (irq < ACPI_MAX_ISA_IRQS)
-               penalty += acpi_isa_irq_penalty[irq];
-
        /*
        * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
        * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
@@ -516,10 +510,49 @@ static int acpi_irq_get_penalty(int irq)
                        penalty += PIRQ_PENALTY_PCI_USING;
        }
 
+       if (irq < ACPI_MAX_ISA_IRQS)
+               return penalty + acpi_isa_irq_penalty[irq];
+
        penalty += acpi_irq_pci_sharing_penalty(irq);
        return penalty;
 }
 
+int __init acpi_irq_penalty_init(void)
+{
+       struct acpi_pci_link *link;
+       int i;
+
+       /*
+        * Update penalties to facilitate IRQ balancing.
+        */
+       list_for_each_entry(link, &acpi_link_list, list) {
+
+               /*
+                * reflect the possible and active irqs in the penalty table --
+                * useful for breaking ties.
+                */
+               if (link->irq.possible_count) {
+                       int penalty =
+                           PIRQ_PENALTY_PCI_POSSIBLE /
+                           link->irq.possible_count;
+
+                       for (i = 0; i < link->irq.possible_count; i++) {
+                               if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
+                                       acpi_isa_irq_penalty[link->irq.
+                                                        possible[i]] +=
+                                           penalty;
+                       }
+
+               } else if (link->irq.active &&
+                               (link->irq.active < ACPI_MAX_ISA_IRQS)) {
+                       acpi_isa_irq_penalty[link->irq.active] +=
+                           PIRQ_PENALTY_PCI_POSSIBLE;
+               }
+       }
+
+       return 0;
+}
+
 static int acpi_irq_balance = -1;      /* 0: static, 1: balance */
 
 static int acpi_pci_link_allocate(struct acpi_pci_link *link)
@@ -839,7 +872,7 @@ void acpi_penalize_isa_irq(int irq, int active)
 {
        if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
                acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
-                       active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING;
+                 (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
 }
 
 bool acpi_isa_irq_available(int irq)
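The last hunk is a pure operator-precedence fix: '+' binds tighter than '?:', so the unparenthesized original evaluated the whole sum as the condition and then assigned one of the two penalty constants alone. In miniature:

int v, base = 0, active = 1, A = 10, B = 20;

v = base + active ? A : B;   /* parses as (base + active) ? A : B */
v = base + (active ? A : B); /* the intended computation          */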
index f170d746336d355c7212bbac37b15d2a0164af1a..c72e64893d039f9e4d32d19da926ccd8f71a200a 100644 (file)
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
        if (!pr->flags.throttling)
                return -ENODEV;
 
+       /*
+        * We don't care about error returns - we just try to mark
+        * these reserved so that nobody else is confused into thinking
+        * that this region might be unused..
+        *
+        * (In particular, allocating the IO range for Cardbus)
+        */
+       request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
        pr->throttling.state = 0;
 
        duty_mask = pr->throttling.state_count - 1;
index 6e702ab572204deb87d96c0bd399b42d42f2a1f0..1d31c0c0fc20b48c287d2df293776cf9c1e52329 100644 (file)
@@ -137,7 +137,7 @@ static const struct ata_port_info *ahci_seattle_get_port_info(
        u32 val;
 
        plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
-       if (IS_ERR(plat_data))
+       if (!plat_data)
                return &ahci_port_info;
 
        plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
index 6be7770f68e9a16a2aa8555727358c7cf1df5735..31c183aed368c69c9544c92ecb19853548fb8784 100644 (file)
@@ -4314,6 +4314,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         */
        { "ST380013AS",         "3.20",         ATA_HORKAGE_MAX_SEC_1024 },
 
+       /*
+        * Device times out with higher max sects.
+        * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+        */
+       { "LITEON CX1-JB256-HP", NULL,          ATA_HORKAGE_MAX_SEC_1024 },
+
        /* Devices we expect to fail diagnostics */
 
        /* Devices where NCQ should be avoided */
index 61dc7a99e89a05e64395d1186913685667c8afdb..c6f0174589581e3ac4e07dd791eb6c1604eca219 100644 (file)
@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
        ata_scsi_port_error_handler(host, ap);
 
        /* finish or retry handled scmd's and clean up */
-       WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+       WARN_ON(!list_empty(&eh_work_q));
 
        DPRINTK("EXIT\n");
 }
index bd74ee555278513118d057e832b3ec599923bb69..745489a1c86ab2c11701b124f8998126608c398a 100644 (file)
@@ -986,7 +986,7 @@ static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
                 * Looks like a lot of fuss, but it avoids an unnecessary
                 * +1 usec read-after-write delay for unaffected registers.
                 */
-               laddr = (long)addr & 0xffff;
+               laddr = (unsigned long)addr & 0xffff;
                if (laddr >= 0x300 && laddr <= 0x33c) {
                        laddr &= 0x000f;
                        if (laddr == 0x4 || laddr == 0xc) {
index a969a7e443be26edded6d21245c0b29f24450a72..85aaf2222587bfcc9086686d44ee4e042a46c233 100644 (file)
@@ -181,13 +181,17 @@ static char *res_strings[] = {
        "reserved 27", 
        "reserved 28", 
        "reserved 29", 
-       "reserved 30", 
+       "reserved 30", /* FIXME: The strings between 30 and 40 might be wrong. */
        "reassembly abort: no buffers", 
        "receive buffer overflow", 
        "change in GFC", 
        "receive buffer full", 
        "low priority discard - no receive descriptor", 
        "low priority discard - missing end of packet", 
+       "reserved 37",
+       "reserved 38",
+       "reserved 39",
+       "reserved 40",
        "reserved 41", 
        "reserved 42", 
        "reserved 43", 
index 7d00f2994738b45d91b6f3b0231856f35341fcb2..809dd1e0209154b5fba68c4b9955834a709a4b0d 100644 (file)
@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev)
        /* make the ptr point to the corresponding buffer desc entry */  
        buf_desc_ptr += desc;     
         if (!desc || (desc > iadev->num_rx_desc) || 
-                      ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { 
+                      ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
             free_desc(dev, desc);
             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
             return -1;
index 6b2a84e7f2bece5643c2b68a17254067213b99ce..2609ba20b39622b1268323cd360171a3e545cbc1 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y                  += power/
 obj-$(CONFIG_HAS_DMA)  += dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_ISA)      += isa.o
+obj-$(CONFIG_ISA_BUS_API)      += isa.o
 obj-$(CONFIG_FW_LOADER)        += firmware_class.o
 obj-$(CONFIG_NUMA)     += node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
index 91dba65d72645509b74e289a04402af7696009b4..cd6ccdcf9df0c5ef2a37fabb2b4b7bbfa3c88755 100644 (file)
@@ -180,4 +180,4 @@ static int __init isa_bus_init(void)
        return error;
 }
 
-device_initcall(isa_bus_init);
+postcore_initcall(isa_bus_init);
index db930d3ee31259f6a17b9dd33eb16caee4e12f5c..2a215780eda25c7c33ac44c136c4fadd36238ab8 100644 (file)
@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
 
 static void module_create_drivers_dir(struct module_kobject *mk)
 {
-       if (!mk || mk->drivers_dir)
-               return;
+       static DEFINE_MUTEX(drivers_dir_mutex);
 
-       mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+       mutex_lock(&drivers_dir_mutex);
+       if (mk && !mk->drivers_dir)
+               mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+       mutex_unlock(&drivers_dir_mutex);
 }
 
 void module_add_driver(struct module *mod, struct device_driver *drv)
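The static mutex closes a check-then-create race between concurrent module loads; the interleaving it rules out, sketched as a comment:

/*
 *   CPU0                                 CPU1
 *   if (!mk->drivers_dir) -> true
 *                                        if (!mk->drivers_dir) -> true
 *   kobject_create_and_add("drivers")
 *                                        kobject_create_and_add("drivers")
 *
 * The second create either leaks the first kobject or trips over the
 * duplicate sysfs name; holding drivers_dir_mutex across the
 * test-and-create makes the sequence atomic.
 */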
index 83d6e7ba1a343db48a3de678a3ba5b7df973c221..8c3434bdb26dee8c23a637798b18a3963666ee70 100644 (file)
@@ -211,7 +211,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
                }
 
                /* Mark opp-table as multiple CPUs are sharing it now */
-               opp_table->shared_opp = true;
+               opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
        }
 unlock:
        mutex_unlock(&opp_table_lock);
@@ -227,7 +227,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
  *
  * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
  *
- * Returns -ENODEV if OPP table isn't already present.
+ * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
+ * table's status is access-unknown.
  *
  * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
@@ -249,9 +250,14 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
                goto unlock;
        }
 
+       if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
        cpumask_clear(cpumask);
 
-       if (opp_table->shared_opp) {
+       if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
                list_for_each_entry(opp_dev, &opp_table->dev_list, node)
                        cpumask_set_cpu(opp_dev->dev->id, cpumask);
        } else {
index 94d2010558e363f7991c89e9c0e3037822b761ae..1dfd3dd926241d9e2c9514c99d78b7bb736b0260 100644 (file)
@@ -34,7 +34,10 @@ static struct opp_table *_managed_opp(const struct device_node *np)
                         * But the OPPs will be considered as shared only if the
                         * OPP table contains a "opp-shared" property.
                         */
-                       return opp_table->shared_opp ? opp_table : NULL;
+                       if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
+                               return opp_table;
+
+                       return NULL;
                }
        }
 
@@ -353,7 +356,10 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
        }
 
        opp_table->np = opp_np;
-       opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+       if (of_property_read_bool(opp_np, "opp-shared"))
+               opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+       else
+               opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
 
        mutex_unlock(&opp_table_lock);
 
index 20f3be22e060f38761c9f949d776b33e7de756f0..fabd5ca1a08394b05a1b75cd528ad4f66b58ecf7 100644 (file)
@@ -119,6 +119,12 @@ struct opp_device {
 #endif
 };
 
+enum opp_table_access {
+       OPP_TABLE_ACCESS_UNKNOWN = 0,
+       OPP_TABLE_ACCESS_EXCLUSIVE = 1,
+       OPP_TABLE_ACCESS_SHARED = 2,
+};
+
 /**
  * struct opp_table - Device opp structure
  * @node:      table node - contains the devices with OPPs that
@@ -166,7 +172,7 @@ struct opp_table {
        /* For backward compatibility with v1 bindings */
        unsigned int voltage_tolerance_v1;
 
-       bool shared_opp;
+       enum opp_table_access shared_opp;
        struct dev_pm_opp *suspend_opp;
 
        unsigned int *supported_hw;
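The bool-to-enum change exists because 'false' used to conflate two states: a table known to be exclusive and a table whose sharing was never established (for example, one built from v1 bindings, which never see "opp-shared"). The zero-initialized UNKNOWN value lets callers refuse to guess, as in this hedged sketch:

if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN)
        return -EINVAL;        /* sharing mode never established */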
index eda09090cb523f5ddc2a6c174502110ae8999a57..f642c4264c277bc05d98dc99eb15ac8091886ba5 100644 (file)
@@ -8,8 +8,6 @@
 #include <linux/bcma/bcma.h>
 #include <linux/delay.h>
 
-#define BCMA_CORE_SIZE         0x1000
-
 #define bcma_err(bus, fmt, ...) \
        pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
 #define bcma_warn(bus, fmt, ...) \
index d597e432e195305906d357e113b90659cbdadbd6..ab19adb07a126ae0cae4f0d37170a2929367b115 100644 (file)
@@ -1750,7 +1750,7 @@ aoecmd_init(void)
        int ret;
 
        /* get_zeroed_page returns page with ref count 1 */
-       p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+       p = (void *) get_zeroed_page(GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        empty_page = virt_to_page(p);
index 31e73a7a40f20eb6de0337c79aade131c4b70df4..6a48ed41963ff9d215665b1423d20567fc50ee06 100644 (file)
@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
        debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
        debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
        debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
-       debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+       debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
 
        return 0;
 }
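The flags fix is a one-character pointer bug: nbd is already a struct nbd_device *, so &nbd is the address of the function's own stack slot, and the debugfs read path would later chase a pointer into a dead stack frame. In miniature:

void *wrong = &nbd; /* address of the local pointer variable; dangles
                       once nbd_dev_dbg_init() returns                */
void *right = nbd;  /* the device itself, which outlives the file    */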
index ca13df8546396cf6a7dd72c3a288f27623681baf..fcc5b4e0aef29ed8d5e863e0277687e1abe700e6 100644 (file)
@@ -207,6 +207,9 @@ struct blkfront_info
        struct blk_mq_tag_set tag_set;
        struct blkfront_ring_info *rinfo;
        unsigned int nr_rings;
+       /* Save incomplete reqs and bios for migration. */
+       struct list_head requests;
+       struct bio_list bio_list;
 };
 
 static unsigned int nr_minors;
@@ -874,8 +877,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *qd)
 {
        unsigned long flags;
-       struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+       int qid = hctx->queue_num;
+       struct blkfront_info *info = hctx->queue->queuedata;
+       struct blkfront_ring_info *rinfo = NULL;
 
+       BUG_ON(info->nr_rings <= qid);
+       rinfo = &info->rinfo[qid];
        blk_mq_start_request(qd->rq);
        spin_lock_irqsave(&rinfo->ring_lock, flags);
        if (RING_FULL(&rinfo->ring))
@@ -901,20 +908,9 @@ out_busy:
        return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-                           unsigned int index)
-{
-       struct blkfront_info *info = (struct blkfront_info *)data;
-
-       BUG_ON(info->nr_rings <= index);
-       hctx->driver_data = &info->rinfo[index];
-       return 0;
-}
-
 static struct blk_mq_ops blkfront_mq_ops = {
        .queue_rq = blkif_queue_rq,
        .map_queue = blk_mq_map_queue,
-       .init_hctx = blk_mq_init_hctx,
 };
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +946,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
                return PTR_ERR(rq);
        }
 
+       rq->queuedata = info;
        queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
        if (info->feature_discard) {
@@ -2008,69 +2005,22 @@ static int blkif_recover(struct blkfront_info *info)
 {
        unsigned int i, r_index;
        struct request *req, *n;
-       struct blk_shadow *copy;
        int rc;
        struct bio *bio, *cloned_bio;
-       struct bio_list bio_list, merge_bio;
        unsigned int segs, offset;
        int pending, size;
        struct split_bio *split_bio;
-       struct list_head requests;
 
        blkfront_gather_backend_features(info);
        segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blk_queue_max_segments(info->rq, segs);
-       bio_list_init(&bio_list);
-       INIT_LIST_HEAD(&requests);
 
        for (r_index = 0; r_index < info->nr_rings; r_index++) {
-               struct blkfront_ring_info *rinfo;
-
-               rinfo = &info->rinfo[r_index];
-               /* Stage 1: Make a safe copy of the shadow state. */
-               copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
-                              GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
-               if (!copy)
-                       return -ENOMEM;
-
-               /* Stage 2: Set up free list. */
-               memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
-               for (i = 0; i < BLK_RING_SIZE(info); i++)
-                       rinfo->shadow[i].req.u.rw.id = i+1;
-               rinfo->shadow_free = rinfo->ring.req_prod_pvt;
-               rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+               struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
 
                rc = blkfront_setup_indirect(rinfo);
-               if (rc) {
-                       kfree(copy);
+               if (rc)
                        return rc;
-               }
-
-               for (i = 0; i < BLK_RING_SIZE(info); i++) {
-                       /* Not in use? */
-                       if (!copy[i].request)
-                               continue;
-
-                       /*
-                        * Get the bios in the request so we can re-queue them.
-                        */
-                       if (copy[i].request->cmd_flags &
-                           (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
-                               /*
-                                * Flush operations don't contain bios, so
-                                * we need to requeue the whole request
-                                */
-                               list_add(&copy[i].request->queuelist, &requests);
-                               continue;
-                       }
-                       merge_bio.head = copy[i].request->bio;
-                       merge_bio.tail = copy[i].request->biotail;
-                       bio_list_merge(&bio_list, &merge_bio);
-                       copy[i].request->bio = NULL;
-                       blk_end_request_all(copy[i].request, 0);
-               }
-
-               kfree(copy);
        }
        xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
@@ -2085,7 +2035,7 @@ static int blkif_recover(struct blkfront_info *info)
                kick_pending_request_queues(rinfo);
        }
 
-       list_for_each_entry_safe(req, n, &requests, queuelist) {
+       list_for_each_entry_safe(req, n, &info->requests, queuelist) {
                /* Requeue pending requests (flush or discard) */
                list_del_init(&req->queuelist);
                BUG_ON(req->nr_phys_segments > segs);
@@ -2093,7 +2043,7 @@ static int blkif_recover(struct blkfront_info *info)
        }
        blk_mq_kick_requeue_list(info->rq);
 
-       while ((bio = bio_list_pop(&bio_list)) != NULL) {
+       while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
                /* Traverse the list of pending bios and re-queue them */
                if (bio_segments(bio) > segs) {
                        /*
@@ -2139,9 +2089,42 @@ static int blkfront_resume(struct xenbus_device *dev)
 {
        struct blkfront_info *info = dev_get_drvdata(&dev->dev);
        int err = 0;
+       unsigned int i, j;
 
        dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
+       bio_list_init(&info->bio_list);
+       INIT_LIST_HEAD(&info->requests);
+       for (i = 0; i < info->nr_rings; i++) {
+               struct blkfront_ring_info *rinfo = &info->rinfo[i];
+               struct bio_list merge_bio;
+               struct blk_shadow *shadow = rinfo->shadow;
+
+               for (j = 0; j < BLK_RING_SIZE(info); j++) {
+                       /* Not in use? */
+                       if (!shadow[j].request)
+                               continue;
+
+                       /*
+                        * Get the bios in the request so we can re-queue them.
+                        */
+                       if (shadow[j].request->cmd_flags &
+                                       (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+                               /*
+                                * Flush operations don't contain bios, so
+                                * we need to requeue the whole request
+                                */
+                               list_add(&shadow[j].request->queuelist, &info->requests);
+                               continue;
+                       }
+                       merge_bio.head = shadow[j].request->bio;
+                       merge_bio.tail = shadow[j].request->biotail;
+                       bio_list_merge(&info->bio_list, &merge_bio);
+                       shadow[j].request->bio = NULL;
+                       blk_mq_end_request(shadow[j].request, 0);
+               }
+       }
+
        blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
        err = negotiate_mq(info);
@@ -2149,6 +2132,8 @@ static int blkfront_resume(struct xenbus_device *dev)
                return err;
 
        err = talk_to_blkback(dev, info);
+       if (!err)
+               blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
 
        /*
         * We have to wait for the backend to switch to
@@ -2485,10 +2470,23 @@ static void blkback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateConnected:
-               if (dev->state != XenbusStateInitialised) {
+               /*
+                * talk_to_blkback sets state to XenbusStateInitialised
+                * and blkfront_connect sets it to XenbusStateConnected
+                * (if connection went OK).
+                *
+                * If the backend (or toolstack) decides to poke at backend
+                * state (and re-trigger the watch by setting the state repeatedly
+                * to XenbusStateConnected (4)) we need to deal with this.
+                * This is allowed as this is used to communicate to the guest
+                * that the size of the disk has changed!
+                */
+               if ((dev->state != XenbusStateInitialised) &&
+                   (dev->state != XenbusStateConnected)) {
                        if (talk_to_blkback(dev, info))
                                break;
                }
+
                blkfront_connect(info);
                break;
 
index aef87fdbd187990d85cbe1b4332a54d82a1f499a..44311296ec021350c7a9598ee32010e17463da3e 100644 (file)
@@ -840,6 +840,14 @@ static bool i830_check_flags(unsigned int flags)
        return false;
 }
 
+void intel_gtt_insert_page(dma_addr_t addr,
+                          unsigned int pg,
+                          unsigned int flags)
+{
+       intel_private.driver->write_entry(addr, pg, flags);
+}
+EXPORT_SYMBOL(intel_gtt_insert_page);
+
 void intel_gtt_insert_sg_entries(struct sg_table *st,
                                 unsigned int pg_start,
                                 unsigned int flags)
index 94fb407d85615057ce95d80ef8c119f858346c48..44b1bd6baa38b20db8e93e058a1312c255482990 100644 (file)
@@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
        while (!list_empty(&intf->waiting_rcv_msgs)) {
                smi_msg = list_entry(intf->waiting_rcv_msgs.next,
                                     struct ipmi_smi_msg, link);
+               list_del(&smi_msg->link);
                if (!run_to_completion)
                        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
                                               flags);
@@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
                if (rv > 0) {
                        /*
                         * To preserve message order, quit if we
-                        * can't handle a message.
+                        * can't handle a message.  Add the message
+                        * back at the head; this is safe because this
+                        * tasklet is the only thing that pulls the
+                        * messages.
                         */
+                       list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
                        break;
                } else {
-                       list_del(&smi_msg->link);
                        if (rv == 0)
                                /* Message handled */
                                ipmi_free_smi_msg(smi_msg);
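The reshuffled list handling is an order-preserving retry: unlink the message before delivering it, and on a transient failure (rv > 0) put it back at the head, not the tail, so the next pass resumes in the original order. The pattern, with hypothetical names:

msg = list_first_entry(&queue, struct msg_type, link);
list_del(&msg->link);
rv = deliver(msg);
if (rv > 0) {
        /* not handled yet: re-add at the head to keep ordering */
        list_add(&msg->link, &queue);
        break;
}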
index 53ddba26578ce341eb3f488001a9cc267342eeda..98efbfcdb503e181d32f44a0dd0b44f286e6080a 100644 (file)
@@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE
 config COMMON_CLK_NXP
        def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
        select REGMAP_MMIO if ARCH_LPC32XX
+       select MFD_SYSCON if ARCH_LPC18XX
        ---help---
          Support for clock providers on NXP platforms.
 
index 10f846cc8db172c5491ddc2508f7084ac5ef5e71..25d5906640c365fe48c618361cb23c98e6a69897 100644 (file)
@@ -99,7 +99,7 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
        struct clk_programmable *prog = to_clk_programmable(hw);
        const struct clk_programmable_layout *layout = prog->layout;
        unsigned int mask = layout->css_mask;
-       unsigned int pckr = 0;
+       unsigned int pckr = index;
 
        if (layout->have_slck_mck)
                mask |= AT91_PMC_CSSMCK_MCK;
index efba7d4dbcfc4134a88dff1efa5309f0551a8ea6..79bcb2e4206048c74add49b88f9efe97d11622ff 100644 (file)
@@ -144,9 +144,9 @@ static int oxnas_stdclk_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        regmap = syscon_node_to_regmap(of_get_parent(np));
-       if (!regmap) {
+       if (IS_ERR(regmap)) {
                dev_err(&pdev->dev, "failed to have parent regmap\n");
-               return -EINVAL;
+               return PTR_ERR(regmap);
        }
 
        for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) {
index 020a29acc5b079e7c6560745804a6fa91e7d9273..51f54380474b6acbe4f39a6ea1a54c19cb4121f1 100644 (file)
@@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev)
 
        /* register fixed rate clocks */
        clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
-                                               CLK_IS_ROOT, 24000000);
+                                               0, 24000000);
        clks[FRCCLK] =  clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
-                                               CLK_IS_ROOT, 8000000);
+                                               0, 8000000);
        clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
-                                               CLK_IS_ROOT, 8000000);
+                                               0, 8000000);
        clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
-                                               CLK_IS_ROOT, 32000);
+                                               0, 32000);
        clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
-                                               CLK_IS_ROOT, 24000000);
+                                               0, 24000000);
        /* fixed rate (optional) clock */
        if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
                pr_info("pic32-clk: dt requests SOSC.\n");
index 4bb130cd006275f5c70e27bcda7a746a00c76d34..05b3d73bfefaacdd54c6123609165d6d2d8481d8 100644 (file)
@@ -321,9 +321,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
        }
 
        cclk = clk_register(NULL, &cpuclk->hw);
-       if (IS_ERR(clk)) {
+       if (IS_ERR(cclk)) {
                pr_err("%s: could not register cpuclk %s\n", __func__,  name);
-               ret = PTR_ERR(clk);
+               ret = PTR_ERR(cclk);
                goto free_rate_table;
        }
 
index bc856f21f6b20d35e2c421c51a54e1718274302e..077fcdc7908bb9f3791fe20bc60a0266327ca050 100644 (file)
@@ -41,8 +41,6 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
 #define ROCKCHIP_MMC_DEGREE_MASK 0x3
 #define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
 #define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
-#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1
-#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1
 
 #define PSECS_PER_SEC 1000000000000LL
 
@@ -154,6 +152,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
                return ERR_PTR(-ENOMEM);
 
        init.name = name;
+       init.flags = 0;
        init.num_parents = num_parents;
        init.parent_names = parent_names;
        init.ops = &rockchip_mmc_clk_ops;
@@ -162,15 +161,6 @@ struct clk *rockchip_clk_register_mmc(const char *name,
        mmc_clock->reg = reg;
        mmc_clock->shift = shift;
 
-       /*
-        * Assert init_state to soft reset the CLKGEN
-        * for mmc tuning phase and degree
-        */
-       if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT)
-               writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET,
-                                    ROCKCHIP_MMC_INIT_STATE_RESET,
-                                    mmc_clock->shift), mmc_clock->reg);
-
        clk = clk_register(NULL, &mmc_clock->hw);
        if (IS_ERR(clk))
                kfree(mmc_clock);
index 291543f52caad63630b45cf3500d94e2bf4f9733..8059a8d3ea36430e359e4857b33b58a5f4952aec 100644 (file)
@@ -832,9 +832,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
                        RK3399_CLKGATE_CON(13), 1, GFLAGS),
 
        /* perihp */
-       GATE(0, "cpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+       GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
                        RK3399_CLKGATE_CON(5), 0, GFLAGS),
-       GATE(0, "gpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
+       GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
                        RK3399_CLKGATE_CON(5), 1, GFLAGS),
        COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
                        RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
@@ -1466,6 +1466,8 @@ static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = {
 
 static const char *const rk3399_cru_critical_clocks[] __initconst = {
        "aclk_cci_pre",
+       "aclk_gic",
+       "aclk_gic_noc",
        "pclk_perilp0",
        "pclk_perilp0",
        "hclk_perilp0",
@@ -1508,6 +1510,7 @@ static void __init rk3399_clk_init(struct device_node *np)
        ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
        if (IS_ERR(ctx)) {
                pr_err("%s: rockchip clk init failed\n", __func__);
+               iounmap(reg_base);
                return;
        }
 
@@ -1553,6 +1556,7 @@ static void __init rk3399_pmu_clk_init(struct device_node *np)
        ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
        if (IS_ERR(ctx)) {
                pr_err("%s: rockchip pmu clk init failed\n", __func__);
+               iounmap(reg_base);
                return;
        }
 
index 445a7498d6df8f590ef14f447e39dc3e72392a77..9780fac6d029b9b1b85a03ce3d489b31be598281 100644 (file)
@@ -33,6 +33,8 @@ struct sun4i_a10_display_clk_data {
 
        u8      width_div;
        u8      width_mux;
+
+       u32     flags;
 };
 
 struct reset_data {
@@ -166,7 +168,7 @@ static void __init sun4i_a10_display_init(struct device_node *node,
                                     data->has_div ? &div->hw : NULL,
                                     data->has_div ? &clk_divider_ops : NULL,
                                     &gate->hw, &clk_gate_ops,
-                                    0);
+                                    data->flags);
        if (IS_ERR(clk)) {
                pr_err("%s: Couldn't register the clock\n", clk_name);
                goto free_div;
@@ -232,6 +234,7 @@ static const struct sun4i_a10_display_clk_data sun4i_a10_tcon_ch0_data __initcon
        .offset_rst     = 29,
        .offset_mux     = 24,
        .width_mux      = 2,
+       .flags          = CLK_SET_RATE_PARENT,
 };
 
 static void __init sun4i_a10_tcon_ch0_setup(struct device_node *node)
index 98a4582de56a27c370848b240cb044345e145dc3..b6d29d1bedcaead534320d5a2cad836005821aac 100644 (file)
@@ -79,15 +79,11 @@ static int tcon_ch1_is_enabled(struct clk_hw *hw)
 static u8 tcon_ch1_get_parent(struct clk_hw *hw)
 {
        struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
-       int num_parents = clk_hw_get_num_parents(hw);
        u32 reg;
 
        reg = readl(tclk->reg) >> TCON_CH1_SCLK2_MUX_SHIFT;
        reg &= reg >> TCON_CH1_SCLK2_MUX_MASK;
 
-       if (reg >= num_parents)
-               return -EINVAL;
-
        return reg;
 }
 
index 15d06fcf0b500c323e938c08de5947e394121cb9..b02f9c606e0baa59023629b3c1cba8c7ebfbc686 100644 (file)
@@ -56,11 +56,21 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
 /* proc_event_counts is used as the sequence number of the netlink message */
 static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
 
-static inline void get_seq(__u32 *ts, int *cpu)
+static inline void send_msg(struct cn_msg *msg)
 {
        preempt_disable();
-       *ts = __this_cpu_inc_return(proc_event_counts) - 1;
-       *cpu = smp_processor_id();
+
+       msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
+       ((struct proc_event *)msg->data)->cpu = smp_processor_id();
+
+       /*
+        * Preemption remains disabled during send to ensure the messages are
+        * ordered according to their sequence numbers.
+        *
+        * If cn_netlink_send() fails, the data is not sent.
+        */
+       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
+
        preempt_enable();
 }
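Folding the sequence-number assignment and the send into one preempt-disabled section keeps the wire order aligned with the sequence order; the interleaving it rules out:

/*
 *   task A: seq = N;        <preempted here>
 *   task B: seq = N + 1; cn_netlink_send()  <- reaches the wire first
 *   task A: cn_netlink_send()               <- listener sees N after N + 1
 *
 * GFP_NOWAIT replaces GFP_KERNEL because a sleeping allocation is not
 * allowed while preemption is disabled.
 */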
 
@@ -77,7 +87,6 @@ void proc_fork_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_FORK;
        rcu_read_lock();
@@ -92,8 +101,7 @@ void proc_fork_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       /*  If cn_netlink_send() failed, the data is not sent */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_exec_connector(struct task_struct *task)
@@ -108,7 +116,6 @@ void proc_exec_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_EXEC;
        ev->event_data.exec.process_pid = task->pid;
@@ -118,7 +125,7 @@ void proc_exec_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_id_connector(struct task_struct *task, int which_id)
@@ -150,14 +157,13 @@ void proc_id_connector(struct task_struct *task, int which_id)
                return;
        }
        rcu_read_unlock();
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
 
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_sid_connector(struct task_struct *task)
@@ -172,7 +178,6 @@ void proc_sid_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_SID;
        ev->event_data.sid.process_pid = task->pid;
@@ -182,7 +187,7 @@ void proc_sid_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
@@ -197,7 +202,6 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_PTRACE;
        ev->event_data.ptrace.process_pid  = task->pid;
@@ -215,7 +219,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_comm_connector(struct task_struct *task)
@@ -230,7 +234,6 @@ void proc_comm_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_COMM;
        ev->event_data.comm.process_pid  = task->pid;
@@ -241,7 +244,7 @@ void proc_comm_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_coredump_connector(struct task_struct *task)
@@ -256,7 +259,6 @@ void proc_coredump_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_COREDUMP;
        ev->event_data.coredump.process_pid = task->pid;
@@ -266,7 +268,7 @@ void proc_coredump_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 void proc_exit_connector(struct task_struct *task)
@@ -281,7 +283,6 @@ void proc_exit_connector(struct task_struct *task)
        msg = buffer_to_cn_msg(buffer);
        ev = (struct proc_event *)msg->data;
        memset(&ev->event_data, 0, sizeof(ev->event_data));
-       get_seq(&msg->seq, &ev->cpu);
        ev->timestamp_ns = ktime_get_ns();
        ev->what = PROC_EVENT_EXIT;
        ev->event_data.exit.process_pid = task->pid;
@@ -293,7 +294,7 @@ void proc_exit_connector(struct task_struct *task)
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
 
 /*
@@ -325,7 +326,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
        msg->ack = rcvd_ack + 1;
        msg->len = sizeof(*ev);
        msg->flags = 0; /* not used */
-       cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+       send_msg(msg);
 }
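
The repeated one-line replacements above fold the sequence-number handling into a new helper. A minimal sketch of what send_msg() plausibly looks like, assuming it merges the per-CPU counter increment that get_seq() used to perform with the netlink send in a single preemption-disabled section, so sequence numbers and emission order can no longer be interleaved across CPUs; proc_event_counts is assumed to be the driver's existing per-CPU counter:

    static inline void send_msg(struct cn_msg *msg)
    {
            preempt_disable();

            msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
            ((struct proc_event *)msg->data)->cpu = smp_processor_id();

            /* Keeping preemption disabled across the send preserves the
             * per-CPU ordering of sequence numbers on the wire. */
            cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);

            preempt_enable();
    }
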
 
 /**
index 3646b143bbf5342f0ac8e0a1b90a86efc7c8cef5..0bb44d5b5df49d385b5bcfd43297b750417461a7 100644 (file)
@@ -79,15 +79,16 @@ static const struct of_device_id machines[] __initconst = {
 static int __init cpufreq_dt_platdev_init(void)
 {
        struct device_node *np = of_find_node_by_path("/");
+       const struct of_device_id *match;
 
        if (!np)
                return -ENODEV;
 
-       if (!of_match_node(machines, np))
+       match = of_match_node(machines, np);
+       of_node_put(np);
+       if (!match)
                return -ENODEV;
 
-       of_node_put(of_root);
-
        return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
                                                               NULL, 0));
 }
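
The reworked init illustrates the OF reference-counting rule: of_find_node_by_path() returns its node with the reference count raised, and the caller must drop that same reference on every path. The old code put of_root only after a successful match, leaking the reference when no machine matched; the new code puts np unconditionally, right after the (non-consuming) of_match_node() lookup. A sketch of the general pattern, with a hypothetical table name:

    struct device_node *np = of_find_node_by_path("/");  /* takes a reference */
    const struct of_device_id *match;

    if (!np)
            return -ENODEV;

    match = of_match_node(my_match_table, np);  /* does not consume the ref */
    of_node_put(np);                            /* always balance the get */
    if (!match)
            return -ENODEV;
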
index 36bc11a106aa075ab2bf0d85d00f0acff2b60770..5617c7087d775ff5f4f93cc084b6bc8769be77ec 100644 (file)
@@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
                                        unsigned int target_freq)
 {
-       clamp_val(target_freq, policy->min, policy->max);
+       target_freq = clamp_val(target_freq, policy->min, policy->max);
 
        return cpufreq_driver->fast_switch(policy, target_freq);
 }
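
This one-liner matters because clamp_val() is a pure expression: it returns the clamped value and leaves its argument untouched, so the old code discarded the result and passed the unclamped frequency straight to the driver. Illustrative use with made-up numbers:

    unsigned int target_freq = 3200000;  /* kHz, above policy->max */

    /* No-op: the return value is thrown away. */
    clamp_val(target_freq, 800000, 2800000);

    /* Correct: target_freq is now 2800000. */
    target_freq = clamp_val(target_freq, 800000, 2800000);
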
@@ -2261,6 +2261,10 @@ int cpufreq_update_policy(unsigned int cpu)
         * -> ask driver for current freq and notify governors about a change
         */
        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+               if (cpufreq_suspended) {
+                       ret = -EAGAIN;
+                       goto unlock;
+               }
                new_policy.cur = cpufreq_update_current_freq(policy);
                if (WARN_ON(!new_policy.cur)) {
                        ret = -EIO;
index 3a9c4325d6e224a55b0aa334476b5cc99529f3c9..1fa1a32928d70a30050b30ca2d6eef4aea62d8e3 100644 (file)
@@ -372,26 +372,9 @@ static bool intel_pstate_get_ppc_enable_status(void)
        return acpi_ppc;
 }
 
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
-       return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu;
-       int turbo_pss_ctl;
        int ret;
        int i;
 
@@ -441,15 +424,14 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
         * max frequency, which will cause a reduced performance as
         * this driver uses real max turbo frequency as the max
         * frequency. So correct this frequency in _PSS table to
-        * correct max turbo frequency based on the turbo ratio.
+        * correct max turbo frequency based on the turbo state.
         * Also need to convert to MHz as _PSS freq is in MHz.
         */
-       turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
-       if (turbo_pss_ctl > cpu->pstate.max_pstate)
+       if (!limits->turbo_disabled)
                cpu->acpi_perf_data.states[0].core_frequency =
                                        policy->cpuinfo.max_freq / 1000;
        cpu->valid_pss_table = true;
-       pr_info("_PPC limits will be enforced\n");
+       pr_debug("_PPC limits will be enforced\n");
 
        return;
 
@@ -1418,6 +1400,9 @@ static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 {
        struct cpudata *cpu = all_cpu_data[cpu_num];
 
+       if (cpu->update_util_set)
+               return;
+
        /* Prevent intel_pstate_update_util() from using stale data. */
        cpu->sample.time = 0;
        cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
@@ -1458,7 +1443,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
 
-       intel_pstate_clear_update_util_hook(policy->cpu);
+       pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+                policy->cpuinfo.max_freq, policy->max);
 
        cpu = all_cpu_data[0];
        if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
@@ -1495,13 +1481,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                                   limits->max_sysfs_pct);
        limits->max_perf_pct = max(limits->min_policy_pct,
                                   limits->max_perf_pct);
-       limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
        /* Make sure min_perf_pct <= max_perf_pct */
        limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
 
        limits->min_perf = div_fp(limits->min_perf_pct, 100);
        limits->max_perf = div_fp(limits->max_perf_pct, 100);
+       limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
 
  out:
        intel_pstate_set_update_util_hook(policy->cpu);
@@ -1558,8 +1544,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
-       policy->cpuinfo.max_freq =
-               cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       update_turbo_state();
+       policy->cpuinfo.max_freq = limits->turbo_disabled ?
+                       cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+       policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
        intel_pstate_init_acpi_perf_limits(policy);
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);
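
A worked example of the new cpuinfo.max_freq computation, with assumed (purely illustrative) values:

    /* Assume cpu->pstate.scaling = 100000 (kHz per P-state step),
     * max_pstate = 24 and turbo_pstate = 35:
     *
     *   turbo enabled:  cpuinfo.max_freq = 35 * 100000 = 3500000 kHz (3.5 GHz)
     *   turbo disabled: cpuinfo.max_freq = 24 * 100000 = 2400000 kHz (2.4 GHz)
     *
     * so turbo disabled in firmware no longer inflates the advertised max.
     */
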
index 808a320e9d5d0cdb0a943de7d9af5c89e9b444d5..a7ecb9a84c159a464665b4ad600b3490aa2b29ed 100644 (file)
@@ -487,7 +487,7 @@ static int __init pcc_cpufreq_probe(void)
        doorbell.space_id = reg_resource->space_id;
        doorbell.bit_width = reg_resource->bit_width;
        doorbell.bit_offset = reg_resource->bit_offset;
-       doorbell.access_width = 64;
+       doorbell.access_width = 4;
        doorbell.address = reg_resource->address;
 
        pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
index a4d0059e232cbd22478d29d92370eb027e8bde79..c73207abb5a44a77ed0819101ab75cd626ceb353 100644 (file)
@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
        struct cpuidle_state *target_state = &drv->states[index];
        bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
-       u64 time_start, time_end;
+       ktime_t time_start, time_end;
        s64 diff;
 
        /*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        sched_idle_set_state(target_state);
 
        trace_cpu_idle_rcuidle(index, dev->cpu);
-       time_start = local_clock();
+       time_start = ns_to_ktime(local_clock());
 
        stop_critical_timings();
        entered_state = target_state->enter(dev, drv, index);
        start_critical_timings();
 
-       time_end = local_clock();
+       time_end = ns_to_ktime(local_clock());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
        /* The cpu is no longer idle or about to enter idle. */
@@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        if (!cpuidle_state_is_coupled(drv, index))
                local_irq_enable();
 
-       /*
-        * local_clock() returns the time in nanosecond, let's shift
-        * by 10 (divide by 1024) to have microsecond based time.
-        */
-       diff = (time_end - time_start) >> 10;
+       diff = ktime_us_delta(time_end, time_start);
        if (diff > INT_MAX)
                diff = INT_MAX;
 
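The deleted shift was a divide-by-1024 standing in for a divide-by-1000, which systematically under-reported idle residency by about 2.3%; ktime_us_delta() performs the exact conversion. The arithmetic, for one millisecond of idle time:

    s64 ns = 1000000;                /* 1 ms expressed in nanoseconds */
    s64 approx = ns >> 10;           /* 976 us: ~2.3% short */
    s64 exact  = div_s64(ns, 1000);  /* 1000 us: what ktime_us_delta() yields */
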
index 52c7395cb8d8b793483530ace5736ff9bd318c24..0d0d4529ee36013b9eef60e1f7d18b7df9961705 100644 (file)
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
        unsigned int unit;
+       u32 unit_size;
        int ret;
 
        if (!ctx->u.aes.key_len)
@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
        if (!req->info)
                return -EINVAL;
 
-       for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
-               if (!(req->nbytes & (unit_size_map[unit].size - 1)))
-                       break;
+       unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+       if (req->nbytes <= unit_size_map[0].size) {
+               for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+                       if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+                               unit_size = unit_size_map[unit].value;
+                               break;
+                       }
+               }
+       }
 
-       if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+       if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
            (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
                /* Use the fallback to process the request for any
                 * unsupported unit sizes or key sizes
@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
        rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
        rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
                                           : CCP_AES_ACTION_DECRYPT;
-       rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+       rctx->cmd.u.xts.unit_size = unit_size;
        rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
        rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
        rctx->cmd.u.xts.iv = &rctx->iv_sg;
index 6eefaa2fe58f990ce39ca1d7c7078a3d65eeafa6..63464e86f2b1bdd77d115132933e5cbfbc20ecc9 100644 (file)
@@ -1986,7 +1986,7 @@ err_algs:
                                        &dd->pdata->algs_info[i].algs_list[j]);
 err_pm:
        pm_runtime_disable(dev);
-       if (dd->polling_mode)
+       if (!dd->polling_mode)
                dma_release_channel(dd->dma_lch);
 data_err:
        dev_err(dev, "initialization failed.\n");
index 6d74b91f2152807a0f65470dd799aa5080a3c111..5fc3dbb9ada0240011f58fa072e3015fb69bea7e 100644 (file)
@@ -2,6 +2,7 @@ $(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
                             $(obj)/qat_rsapubkey-asn1.h
 $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
                              $(obj)/qat_rsaprivkey-asn1.h
+$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
 
 clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
 clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
index 574e87c7f2b8933c04242de63afce3ac65723cf3..9acccad26928a4d60cb4fdcbde3feab45a11c3ad 100644 (file)
@@ -781,7 +781,7 @@ static int hash_process_data(struct hash_device_data *device_data,
                                                &device_data->state);
                                memmove(req_ctx->state.buffer,
                                        device_data->state.buffer,
-                                       HASH_BLOCK_SIZE / sizeof(u32));
+                                       HASH_BLOCK_SIZE);
                                if (ret) {
                                        dev_err(device_data->dev,
                                                "%s: hash_resume_state() failed!\n",
@@ -832,7 +832,7 @@ static int hash_process_data(struct hash_device_data *device_data,
 
                        memmove(device_data->state.buffer,
                                req_ctx->state.buffer,
-                               HASH_BLOCK_SIZE / sizeof(u32));
+                               HASH_BLOCK_SIZE);
                        if (ret) {
                                dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
                                        __func__);
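
Both hunks fix the same unit mismatch: memmove() takes a length in bytes, but HASH_BLOCK_SIZE / sizeof(u32) is the element count of the u32 state buffer, so only a quarter of the hash state was being copied. A minimal sketch, assuming the buffer is declared as a u32 array spanning HASH_BLOCK_SIZE bytes:

    u8 dst[HASH_BLOCK_SIZE];
    u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)];

    /* Wrong: copies HASH_BLOCK_SIZE / 4 bytes, one quarter of the state. */
    memmove(dst, buffer, HASH_BLOCK_SIZE / sizeof(u32));

    /* Right: memmove() lengths are in bytes. */
    memmove(dst, buffer, HASH_BLOCK_SIZE);
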
index 495577b6d31b33d72792bb7f0f14b3d0a2c66b7e..94ad5c0adbcbd3002e4813d70b100459cfa1ec57 100644 (file)
@@ -182,7 +182,7 @@ struct crypto_alg p8_aes_cbc_alg = {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "p8_aes_cbc",
        .cra_module = THIS_MODULE,
-       .cra_priority = 1000,
+       .cra_priority = 2000,
        .cra_type = &crypto_blkcipher_type,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
        .cra_alignmask = 0,
index 0a3c1b04cf3c6057fe667ec95e10588906613935..38ed10d761d006eb2f9e24e1c318f98a09a4a323 100644 (file)
@@ -166,7 +166,7 @@ struct crypto_alg p8_aes_ctr_alg = {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "p8_aes_ctr",
        .cra_module = THIS_MODULE,
-       .cra_priority = 1000,
+       .cra_priority = 2000,
        .cra_type = &crypto_blkcipher_type,
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
        .cra_alignmask = 0,
index 9f4994cabcc7f44af299a8c8d3fbd90c41982fa4..b18e67d0e065d897bd34d4e2b255056293a17550 100644 (file)
@@ -141,7 +141,7 @@ my $vmr = sub {
 
 # Some ABIs specify vrsave, special-purpose register #256, as reserved
 # for system use.
-my $no_vrsave = ($flavour =~ /aix|linux64le/);
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
 my $mtspr = sub {
     my ($f,$idx,$ra) = @_;
     if ($idx == 256 && $no_vrsave) {
index 1d6c803804d5f44a3cd5ad4b6b90ee60ae6b407d..e92418facc92ae133f6d00553501c8b1ce8d6292 100644 (file)
@@ -268,8 +268,11 @@ int update_devfreq(struct devfreq *devfreq)
        devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
 
        err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
-       if (err)
+       if (err) {
+               freqs.new = cur_freq;
+               devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
                return err;
+       }
 
        freqs.new = freq;
        devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
@@ -552,6 +555,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
        devfreq->profile = profile;
        strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
        devfreq->previous_freq = profile->initial_freq;
+       devfreq->last_status.current_frequency = profile->initial_freq;
        devfreq->data = data;
        devfreq->nb.notifier_call = devfreq_notifier_call;
 
@@ -561,23 +565,22 @@ struct devfreq *devfreq_add_device(struct device *dev,
                mutex_lock(&devfreq->lock);
        }
 
-       devfreq->trans_table =  devm_kzalloc(dev, sizeof(unsigned int) *
-                                               devfreq->profile->max_state *
-                                               devfreq->profile->max_state,
-                                               GFP_KERNEL);
-       devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
-                                               devfreq->profile->max_state,
-                                               GFP_KERNEL);
-       devfreq->last_stat_updated = jiffies;
-
        dev_set_name(&devfreq->dev, "%s", dev_name(dev));
        err = device_register(&devfreq->dev);
        if (err) {
-               put_device(&devfreq->dev);
                mutex_unlock(&devfreq->lock);
                goto err_out;
        }
 
+       devfreq->trans_table =  devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
+                                               devfreq->profile->max_state *
+                                               devfreq->profile->max_state,
+                                               GFP_KERNEL);
+       devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
+                                               devfreq->profile->max_state,
+                                               GFP_KERNEL);
+       devfreq->last_stat_updated = jiffies;
+
        srcu_init_notifier_head(&devfreq->transition_notifier_list);
 
        mutex_unlock(&devfreq->lock);
@@ -603,7 +606,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
 err_init:
        list_del(&devfreq->node);
        device_unregister(&devfreq->dev);
-       kfree(devfreq);
 err_out:
        return ERR_PTR(err);
 }
@@ -621,7 +623,6 @@ int devfreq_remove_device(struct devfreq *devfreq)
                return -EINVAL;
 
        device_unregister(&devfreq->dev);
-       put_device(&devfreq->dev);
 
        return 0;
 }
index 6b6a5f310486feda5c9b323a7129819dc163b7f3..a5841403bde8b64c314c921148b6189d7dcef82f 100644 (file)
@@ -220,9 +220,6 @@ static int exynos_nocp_parse_dt(struct platform_device *pdev,
 
        /* Maps the memory mapped IO to control nocp register */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (IS_ERR(res))
-               return PTR_ERR(res);
-
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
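
The dropped check was dead code: platform_get_resource() returns NULL on failure, never an ERR_PTR, so IS_ERR() on its result could not fire. Since devm_ioremap_resource() already rejects a NULL resource with -EINVAL, the idiomatic sequence is simply:

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(dev, res);  /* validates res, including NULL */
    if (IS_ERR(base))
            return PTR_ERR(base);
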
index 9824bc4addf856d4b674b47d15058a74fa0f74e7..25bcfa0b474fe3bf9c2584ecd4aa799a7262c0c8 100644 (file)
@@ -1,11 +1,20 @@
 menu "DMABUF options"
 
 config SYNC_FILE
-       bool "sync_file support for fences"
+       bool "Explicit Synchronization Framework"
        default n
        select ANON_INODES
        select DMA_SHARED_BUFFER
        ---help---
-         This option enables the fence framework synchronization to export
-         sync_files to userspace that can represent one or more fences.
+         The Sync File Framework adds explicit synchronization via
+         userspace. It enables sending and receiving 'struct fence'
+         objects to/from userspace via Sync File fds, so that
+         synchronization between drivers can be mediated by userspace
+         components. It has been ported from Android.
+
+         The first and main user of this is graphics, in which a fence is
+         associated with a buffer. When a job is submitted to the GPU a
+         fence is attached to the buffer and is transferred via userspace,
+         using Sync File fds, to the DRM driver for example. More details
+         at Documentation/sync_file.txt.
+
 endmenu
index 4a424eca75edd7f31745b835b30e6710e611bd27..f353db213a8143e41b28869183d4f73e6119d54e 100644 (file)
@@ -1,2 +1,2 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
+obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
 obj-$(CONFIG_SYNC_FILE)                += sync_file.o
index 4a2c07ee667773bf02b1ece356c60089ccffb867..ddaee60ae52a5595063c4fac1389eef10ffcb3c0 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/seq_file.h>
 #include <linux/poll.h>
 #include <linux/reservation.h>
+#include <linux/mm.h>
 
 #include <uapi/linux/dma-buf.h>
 
@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
        dmabuf = file->private_data;
 
        /* check for overflowing the buffer's size */
-       if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+       if (vma->vm_pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;
 
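vma_pages(), pulled in by the new linux/mm.h include, is just a readability helper for the expression being replaced; its definition is, to the best of my recollection, exactly the open-coded form:

    #define vma_pages(vma) (((vma)->vm_end - (vma)->vm_start) >> PAGE_SHIFT)
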
@@ -333,6 +334,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        struct reservation_object *resv = exp_info->resv;
        struct file *file;
        size_t alloc_size = sizeof(struct dma_buf);
+       int ret;
 
        if (!exp_info->resv)
                alloc_size += sizeof(struct reservation_object);
@@ -356,8 +358,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 
        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
        if (!dmabuf) {
-               module_put(exp_info->owner);
-               return ERR_PTR(-ENOMEM);
+               ret = -ENOMEM;
+               goto err_module;
        }
 
        dmabuf->priv = exp_info->priv;
@@ -378,8 +380,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
                                        exp_info->flags);
        if (IS_ERR(file)) {
-               kfree(dmabuf);
-               return ERR_CAST(file);
+               ret = PTR_ERR(file);
+               goto err_dmabuf;
        }
 
        file->f_mode |= FMODE_LSEEK;
@@ -393,6 +395,12 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        mutex_unlock(&db_list.lock);
 
        return dmabuf;
+
+err_dmabuf:
+       kfree(dmabuf);
+err_module:
+       module_put(exp_info->owner);
+       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(dma_buf_export);
 
@@ -723,11 +731,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                return -EINVAL;
 
        /* check for offset overflow */
-       if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+       if (pgoff + vma_pages(vma) < pgoff)
                return -EOVERFLOW;
 
        /* check for overflowing the buffer's size */
-       if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+       if (pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;
 
@@ -823,7 +831,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 EXPORT_SYMBOL_GPL(dma_buf_vunmap);
 
 #ifdef CONFIG_DEBUG_FS
-static int dma_buf_describe(struct seq_file *s)
+static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
        int ret;
        struct dma_buf *buf_obj;
@@ -878,17 +886,9 @@ static int dma_buf_describe(struct seq_file *s)
        return 0;
 }
 
-static int dma_buf_show(struct seq_file *s, void *unused)
-{
-       void (*func)(struct seq_file *) = s->private;
-
-       func(s);
-       return 0;
-}
-
 static int dma_buf_debug_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, dma_buf_show, inode->i_private);
+       return single_open(file, dma_buf_debug_show, NULL);
 }
 
 static const struct file_operations dma_buf_debug_fops = {
@@ -902,20 +902,23 @@ static struct dentry *dma_buf_debugfs_dir;
 
 static int dma_buf_init_debugfs(void)
 {
+       struct dentry *d;
        int err = 0;
 
-       dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
+       d = debugfs_create_dir("dma_buf", NULL);
+       if (IS_ERR(d))
+               return PTR_ERR(d);
 
-       if (IS_ERR(dma_buf_debugfs_dir)) {
-               err = PTR_ERR(dma_buf_debugfs_dir);
-               dma_buf_debugfs_dir = NULL;
-               return err;
-       }
-
-       err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);
+       dma_buf_debugfs_dir = d;
 
-       if (err)
+       d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+                               NULL, &dma_buf_debug_fops);
+       if (IS_ERR(d)) {
                pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
+               debugfs_remove_recursive(dma_buf_debugfs_dir);
+               dma_buf_debugfs_dir = NULL;
+               err = PTR_ERR(d);
+       }
 
        return err;
 }
@@ -925,17 +928,6 @@ static void dma_buf_uninit_debugfs(void)
        if (dma_buf_debugfs_dir)
                debugfs_remove_recursive(dma_buf_debugfs_dir);
 }
-
-int dma_buf_debugfs_create_file(const char *name,
-                               int (*write)(struct seq_file *))
-{
-       struct dentry *d;
-
-       d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
-                       write, &dma_buf_debug_fops);
-
-       return PTR_ERR_OR_ZERO(d);
-}
 #else
 static inline int dma_buf_init_debugfs(void)
 {
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
new file mode 100644 (file)
index 0000000..a8731c8
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * fence-array: aggregate fences to be waited together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ *     Gustavo Padovan <gustavo@padovan.org>
+ *     Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/fence-array.h>
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
+
+static const char *fence_array_get_driver_name(struct fence *fence)
+{
+       return "fence_array";
+}
+
+static const char *fence_array_get_timeline_name(struct fence *fence)
+{
+       return "unbound";
+}
+
+static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
+{
+       struct fence_array_cb *array_cb =
+               container_of(cb, struct fence_array_cb, cb);
+       struct fence_array *array = array_cb->array;
+
+       if (atomic_dec_and_test(&array->num_pending))
+               fence_signal(&array->base);
+       fence_put(&array->base);
+}
+
+static bool fence_array_enable_signaling(struct fence *fence)
+{
+       struct fence_array *array = to_fence_array(fence);
+       struct fence_array_cb *cb = (void *)(&array[1]);
+       unsigned i;
+
+       for (i = 0; i < array->num_fences; ++i) {
+               cb[i].array = array;
+               /*
+                * As we may report that the fence is signaled before all
+                * callbacks are complete, we need to take an additional
+                * reference count on the array so that we do not free it too
+                * early. The core fence handling will only hold the reference
+                * until we signal the array as complete (but that is now
+                * insufficient).
+                */
+               fence_get(&array->base);
+               if (fence_add_callback(array->fences[i], &cb[i].cb,
+                                      fence_array_cb_func)) {
+                       fence_put(&array->base);
+                       if (atomic_dec_and_test(&array->num_pending))
+                               return false;
+               }
+       }
+
+       return true;
+}
+
+static bool fence_array_signaled(struct fence *fence)
+{
+       struct fence_array *array = to_fence_array(fence);
+
+       return atomic_read(&array->num_pending) <= 0;
+}
+
+static void fence_array_release(struct fence *fence)
+{
+       struct fence_array *array = to_fence_array(fence);
+       unsigned i;
+
+       for (i = 0; i < array->num_fences; ++i)
+               fence_put(array->fences[i]);
+
+       kfree(array->fences);
+       fence_free(fence);
+}
+
+const struct fence_ops fence_array_ops = {
+       .get_driver_name = fence_array_get_driver_name,
+       .get_timeline_name = fence_array_get_timeline_name,
+       .enable_signaling = fence_array_enable_signaling,
+       .signaled = fence_array_signaled,
+       .wait = fence_default_wait,
+       .release = fence_array_release,
+};
+
+/**
+ * fence_array_create - Create a custom fence array
+ * @num_fences:                [in]    number of fences to add in the array
+ * @fences:            [in]    array containing the fences
+ * @context:           [in]    fence context to use
+ * @seqno:             [in]    sequence number to use
+ * @signal_on_any:     [in]    signal on any fence in the array
+ *
+ * Allocate a fence_array object and initialize the base fence with fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences entries
+ * and fill it with the fences it wants to add to the object. Ownership of
+ * the array is taken and fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
+ */
+struct fence_array *fence_array_create(int num_fences, struct fence **fences,
+                                      u64 context, unsigned seqno,
+                                      bool signal_on_any)
+{
+       struct fence_array *array;
+       size_t size = sizeof(*array);
+
+       /* Allocate the callback structures behind the array. */
+       size += num_fences * sizeof(struct fence_array_cb);
+       array = kzalloc(size, GFP_KERNEL);
+       if (!array)
+               return NULL;
+
+       spin_lock_init(&array->lock);
+       fence_init(&array->base, &fence_array_ops, &array->lock,
+                  context, seqno);
+
+       array->num_fences = num_fences;
+       atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
+       array->fences = fences;
+
+       return array;
+}
+EXPORT_SYMBOL(fence_array_create);
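
A hypothetical usage sketch for the new API: fence_a and fence_b stand in for pre-existing fences (error handling elided), and, per the kernel-doc above, fence_array_create() takes ownership of the kmalloc'd fences[] array:

    struct fence **fences;
    struct fence_array *array;
    u64 context = fence_context_alloc(1);

    fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
    fences[0] = fence_get(fence_a);
    fences[1] = fence_get(fence_b);

    /* signal_on_any = false: the array signals once both fences signal. */
    array = fence_array_create(2, fences, context, 1, false);
    if (array)
            fence_wait(&array->base, false);  /* uninterruptible wait */
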
index 7b05dbe9b2964fd8c431dde0512a0ba2f4fd6ff2..4d51f9e83fa83ad9a815cb8525756437d81fe650 100644 (file)
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic_t fence_context_counter = ATOMIC_INIT(0);
+static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
 
 /**
  * fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
  * This function will return the first index of the number of fences allocated.
  * The fence context is used for setting fence->context to a unique number.
  */
-unsigned fence_context_alloc(unsigned num)
+u64 fence_context_alloc(unsigned num)
 {
        BUG_ON(!num);
-       return atomic_add_return(num, &fence_context_counter) - num;
+       return atomic64_add_return(num, &fence_context_counter) - num;
 }
 EXPORT_SYMBOL(fence_context_alloc);
 
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
  */
 void
 fence_init(struct fence *fence, const struct fence_ops *ops,
-            spinlock_t *lock, unsigned context, unsigned seqno)
+            spinlock_t *lock, u64 context, unsigned seqno)
 {
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
index c0bd5722c997e29d36a2771f18d4feb9b44c5a74..9566a62ad8e3186e719751df84aa35f156dae2b2 100644 (file)
 #include <linux/reservation.h>
 #include <linux/export.h>
 
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer.  A reservation object
+ * can have one exclusive fence attached (normally associated with
+ * write operations) or N shared fences (read operations).  The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
 DEFINE_WW_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);
 
 const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
-/*
- * Reserve space to add a shared fence to a reservation_object,
- * must be called with obj->lock held.
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence().  Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
  */
 int reservation_object_reserve_shared(struct reservation_object *obj)
 {
@@ -180,7 +199,11 @@ done:
                fence_put(old_fence);
 }
 
-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
  * Add a fence to a shared slot, obj->lock must be held, and
  * reservation_object_reserve_shared_fence has been called.
  */
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
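
Taken together, the two documented calls form the usual write-side pattern; a sketch, assuming obj->lock (the reservation ww_mutex) is already held:

    int ret = reservation_object_reserve_shared(obj);
    if (ret)
            return ret;
    reservation_object_add_shared_fence(obj, fence);
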
 
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
+ * Add a fence to the exclusive slot.  The obj->lock must be held.
+ */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
                                       struct fence *fence)
 {
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without update side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
                                      struct fence **pfence_excl,
                                      unsigned *pshared_count,
@@ -319,6 +361,18 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
 
+/**
+ * reservation_object_wait_timeout_rcu - Wait on a reservation object's
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
 long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
                                         bool wait_all, bool intr,
                                         unsigned long timeout)
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
        return ret;
 }
 
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
 bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
                                          bool test_all)
 {
index f08cf2d8309eaf233e4ba31280c9609c2d34b6e5..9aaa608dfe0111515e01b59227a7ca87d72ab61b 100644 (file)
@@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence)
 
        sync_file->num_fences = 1;
        atomic_set(&sync_file->status, 1);
-       snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d",
+       snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
                 fence->ops->get_driver_name(fence),
                 fence->ops->get_timeline_name(fence), fence->context,
                 fence->seqno);
index 8e304b1befc56385b8fc34d7697a529289d8ae3b..75bd6621dc5d7fb068ca6f4dd24d71bfa56ad640 100644 (file)
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
        u32             mbr_dus;        /* Destination Microblock Stride Register */
 };
 
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
 struct at_xdmac_desc {
        struct at_xdmac_lld             lld;
        enum dma_transfer_direction     direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
        unsigned int                    xfer_size;
        struct list_head                descs_list;
        struct list_head                xfer_node;
-};
+} __aligned(sizeof(u64));
 
 static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
 {
@@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        u32                     cur_nda, check_nda, cur_ubc, mask, value;
        u8                      dwidth = 0;
        unsigned long           flags;
+       bool                    initd;
 
        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
@@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        residue = desc->xfer_size;
        /*
         * Flush FIFO: only relevant when the transfer is source peripheral
-        * synchronized.
+        * synchronized. Flush is needed before reading CUBC because data in
+        * the FIFO are not reported by CUBC. Reporting a residue of the
+        * transfer length while data remain in the FIFO can cause problems.
+        * Use case: the Atmel USART has a receive timeout, i.e. characters
+        * have been received but no new character has arrived for a while.
+        * On timeout, it requests the residue. If the data are still in the
+        * DMA FIFO, we would report a residue equal to the transfer length,
+        * meaning no data received. An application waiting for those data
+        * would then hang, since another USART timeout cannot occur without
+        * new data arriving.
         */
        mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
        value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        }
 
        /*
-        * When processing the residue, we need to read two registers but we
-        * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
-        * we stand in the descriptor list and AT_XDMAC_CUBC is used
-        * to know how many data are remaining for the current descriptor.
-        * Since the dma channel is not paused to not loose data, between the
-        * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
-        * descriptor.
-        * For that reason, after reading AT_XDMAC_CUBC, we check if we are
-        * still using the same descriptor by reading a second time
-        * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
-        * read again AT_XDMAC_CUBC.
+        * The easiest way to compute the residue would be to pause the DMA,
+        * but doing so can lead to missed data, as some devices have no
+        * FIFO to buffer it.
+        * We need to read several registers because:
+        * - the DMA is running, so a descriptor change is possible while
+        * reading these registers
+        * - when a block transfer is done, the CUBC register is reset to its
+        * initial value until the next descriptor is fetched. That value
+        * would corrupt the residue calculation, so we have to skip it.
+        *
+        * INITD --------                    ------------
+        *              |____________________|
+        *       _______________________  _______________
+        * NDA       @desc2             \/   @desc3
+        *       _______________________/\_______________
+        *       __________  ___________  _______________
+        * CUBC       0    \/ MAX desc1 \/  MAX desc2
+        *       __________/\___________/\_______________
+        *
+        * Since descriptors are aligned on 64 bits, we can assume that
+        * the update of NDA and CUBC is atomic.
         * Memory barriers are used to ensure the read order of the registers.
-        * A max number of retries is set because unlikely it can never ends if
-        * we are transferring a lot of data with small buffers.
+        * A maximum number of retries is set because, although unlikely,
+        * the loop could otherwise never end.
         */
-       cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-       rmb();
-       cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
        for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
-               rmb();
                check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
-               if (likely(cur_nda == check_nda))
-                       break;
-
-               cur_nda = check_nda;
+               rmb();
+               initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
                rmb();
                cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+               rmb();
+               cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+               rmb();
+
+               if ((check_nda == cur_nda) && initd)
+                       break;
        }
 
        if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1470,6 +1489,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                goto spin_unlock;
        }
 
+       /*
+        * Flush FIFO: only relevant when the transfer is source peripheral
+        * synchronized. Another flush is needed here because CUBC is updated
+        * when the controller sends the data write command. It can lead to
+        * report data that are not written in the memory or the device. The
+        * FIFO flush ensures that data are really written.
+        */
+       if ((desc->lld.mbr_cfg & mask) == value) {
+               at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+               while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+                       cpu_relax();
+       }
+
        /*
         * Remove size of all microblocks already transferred and the current
         * one. Then add the remaining size to transfer of the current
index 25d1dadcddd1ddf22cd63e6d91e0b95901d34bbe..d0446a75990aeb046acb8d50a329bfeb7a9999c6 100644 (file)
@@ -703,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
                goto free_resources;
        }
 
-       src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
-                                PAGE_SIZE, DMA_TO_DEVICE);
+       src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
+                              (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+                              DMA_TO_DEVICE);
        unmap->addr[0] = src_dma;
 
        ret = dma_mapping_error(dma_chan->device->dev, src_dma);
@@ -714,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
        }
        unmap->to_cnt = 1;
 
-       dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
-                                 PAGE_SIZE, DMA_FROM_DEVICE);
+       dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
+                               (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+                               DMA_FROM_DEVICE);
        unmap->addr[1] = dest_dma;
 
        ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
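
The underlying bug: the self-test buffers come from kmalloc() and need not be page-aligned, so mapping with offset 0 could DMA the wrong bytes of the page. The masking expression is equivalent to the offset_in_page() helper from linux/mm.h, with which the call would read:

    src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
                           offset_in_page(src), PAGE_SIZE, DMA_TO_DEVICE);
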
index 6aa256b0a1ed34c2e1a4dff091c25dce5309fb34..c3ee3ad98a63834e055b0218465d3033ff127a1c 100644 (file)
@@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
        list_for_each(item, &mc_devices) {
                mci = list_entry(item, struct mem_ctl_info, link);
 
-               edac_mod_work(&mci->work, value);
+               if (mci->op_state == OP_RUNNING_POLL)
+                       edac_mod_work(&mci->work, value);
        }
        mutex_unlock(&mem_ctls_mutex);
 }
index b4d0bf6534cf43732df678eafd956fe39209ce03..4fb2eb7c800d8839c6329cd34589eeea4dbaa5c0 100644 (file)
@@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
        { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
 };
 
-#define RIR_RNK_TGT(reg)               GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg)                GET_BITFIELD(reg,  2, 14)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
+       GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+       GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
 
 /* Device 16, functions 2-7 */
 
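For reference, GET_BITFIELD() in this driver extracts an inclusive bit range from a register value; assuming the usual definition (sketch below), the new macros read the rank target from bits 23:20 on Broadwell versus 19:16 on older parts, and widen the RIR offset field to bit 15 on Haswell and Broadwell:

    /* Assumed to match the driver's helper: extract bits hi..lo of v. */
    #define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))

    u32 rank = RIR_RNK_TGT(BROADWELL, reg);  /* bits 23:20 of reg */
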
@@ -326,6 +329,7 @@ struct pci_id_descr {
 struct pci_id_table {
        const struct pci_id_descr       *descr;
        int                             n_devs;
+       enum type                       type;
 };
 
 struct sbridge_dev {
@@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
        { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0)          },
 };
 
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) {     \
+       .descr = A,                     \
+       .n_devs = ARRAY_SIZE(A),        \
+       .type = T                       \
+}
+
 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_knl_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
        {0,}
 };
 
@@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
 };
 
 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
-       PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
+       PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
        {0,}                    /* 0 terminated list. */
 };
 
@@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
                                pci_read_config_dword(pvt->pci_tad[i],
                                                      rir_offset[j][k],
                                                      &reg);
-                               tmp_mb = RIR_OFFSET(reg) << 6;
+                               tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
 
                                gb = div_u64_rem(tmp_mb, 1024, &mb);
                                edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
                                         i, j, k,
                                         gb, (mb*1000)/1024,
                                         ((u64)tmp_mb) << 20L,
-                                        (u32)RIR_RNK_TGT(reg),
+                                        (u32)RIR_RNK_TGT(pvt->info.type, reg),
                                         reg);
                        }
                }
@@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
                              rir_offset[n_rir][idx],
                              &reg);
-       *rank = RIR_RNK_TGT(reg);
+       *rank = RIR_RNK_TGT(pvt->info.type, reg);
 
        edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
                 n_rir,
@@ -2369,22 +2378,19 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
  * @num_mc: pointer to the memory controllers count, to be incremented in case
  *         of success.
  * @table: model specific table
- * @allow_dups: allow for multiple devices to exist with the same device id
- *              (as implemented, this isn't expected to work correctly in the
- *              multi-socket case).
- * @multi_bus: don't assume devices on different buses belong to different
- *             memory controllers.
  *
  * returns 0 in case of success or error code
  */
-static int sbridge_get_all_devices_full(u8 *num_mc,
-                                       const struct pci_id_table *table,
-                                       int allow_dups,
-                                       int multi_bus)
+static int sbridge_get_all_devices(u8 *num_mc,
+                                       const struct pci_id_table *table)
 {
        int i, rc;
        struct pci_dev *pdev = NULL;
+       int allow_dups = 0;
+       int multi_bus = 0;
 
+       if (table->type == KNIGHTS_LANDING)
+               allow_dups = multi_bus = 1;
        while (table && table->descr) {
                for (i = 0; i < table->n_devs; i++) {
                        if (!allow_dups || i == 0 ||
@@ -2411,11 +2417,6 @@ static int sbridge_get_all_devices_full(u8 *num_mc,
        return 0;
 }
 
-#define sbridge_get_all_devices(num_mc, table) \
-               sbridge_get_all_devices_full(num_mc, table, 0, 0)
-#define sbridge_get_all_devices_knl(num_mc, table) \
-               sbridge_get_all_devices_full(num_mc, table, 1, 1)
-
 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
                                 struct sbridge_dev *sbridge_dev)
 {
@@ -3357,12 +3358,12 @@ fail0:
 #define ICPU(model, table) \
        { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
 
-/* Order here must match "enum type" */
 static const struct x86_cpu_id sbridge_cpuids[] = {
        ICPU(0x2d, pci_dev_descr_sbridge_table),        /* SANDY_BRIDGE */
        ICPU(0x3e, pci_dev_descr_ibridge_table),        /* IVY_BRIDGE */
        ICPU(0x3f, pci_dev_descr_haswell_table),        /* HASWELL */
        ICPU(0x4f, pci_dev_descr_broadwell_table),      /* BROADWELL */
+       ICPU(0x56, pci_dev_descr_broadwell_table),      /* BROADWELL-DE */
        ICPU(0x57, pci_dev_descr_knl_table),            /* KNIGHTS_LANDING */
        { }
 };
@@ -3398,7 +3399,7 @@ static int sbridge_probe(const struct x86_cpu_id *id)
                         mc, mc + 1, num_mc);
 
                sbridge_dev->mc = mc++;
-               rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids);
+               rc = sbridge_register_mci(sbridge_dev, ptable->type);
                if (unlikely(rc < 0))
                        goto fail1;
        }
index 8b3226dca1d9d25e004bbb2a030a7d77e9ff48d2..caff46c0e214bd3e18fda8ccbd7949d5c09ea00b 100644 (file)
@@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev)
 
        palmas_enable_irq(palmas_usb);
        /* perform initial detection */
+       if (palmas_usb->enable_gpio_vbus_detection)
+               palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
        palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
        device_set_wakeup_capable(&pdev->dev, true);
        return 0;
index a850cbc48d8d17a3de7ba51cb7ea033e57ed4fcd..c49d50e68aeebb0dbe368f8e8f526b8c18bdebb0 100644 (file)
@@ -174,6 +174,7 @@ static __init void reserve_regions(void)
 {
        efi_memory_desc_t *md;
        u64 paddr, npages, size;
+       int resv;
 
        if (efi_enabled(EFI_DBG))
                pr_info("Processing EFI memory map:\n");
@@ -190,12 +191,14 @@ static __init void reserve_regions(void)
                paddr = md->phys_addr;
                npages = md->num_pages;
 
+               resv = is_reserve_region(md);
                if (efi_enabled(EFI_DBG)) {
                        char buf[64];
 
-                       pr_info("  0x%012llx-0x%012llx %s",
+                       pr_info("  0x%012llx-0x%012llx %s%s\n",
                                paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
-                               efi_md_typeattr_format(buf, sizeof(buf), md));
+                               efi_md_typeattr_format(buf, sizeof(buf), md),
+                               resv ? "*" : "");
                }
 
                memrange_efi_to_native(&paddr, &npages);
@@ -204,14 +207,9 @@ static __init void reserve_regions(void)
                if (is_normal_ram(md))
                        early_init_dt_add_memory_arch(paddr, size);
 
-               if (is_reserve_region(md)) {
+               if (resv)
                        memblock_mark_nomap(paddr, size);
-                       if (efi_enabled(EFI_DBG))
-                               pr_cont("*");
-               }
 
-               if (efi_enabled(EFI_DBG))
-                       pr_cont("\n");
        }
 
        set_bit(EFI_MEMMAP, &efi.flags);
index 48da857f4774829d2301559c7e69656bae636307..d7860614f87f532caeb21145c78352d38161b2a3 100644 (file)
@@ -33,6 +33,7 @@ config ARCH_REQUIRE_GPIOLIB
 
 menuconfig GPIOLIB
        bool "GPIO Support"
+       select ANON_INODES
        help
          This enables GPIO support through the generic GPIO library.
          You only need to enable this, if you also want to enable
@@ -48,7 +49,7 @@ config GPIO_DEVRES
 
 config OF_GPIO
        def_bool y
-       depends on OF || COMPILE_TEST
+       depends on OF
 
 config GPIO_ACPI
        def_bool y
@@ -401,9 +402,12 @@ config GPIO_TB10X
        select OF_GPIO
 
 config GPIO_TEGRA
-       bool
-       default y
+       bool "NVIDIA Tegra GPIO support"
+       default ARCH_TEGRA
        depends on ARCH_TEGRA || COMPILE_TEST
+       depends on OF
+       help
+         Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
 
 config GPIO_TS4800
        tristate "TS-4800 DIO blocks and compatibles"
@@ -530,7 +534,7 @@ menu "Port-mapped I/O GPIO drivers"
 
 config GPIO_104_DIO_48E
        tristate "ACCES 104-DIO-48E GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
@@ -540,7 +544,7 @@ config GPIO_104_DIO_48E
 
 config GPIO_104_IDIO_16
        tristate "ACCES 104-IDIO-16 GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16,
@@ -551,7 +555,7 @@ config GPIO_104_IDIO_16
 
 config GPIO_104_IDI_48
        tristate "ACCES 104-IDI-48 GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
@@ -627,7 +631,7 @@ config GPIO_TS5500
 
 config GPIO_WS16C48
        tristate "WinSystems WS16C48 GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the WinSystems WS16C48. The base port
index 1a647c07be677784cbac72912a7701e4c0bb1a18..fcf776971ca92ce5e3f997b471d37fe1b87d338a 100644 (file)
@@ -75,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
        struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
        const unsigned io_port = offset / 8;
-       const unsigned control_port = io_port / 2;
+       const unsigned int control_port = io_port / 3;
        const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
        unsigned long flags;
        unsigned control;
@@ -115,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 {
        struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
        const unsigned io_port = offset / 8;
-       const unsigned control_port = io_port / 2;
+       const unsigned int control_port = io_port / 3;
        const unsigned mask = BIT(offset % 8);
        const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
        const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
index 6c75c83baf5a82a3eb1da0c7a5d5949bcf31e43b..2d2763ea1a68277eedf82f6e9c132be6f8706923 100644 (file)
@@ -247,6 +247,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
        idi48gpio->irq = irq[id];
 
        spin_lock_init(&idi48gpio->lock);
+       spin_lock_init(&idi48gpio->ack_lock);
 
        dev_set_drvdata(dev, idi48gpio);
 
index 9aabc48ff5de0d9302801856b5f8881b11d5c5a6..953e4b829e32b8aa02f0cdb0774f8fe8244ef565 100644 (file)
@@ -547,11 +547,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
        /* disable interrupts and clear status */
        for (i = 0; i < kona_gpio->num_bank; i++) {
                /* Unlock the entire bank first */
-               bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
+               bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
                writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
                writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
                /* Now re-lock the bank */
-               bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
+               bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
        }
 }
 
index d39014daeef9e206bfeb8cdf967e3c55c56db459..fc5f197906ac97b09b14617ca56d52896b59453a 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <mach/hardware.h>
 #include <mach/platform.h>
-#include <mach/irqs.h>
 
 #define LPC32XX_GPIO_P3_INP_STATE              _GPREG(0x000)
 #define LPC32XX_GPIO_P3_OUTP_SET               _GPREG(0x004)
@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
 
 static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
 {
-       return IRQ_LPC32XX_P0_P1_IRQ;
+       return -ENXIO;
 }
 
-static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
-       IRQ_LPC32XX_GPIO_00,
-       IRQ_LPC32XX_GPIO_01,
-       IRQ_LPC32XX_GPIO_02,
-       IRQ_LPC32XX_GPIO_03,
-       IRQ_LPC32XX_GPIO_04,
-       IRQ_LPC32XX_GPIO_05,
-};
-
 static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
 {
-       if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
-               return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
        return -ENXIO;
 }
 
-static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
-       IRQ_LPC32XX_GPI_00,
-       IRQ_LPC32XX_GPI_01,
-       IRQ_LPC32XX_GPI_02,
-       IRQ_LPC32XX_GPI_03,
-       IRQ_LPC32XX_GPI_04,
-       IRQ_LPC32XX_GPI_05,
-       IRQ_LPC32XX_GPI_06,
-       IRQ_LPC32XX_GPI_07,
-       IRQ_LPC32XX_GPI_08,
-       IRQ_LPC32XX_GPI_09,
-       -ENXIO, /* 10 */
-       -ENXIO, /* 11 */
-       -ENXIO, /* 12 */
-       -ENXIO, /* 13 */
-       -ENXIO, /* 14 */
-       -ENXIO, /* 15 */
-       -ENXIO, /* 16 */
-       -ENXIO, /* 17 */
-       -ENXIO, /* 18 */
-       IRQ_LPC32XX_GPI_19,
-       -ENXIO, /* 20 */
-       -ENXIO, /* 21 */
-       -ENXIO, /* 22 */
-       -ENXIO, /* 23 */
-       -ENXIO, /* 24 */
-       -ENXIO, /* 25 */
-       -ENXIO, /* 26 */
-       -ENXIO, /* 27 */
-       IRQ_LPC32XX_GPI_28,
-};
-
 static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
 {
-       if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
-               return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
        return -ENXIO;
 }
 
index e85e7539cf5d23d0eae1d4a1433b236253dd10cc..eb43ae4835c15d8a65ac8f4fde2de07c7b6e19ee 100644 (file)
@@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
        return gpio % 8;
 }
 
-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
+static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
 {
-       struct sch_gpio *sch = gpiochip_get_data(gc);
        unsigned short offset, bit;
        u8 reg_val;
 
@@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
        return reg_val;
 }
 
-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
+static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
                             int val)
 {
-       struct sch_gpio *sch = gpiochip_get_data(gc);
        unsigned short offset, bit;
        u8 reg_val;
 
@@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
        struct sch_gpio *sch = gpiochip_get_data(gc);
 
        spin_lock(&sch->lock);
-       sch_gpio_reg_set(gc, gpio_num, GIO, 1);
+       sch_gpio_reg_set(sch, gpio_num, GIO, 1);
        spin_unlock(&sch->lock);
        return 0;
 }
 
 static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
 {
-       return sch_gpio_reg_get(gc, gpio_num, GLV);
+       struct sch_gpio *sch = gpiochip_get_data(gc);
+       return sch_gpio_reg_get(sch, gpio_num, GLV);
 }
 
 static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
@@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
        struct sch_gpio *sch = gpiochip_get_data(gc);
 
        spin_lock(&sch->lock);
-       sch_gpio_reg_set(gc, gpio_num, GLV, val);
+       sch_gpio_reg_set(sch, gpio_num, GLV, val);
        spin_unlock(&sch->lock);
 }
 
@@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
        struct sch_gpio *sch = gpiochip_get_data(gc);
 
        spin_lock(&sch->lock);
-       sch_gpio_reg_set(gc, gpio_num, GIO, 0);
+       sch_gpio_reg_set(sch, gpio_num, GIO, 0);
        spin_unlock(&sch->lock);
 
        /*
@@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
                 * GPIO7 is configured by the CMC as SLPIOVR
                 * Enable GPIO[9:8] core powered gpios explicitly
                 */
-               sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
-               sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
+               sch_gpio_reg_set(sch, 8, GEN, 1);
+               sch_gpio_reg_set(sch, 9, GEN, 1);
                /*
                 * SUS_GPIO[2:0] enabled by default
                 * Enable SUS_GPIO3 resume powered gpio explicitly
                 */
-               sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
+               sch_gpio_reg_set(sch, 13, GEN, 1);
                break;
 
        case PCI_DEVICE_ID_INTEL_ITC_LPC:
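
Note: threading struct sch_gpio through directly matters because
sch_gpio_probe() pokes registers before gpiochip_add_data() has run, and
gpiochip_get_data() is only valid on a registered chip. The general shape
of the constraint (struct my_gpio and my_probe are hypothetical):

    struct my_gpio { struct gpio_chip chip; void __iomem *base; };

    static int my_probe(struct platform_device *pdev)
    {
            struct my_gpio *g = devm_kzalloc(&pdev->dev, sizeof(*g),
                                             GFP_KERNEL);
            if (!g)
                    return -ENOMEM;
            /* Any hardware setup here must use 'g' directly;
             * gpiochip_get_data(&g->chip) only works after this call. */
            return gpiochip_add_data(&g->chip, g);
    }
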
index ec891a27952f88e33bd9a7d6f6bfe6e5069f995a..661b0e34e0672eedba9e7896c742944aeda18477 100644 (file)
@@ -98,7 +98,6 @@ struct tegra_gpio_info {
        const struct tegra_gpio_soc_config      *soc;
        struct gpio_chip                        gc;
        struct irq_chip                         ic;
-       struct lock_class_key                   lock_class;
        u32                                     bank_count;
 };
 
@@ -547,6 +546,12 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
 };
 
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different category
+ * than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
 static int tegra_gpio_probe(struct platform_device *pdev)
 {
        const struct tegra_gpio_soc_config *config;
@@ -660,7 +665,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
 
                bank = &tgi->bank_info[GPIO_BANK(gpio)];
 
-               irq_set_lockdep_class(irq, &tgi->lock_class);
+               irq_set_lockdep_class(irq, &gpio_lock_class);
                irq_set_chip_data(irq, bank);
                irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
        }
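
Note: the key has to move out of the kmalloc'd tegra_gpio_info because
lockdep identifies lock classes by the static address of the key; a key in
dynamically allocated memory is not a valid class. The usual pattern,
sketched with made-up names:

    /* One static key for all instances; lockdep keys on its address. */
    static struct lock_class_key my_gpio_irq_lock_class;

    static void my_gpio_setup_irq(unsigned int irq)
    {
            /* Put the GPIO irq_desc lock in its own class so nesting under
             * the parent interrupt's lock is not reported as recursion. */
            irq_set_lockdep_class(irq, &my_gpio_irq_lock_class);
    }
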
index 75c6355b018df46a192feaa4f57dedcb5be6f7b7..e72794e463aa3fd5f3adb6e05128a3f01126932b 100644 (file)
@@ -709,7 +709,13 @@ static int zynq_gpio_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "input clock not found.\n");
                return PTR_ERR(gpio->clk);
        }
+       ret = clk_prepare_enable(gpio->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Unable to enable clock.\n");
+               return ret;
+       }
 
+       pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
@@ -747,6 +753,7 @@ err_pm_put:
        pm_runtime_put(&pdev->dev);
 err_pm_dis:
        pm_runtime_disable(&pdev->dev);
+       clk_disable_unprepare(gpio->clk);
 
        return ret;
 }
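
Note: pm_runtime_set_active() is called before pm_runtime_enable() so the
PM core's view matches the clock that probe just switched on, and the
error path unwinds in reverse order. A condensed sketch of the sequence
(my_probe_pm is a hypothetical helper):

    static int my_probe_pm(struct platform_device *pdev, struct clk *clk)
    {
            int ret = clk_prepare_enable(clk);

            if (ret)
                    return ret;
            pm_runtime_set_active(&pdev->dev); /* device is powered now */
            pm_runtime_enable(&pdev->dev);
            ret = pm_runtime_get_sync(&pdev->dev);
            if (ret < 0) {
                    pm_runtime_disable(&pdev->dev);
                    clk_disable_unprepare(clk);  /* reverse order unwind */
                    return ret;
            }
            return 0;
    }
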
index 3a5c7011ad3b3e832d7cce6e55ed62562bba94be..8b830996fe0212d3ae0153ae5b708a84fa53976e 100644 (file)
@@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
        if (!desc && gpio_is_valid(gpio))
                return -EPROBE_DEFER;
 
+       err = gpiod_request(desc, label);
+       if (err)
+               return err;
+
        if (flags & GPIOF_OPEN_DRAIN)
                set_bit(FLAG_OPEN_DRAIN, &desc->flags);
 
@@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
        if (flags & GPIOF_ACTIVE_LOW)
                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
-       err = gpiod_request(desc, label);
-       if (err)
-               return err;
-
        if (flags & GPIOF_DIR_IN)
                err = gpiod_direction_input(desc);
        else
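
Note: requesting first means a failed gpiod_request() returns before any
flag is written, so nothing has to be scrubbed off a descriptor the caller
never owned; the gpiolib.c hunk further down drops the matching clear_bit()
rollback in __gpiod_request() for the same reason. The legacy consumer call
is unchanged, e.g. (GPIO number and label illustrative):

    int err = gpio_request_one(42, GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW,
                               "status-led");
    if (err)
            return err;
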
index d22dcc38179dba153d3874e39847665a2811f8b9..4aabddb38b593f3bafca1dfd21f44ab75c2fdc4f 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/io.h>
+#include <linux/io-mapping.h>
 #include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
index d407f904a31cf475647d2624ac84cd5f624c425d..be74bd370f1fc5443586d57076e9c04a80d74e1f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/cdev.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 #include <uapi/linux/gpio.h>
 
 #include "gpiolib.h"
@@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        struct gpio_device *gdev = filp->private_data;
        struct gpio_chip *chip = gdev->chip;
-       int __user *ip = (int __user *)arg;
+       void __user *ip = (void __user *)arg;
 
        /* We fail any subsequent ioctl():s when the chip is gone */
        if (!chip)
@@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        return -EINVAL;
 }
 
+#ifdef CONFIG_COMPAT
+static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
+                             unsigned long arg)
+{
+       return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
 /**
  * gpio_chrdev_open() - open the chardev for ioctl operations
  * @inode: inode for this chardev
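
Note: the compat wrapper exists because a 32-bit process passes a 32-bit
pointer in arg; compat_ptr() widens it correctly (including the s390
address-space quirk) before the native handler casts it back to
void __user *. From userspace the call is the same in either ABI, e.g.:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/gpio.h>

    int main(void)
    {
            struct gpiochip_info info;
            int fd = open("/dev/gpiochip0", O_RDONLY);

            if (fd < 0 || ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info) < 0)
                    return 1;
            printf("%s: %u lines\n", info.name, info.lines);
            return 0;
    }
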
@@ -431,14 +440,15 @@ static const struct file_operations gpio_fileops = {
        .owner = THIS_MODULE,
        .llseek = noop_llseek,
        .unlocked_ioctl = gpio_ioctl,
-       .compat_ioctl = gpio_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = gpio_ioctl_compat,
+#endif
 };
 
 static void gpiodevice_release(struct device *dev)
 {
        struct gpio_device *gdev = dev_get_drvdata(dev);
 
-       cdev_del(&gdev->chrdev);
        list_del(&gdev->list);
        ida_simple_remove(&gpio_ida, gdev->id);
        kfree(gdev->label);
@@ -471,7 +481,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
 
        /* From this point, the .release() function cleans up gpio_device */
        gdev->dev.release = gpiodevice_release;
-       get_device(&gdev->dev);
        pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n",
                 __func__, gdev->base, gdev->base + gdev->ngpio - 1,
                 dev_name(&gdev->dev), gdev->chip->label ? : "generic");
@@ -618,6 +627,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
                goto err_free_label;
        }
 
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
        for (i = 0; i < chip->ngpio; i++) {
                struct gpio_desc *desc = &gdev->descs[i];
 
@@ -649,8 +660,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
                }
        }
 
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
 #ifdef CONFIG_PINCTRL
        INIT_LIST_HEAD(&gdev->pin_ranges);
 #endif
@@ -759,6 +768,8 @@ void gpiochip_remove(struct gpio_chip *chip)
         * be removed, else it will be dangling until the last user is
         * gone.
         */
+       cdev_del(&gdev->chrdev);
+       device_del(&gdev->dev);
        put_device(&gdev->dev);
 }
 EXPORT_SYMBOL_GPL(gpiochip_remove);
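
Note: this is the standard two-step teardown for a refcounted struct
device: device_del() makes it invisible to the system, put_device() drops
the initial reference, and the release() callback (gpiodevice_release()
above) frees the object only once the last holder lets go. Schematically:

    cdev_del(&gdev->chrdev);  /* stop new chardev opens                  */
    device_del(&gdev->dev);   /* unregister from driver core and sysfs   */
    put_device(&gdev->dev);   /* release() runs when all references drop */
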
@@ -858,7 +869,7 @@ struct gpio_chip *gpiochip_find(void *data,
 
        spin_lock_irqsave(&gpio_lock, flags);
        list_for_each_entry(gdev, &gpio_devices, list)
-               if (match(gdev->chip, data))
+               if (gdev->chip && match(gdev->chip, data))
                        break;
 
        /* No match? */
@@ -1341,14 +1352,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
                spin_lock_irqsave(&gpio_lock, flags);
        }
 done:
-       if (status < 0) {
-               /* Clear flags that might have been set by the caller before
-                * requesting the GPIO.
-                */
-               clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
-               clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
-               clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
-       }
        spin_unlock_irqrestore(&gpio_lock, flags);
        return status;
 }
@@ -1356,11 +1359,18 @@ done:
 /*
  * This descriptor validation needs to be inserted verbatim into each
  * function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication.
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
  */
 #define VALIDATE_DESC(desc) do { \
-       if (!desc || !desc->gdev) { \
-               pr_warn("%s: invalid GPIO\n", __func__); \
+       if (!desc) \
+               return 0; \
+       if (IS_ERR(desc)) {                                             \
+               pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
+               return PTR_ERR(desc); \
+       } \
+       if (!desc->gdev) { \
+               pr_warn("%s: invalid GPIO (no device)\n", __func__); \
                return -EINVAL; \
        } \
        if (!desc->gdev->chip) { \
@@ -1370,8 +1380,14 @@ done:
        } } while (0)
 
 #define VALIDATE_DESC_VOID(desc) do { \
-       if (!desc || !desc->gdev) { \
-               pr_warn("%s: invalid GPIO\n", __func__); \
+       if (!desc) \
+               return; \
+       if (IS_ERR(desc)) {                                             \
+               pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
+               return; \
+       } \
+       if (!desc->gdev) { \
+               pr_warn("%s: invalid GPIO (no device)\n", __func__); \
                return; \
        } \
        if (!desc->gdev->chip) { \
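
Note: the NULL-bails-out convention is what makes the *_optional getters
convenient: a consumer can hold a NULL descriptor for an absent line and
still call the API unconditionally. Intended consumer pattern (sketch):

    struct gpio_desc *reset;

    /* NULL, not an error, when the board has no reset line wired. */
    reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
    if (IS_ERR(reset))
            return PTR_ERR(reset);

    gpiod_set_value(reset, 1); /* no-op on NULL: the macro returns early */
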
@@ -2040,7 +2056,14 @@ int gpiod_to_irq(const struct gpio_desc *desc)
        struct gpio_chip *chip;
        int offset;
 
-       VALIDATE_DESC(desc);
+       /*
+        * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
+        * requires this function to not return zero on an invalid descriptor
+        * but rather a negative error number.
+        */
+       if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
+               return -EINVAL;
+
        chip = desc->gdev->chip;
        offset = gpio_chip_hwgpio(desc);
        if (chip->to_irq) {
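
Note: callers treat any non-negative return from gpiod_to_irq() as a
usable IRQ number, so a missing descriptor must produce a negative errno
rather than the 0 that VALIDATE_DESC() would return. Typical caller
(my_handler and priv are placeholders):

    int irq = gpiod_to_irq(desc);

    if (irq < 0)
            return irq;  /* a missing GPIO must not look like IRQ 0 */
    ret = request_irq(irq, my_handler, IRQF_TRIGGER_FALLING,
                      "my-dev", priv);
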
@@ -2066,17 +2089,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
  */
 int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
 {
-       if (offset >= chip->ngpio)
-               return -EINVAL;
+       struct gpio_desc *desc;
 
-       if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
+       desc = gpiochip_get_desc(chip, offset);
+       if (IS_ERR(desc))
+               return PTR_ERR(desc);
+
+       /* Flush direction if something changed behind our back */
+       if (chip->get_direction) {
+               int dir = chip->get_direction(chip, offset);
+
+               if (dir)
+                       clear_bit(FLAG_IS_OUT, &desc->flags);
+               else
+                       set_bit(FLAG_IS_OUT, &desc->flags);
+       }
+
+       if (test_bit(FLAG_IS_OUT, &desc->flags)) {
                chip_err(chip,
                          "%s: tried to flag a GPIO set as output for IRQ\n",
                          __func__);
                return -EIO;
        }
 
-       set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+       set_bit(FLAG_USED_AS_IRQ, &desc->flags);
        return 0;
 }
 EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
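
Note: the flush relies on the gpio_chip->get_direction() convention of
returning 1 for an input line and 0 for an output. A minimal driver-side
callback, with a made-up register layout (struct my_gpio, MY_DIR_REG):

    static int my_gpio_get_direction(struct gpio_chip *chip,
                                     unsigned int offset)
    {
            struct my_gpio *g = gpiochip_get_data(chip);

            /* Direction register: bit set means output.
             * Return 1 for input, 0 for output. */
            return !(readl(g->base + MY_DIR_REG) & BIT(offset));
    }
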
@@ -2543,28 +2579,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(gpiod_get_optional);
 
-/**
- * gpiod_parse_flags - helper function to parse GPIO lookup flags
- * @desc:      gpio to be setup
- * @lflags:    gpio_lookup_flags - returned from of_find_gpio() or
- *             of_get_gpio_hog()
- *
- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
- */
-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
-{
-       if (lflags & GPIO_ACTIVE_LOW)
-               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-       if (lflags & GPIO_OPEN_DRAIN)
-               set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-       if (lflags & GPIO_OPEN_SOURCE)
-               set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-}
 
 /**
  * gpiod_configure_flags - helper function to configure a given GPIO
  * @desc:      gpio whose value will be assigned
  * @con_id:    function within the GPIO consumer
+ * @lflags:    gpio_lookup_flags - returned from of_find_gpio() or
+ *             of_get_gpio_hog()
  * @dflags:    gpiod_flags - optional GPIO initialization flags
  *
  * Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -2572,10 +2593,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
  * occurred while trying to acquire the GPIO.
  */
 static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
-                                enum gpiod_flags dflags)
+               unsigned long lflags, enum gpiod_flags dflags)
 {
        int status;
 
+       if (lflags & GPIO_ACTIVE_LOW)
+               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+       if (lflags & GPIO_OPEN_DRAIN)
+               set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+       if (lflags & GPIO_OPEN_SOURCE)
+               set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
        /* No particular flag request, return here... */
        if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
                pr_debug("no flags found for %s\n", con_id);
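
Note: the lflags consumed here are machine-level gpio_lookup_flags, coming
from device tree or a board lookup table, and are now latched into the
descriptor only after gpiod_request() succeeds. A board table exercising
them might look like this (all names illustrative), registered at board
init with gpiod_add_lookup_table():

    #include <linux/gpio/machine.h>

    static struct gpiod_lookup_table my_gpios = {
            .dev_id = "my-device",
            .table = {
                    /* chip label, line offset, con_id, index, flags */
                    GPIO_LOOKUP_IDX("gpiochip0", 16, "reset", 0,
                                    GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN),
                    { },
            },
    };
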
@@ -2642,13 +2670,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
                return desc;
        }
 
-       gpiod_parse_flags(desc, lookupflags);
-
        status = gpiod_request(desc, con_id);
        if (status < 0)
                return ERR_PTR(status);
 
-       status = gpiod_configure_flags(desc, con_id, flags);
+       status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
        if (status < 0) {
                dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
                gpiod_put(desc);
@@ -2704,6 +2730,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
        if (IS_ERR(desc))
                return desc;
 
+       ret = gpiod_request(desc, NULL);
+       if (ret)
+               return ERR_PTR(ret);
+
        if (active_low)
                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
@@ -2714,10 +2744,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
                        set_bit(FLAG_OPEN_SOURCE, &desc->flags);
        }
 
-       ret = gpiod_request(desc, NULL);
-       if (ret)
-               return ERR_PTR(ret);
-
        return desc;
 }
 EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -2770,8 +2796,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
        chip = gpiod_to_chip(desc);
        hwnum = gpio_chip_hwgpio(desc);
 
-       gpiod_parse_flags(desc, lflags);
-
        local_desc = gpiochip_request_own_desc(chip, hwnum, name);
        if (IS_ERR(local_desc)) {
                status = PTR_ERR(local_desc);
@@ -2780,7 +2804,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
                return status;
        }
 
-       status = gpiod_configure_flags(desc, name, dflags);
+       status = gpiod_configure_flags(desc, name, lflags, dflags);
        if (status < 0) {
                pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
                       name, chip->label, hwnum, status);
index be43afb08c69b727a5583772b69b57cc8c6ebd42..e3dba6f44a79e1e5c032c8d6bc16bc6959f02e3a 100644 (file)
@@ -8,7 +8,7 @@ drm-y       :=  drm_auth.o drm_bufs.o drm_cache.o \
                drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
                drm_scatter.o drm_pci.o \
                drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
-               drm_crtc.o drm_modes.o drm_edid.o \
+               drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
                drm_info.o drm_debugfs.o drm_encoder_slave.o \
                drm_trace_points.o drm_global.o drm_prime.o \
                drm_rect.o drm_vma_manager.o drm_flip_work.o \
@@ -23,7 +23,8 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
 
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
                drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
-               drm_kms_helper_common.o drm_dp_dual_mode_helper.o
+               drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
+               drm_simple_kms_helper.o
 
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
index 992f00b65be45a2b9ca8f16058aaed076918ae2e..eb09037a7161aa81e9525b55f567ca61f09ab73c 100644 (file)
@@ -85,8 +85,12 @@ extern int amdgpu_vm_debug;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_powerplay;
+extern int amdgpu_powercontainment;
 extern unsigned amdgpu_pcie_gen_cap;
 extern unsigned amdgpu_pcie_lane_cap;
+extern unsigned amdgpu_cg_mask;
+extern unsigned amdgpu_pg_mask;
+extern char *amdgpu_disable_cu;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
 #define AMDGPU_MAX_USEC_TIMEOUT                        100000  /* 100 ms */
@@ -183,6 +187,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
 int amdgpu_set_powergating_state(struct amdgpu_device *adev,
                                  enum amd_ip_block_type block_type,
                                  enum amd_powergating_state state);
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+                        enum amd_ip_block_type block_type);
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+                   enum amd_ip_block_type block_type);
 
 struct amdgpu_ip_block_version {
        enum amd_ip_block_type type;
@@ -594,11 +602,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
                     struct reservation_object *resv,
                     void *owner);
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
-                            struct fence *fence);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+                                    struct amdgpu_ring *ring);
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);
@@ -754,12 +760,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job);
 
+void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
-void amdgpu_job_free_func(struct kref *refcount);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f);
-void amdgpu_job_timeout_func(struct work_struct *work);
 
 struct amdgpu_ring {
        struct amdgpu_device            *adev;
@@ -771,8 +776,6 @@ struct amdgpu_ring {
        struct amdgpu_bo        *ring_obj;
        volatile uint32_t       *ring;
        unsigned                rptr_offs;
-       u64                     next_rptr_gpu_addr;
-       volatile u32            *next_rptr_cpu_addr;
        unsigned                wptr;
        unsigned                wptr_old;
        unsigned                ring_size;
@@ -791,7 +794,6 @@ struct amdgpu_ring {
        u32                     doorbell_index;
        bool                    use_doorbell;
        unsigned                wptr_offs;
-       unsigned                next_rptr_offs;
        unsigned                fence_offs;
        uint64_t                current_ctx;
        enum amdgpu_ring_type   type;
@@ -799,6 +801,9 @@ struct amdgpu_ring {
        unsigned                cond_exe_offs;
        u64                             cond_exe_gpu_addr;
        volatile u32    *cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+       struct dentry *ent;
+#endif
 };
 
 /*
@@ -861,6 +866,7 @@ struct amdgpu_vm {
        struct amdgpu_bo        *page_directory;
        unsigned                max_pde_used;
        struct fence            *page_directory_fence;
+       uint64_t                last_eviction_counter;
 
        /* array of page tables, one for each page directory entry */
        struct amdgpu_vm_pt     *page_tables;
@@ -883,13 +889,14 @@ struct amdgpu_vm_id {
        struct fence            *first;
        struct amdgpu_sync      active;
        struct fence            *last_flush;
-       struct amdgpu_ring      *last_user;
        atomic64_t              owner;
 
        uint64_t                pd_gpu_addr;
        /* last flushed PD/PT update */
        struct fence            *flushed_updates;
 
+       uint32_t                current_gpu_reset_count;
+
        uint32_t                gds_base;
        uint32_t                gds_size;
        uint32_t                gws_base;
@@ -905,6 +912,10 @@ struct amdgpu_vm_manager {
        struct list_head                        ids_lru;
        struct amdgpu_vm_id                     ids[AMDGPU_NUM_VM];
 
+       /* Handling of VM fences */
+       u64                                     fence_context;
+       unsigned                                seqno[AMDGPU_MAX_RINGS];
+
        uint32_t                                max_pfn;
        /* vram base address for page table entry  */
        u64                                     vram_base_offset;
@@ -926,17 +937,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
                         struct list_head *validated,
                         struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                         struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
                                  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync, struct fence *fence,
-                     unsigned *vm_id, uint64_t *vm_pd_addr);
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
-                   unsigned vm_id, uint64_t pd_addr,
-                   uint32_t gds_base, uint32_t gds_size,
-                   uint32_t gws_base, uint32_t gws_size,
-                   uint32_t oa_base, uint32_t oa_size);
+                     struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1142,6 +1150,12 @@ struct amdgpu_cu_info {
        uint32_t bitmap[4][4];
 };
 
+struct amdgpu_gfx_funcs {
+       /* get the gpu clock counter */
+       uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+       void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+};
+
 struct amdgpu_gfx {
        struct mutex                    gpu_clock_mutex;
        struct amdgpu_gca_config        config;
@@ -1178,6 +1192,7 @@ struct amdgpu_gfx {
        /* ce ram size*/
        unsigned                        ce_ram_size;
        struct amdgpu_cu_info           cu_info;
+       const struct amdgpu_gfx_funcs   *funcs;
 };
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1195,10 +1210,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
-                           uint32_t **data);
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
-                       unsigned size, uint32_t *data);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned ring_size, u32 nop, u32 align_mask,
                     struct amdgpu_irq_src *irq_src, unsigned irq_type,
@@ -1250,6 +1261,7 @@ struct amdgpu_job {
        uint32_t                num_ibs;
        void                    *owner;
        uint64_t                ctx;
+       bool                    vm_needs_flush;
        unsigned                vm_id;
        uint64_t                vm_pd_addr;
        uint32_t                gds_base, gds_size;
@@ -1257,8 +1269,7 @@ struct amdgpu_job {
        uint32_t                oa_base, oa_size;
 
        /* user fence handling */
-       struct amdgpu_bo        *uf_bo;
-       uint32_t                uf_offset;
+       uint64_t                uf_addr;
        uint64_t                uf_sequence;
 
 };
@@ -1560,6 +1571,12 @@ struct amdgpu_dpm_funcs {
        u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
        int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
        int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+       int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+       int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+       int (*get_sclk_od)(struct amdgpu_device *adev);
+       int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+       int (*get_mclk_od)(struct amdgpu_device *adev);
+       int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
 };
 
 struct amdgpu_dpm {
@@ -1767,6 +1784,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor);
 void amdgpu_debugfs_cleanup(struct drm_minor *minor);
 #endif
 
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+
 /*
  * amdgpu smumgr functions
  */
@@ -1811,15 +1830,13 @@ struct amdgpu_asic_funcs {
                             u32 sh_num, u32 reg_offset, u32 *value);
        void (*set_vga_state)(struct amdgpu_device *adev, bool state);
        int (*reset)(struct amdgpu_device *adev);
-       /* wait for mc_idle */
-       int (*wait_for_mc_idle)(struct amdgpu_device *adev);
        /* get the reference clock */
        u32 (*get_xclk)(struct amdgpu_device *adev);
-       /* get the gpu clock counter */
-       uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
        /* MM block clocks */
        int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
        int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
+       /* query virtual capabilities */
+       u32 (*get_virtual_caps)(struct amdgpu_device *adev);
 };
 
 /*
@@ -1914,8 +1931,12 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 
 
 /* GPU virtualization */
+#define AMDGPU_VIRT_CAPS_SRIOV_EN       (1 << 0)
+#define AMDGPU_VIRT_CAPS_IS_VF          (1 << 1)
 struct amdgpu_virtualization {
        bool supports_sr_iov;
+       bool is_virtual;
+       u32 caps;
 };
 
 /*
@@ -1997,6 +2018,10 @@ struct amdgpu_device {
        spinlock_t didt_idx_lock;
        amdgpu_rreg_t                   didt_rreg;
        amdgpu_wreg_t                   didt_wreg;
+       /* protects concurrent gc_cac register access */
+       spinlock_t gc_cac_idx_lock;
+       amdgpu_rreg_t                   gc_cac_rreg;
+       amdgpu_wreg_t                   gc_cac_wreg;
        /* protects concurrent ENDPOINT (audio) register access */
        spinlock_t audio_endpt_idx_lock;
        amdgpu_block_rreg_t             audio_endpt_rreg;
@@ -2022,6 +2047,7 @@ struct amdgpu_device {
        atomic64_t                      vram_vis_usage;
        atomic64_t                      gtt_usage;
        atomic64_t                      num_bytes_moved;
+       atomic64_t                      num_evictions;
        atomic_t                        gpu_reset_counter;
 
        /* display */
@@ -2032,7 +2058,7 @@ struct amdgpu_device {
        struct amdgpu_irq_src           hpd_irq;
 
        /* rings */
-       unsigned                        fence_context;
+       u64                             fence_context;
        unsigned                        num_rings;
        struct amdgpu_ring              *rings[AMDGPU_MAX_RINGS];
        bool                            ib_pool_ready;
@@ -2125,6 +2151,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
+#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
+#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
 #define WREG32_P(reg, val, mask)                               \
@@ -2200,11 +2228,10 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
  */
 #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
-#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
+#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
@@ -2257,6 +2284,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
 #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
 
 #define amdgpu_dpm_get_temperature(adev) \
        ((adev)->pp_enabled ?                                           \
@@ -2335,6 +2364,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_force_clock_level(adev, type, level) \
                (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
 
+#define amdgpu_dpm_get_sclk_od(adev) \
+       (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+       (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+       ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+       ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
 #define amdgpu_dpm_dispatch_task(adev, event_id, input, output)                \
        (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
 
@@ -2376,9 +2417,13 @@ bool amdgpu_device_is_px(struct drm_device *dev);
 #if defined(CONFIG_VGA_SWITCHEROO)
 void amdgpu_register_atpx_handler(void);
 void amdgpu_unregister_atpx_handler(void);
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+bool amdgpu_is_atpx_hybrid(void);
 #else
 static inline void amdgpu_register_atpx_handler(void) {}
 static inline void amdgpu_unregister_atpx_handler(void) {}
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
 #endif
 
 /*
index 32809f749903c2196cdd2a2fe2006a1bf3f2a0a9..d080d0807a5bb81fbb19f6e45926c2d8ff06050d 100644 (file)
@@ -240,8 +240,8 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
 {
        struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
 
-       if (rdev->asic_funcs->get_gpu_clock_counter)
-               return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+       if (rdev->gfx.funcs->get_gpu_clock_counter)
+               return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
        return 0;
 }
 
index 35a1248aaa7778a04fa64957d4d2b65a3911ab9c..0494fe7b62c0163abded8b316e5bc2e7d7e77d51 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <linux/pci.h>
+#include <linux/delay.h>
 
 #include "amd_acpi.h"
 
@@ -27,6 +28,7 @@ struct amdgpu_atpx_functions {
 struct amdgpu_atpx {
        acpi_handle handle;
        struct amdgpu_atpx_functions functions;
+       bool is_hybrid;
 };
 
 static struct amdgpu_atpx_priv {
@@ -63,6 +65,14 @@ bool amdgpu_has_atpx(void) {
        return amdgpu_atpx_priv.atpx_detected;
 }
 
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+       return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool amdgpu_is_atpx_hybrid(void) {
+       return amdgpu_atpx_priv.atpx.is_hybrid;
+}
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -142,18 +152,12 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
  */
 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 {
-       /* make sure required functions are enabled */
-       /* dGPU power control is required */
-       if (atpx->functions.power_cntl == false) {
-               printk("ATPX dGPU power cntl not present, forcing\n");
-               atpx->functions.power_cntl = true;
-       }
+       u32 valid_bits = 0;
 
        if (atpx->functions.px_params) {
                union acpi_object *info;
                struct atpx_px_params output;
                size_t size;
-               u32 valid_bits;
 
                info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
                if (!info)
@@ -172,19 +176,43 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
                memcpy(&output, info->buffer.pointer, size);
 
                valid_bits = output.flags & output.valid_flags;
-               /* if separate mux flag is set, mux controls are required */
-               if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
-                       atpx->functions.i2c_mux_cntl = true;
-                       atpx->functions.disp_mux_cntl = true;
-               }
-               /* if any outputs are muxed, mux controls are required */
-               if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
-                                 ATPX_TV_SIGNAL_MUXED |
-                                 ATPX_DFP_SIGNAL_MUXED))
-                       atpx->functions.disp_mux_cntl = true;
 
                kfree(info);
        }
+
+       /* if separate mux flag is set, mux controls are required */
+       if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+               atpx->functions.i2c_mux_cntl = true;
+               atpx->functions.disp_mux_cntl = true;
+       }
+       /* if any outputs are muxed, mux controls are required */
+       if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+                         ATPX_TV_SIGNAL_MUXED |
+                         ATPX_DFP_SIGNAL_MUXED))
+               atpx->functions.disp_mux_cntl = true;
+
+
+       /* some bioses set these bits rather than flagging power_cntl as supported */
+       if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+                         ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+               atpx->functions.power_cntl = true;
+
+       atpx->is_hybrid = false;
+       if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+               printk("ATPX Hybrid Graphics\n");
+#if 1
+               /* This is a temporary hack until the D3 cold support
+                * makes it upstream.  The ATPX power_control method seems
+                * to still work even if the system should be using
+                * the new standardized hybrid D3 cold ACPI interface.
+                */
+               atpx->functions.power_cntl = true;
+#else
+               atpx->functions.power_cntl = false;
+#endif
+               atpx->is_hybrid = true;
+       }
+
        return 0;
 }
 
@@ -259,6 +287,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
                if (!info)
                        return -EIO;
                kfree(info);
+
+               /* 200ms delay is required after off */
+               if (state == 0)
+                       msleep(200);
        }
        return 0;
 }
index 823bf5e0b0c841606e8a27cf7d488938113cc4a6..651115dcce12c6ff332eac87de6e9e5712b6d69c 100644 (file)
@@ -94,6 +94,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
        unsigned last_entry = 0, first_userptr = num_entries;
        unsigned i;
        int r;
+       unsigned long total_size = 0;
 
        array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
        if (!array)
@@ -140,6 +141,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
                if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
                        oa_obj = entry->robj;
 
+               total_size += amdgpu_bo_size(entry->robj);
                trace_amdgpu_bo_list_set(list, entry->robj);
        }
 
@@ -155,6 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
        list->array = array;
        list->num_entries = num_entries;
 
+       trace_amdgpu_cs_bo_status(list->num_entries, total_size);
        return 0;
 
 error_free:
index 199f76baf22c3eea20207fec60c1e868d173d1cb..ee95e950a19b054b3ffcb2038e705f118f32fc41 100644 (file)
@@ -312,6 +312,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
                return RREG32_UVD_CTX(index);
        case CGS_IND_REG__DIDT:
                return RREG32_DIDT(index);
+       case CGS_IND_REG_GC_CAC:
+               return RREG32_GC_CAC(index);
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return 0;
@@ -336,6 +338,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
                return WREG32_UVD_CTX(index, value);
        case CGS_IND_REG__DIDT:
                return WREG32_DIDT(index, value);
+       case CGS_IND_REG_GC_CAC:
+               return WREG32_GC_CAC(index, value);
        case CGS_IND_REG__AUDIO_ENDPT:
                DRM_ERROR("audio endpt register access not implemented.\n");
                return;
@@ -696,6 +700,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
        return result;
 }
 
+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+       CGS_FUNC_ADEV;
+       if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+               release_firmware(adev->pm.fw);
+               return 0;
+       }
+       /* cannot release other firmware because they are not created by cgs */
+       return -EINVAL;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                        enum cgs_ucode_id type,
                                        struct cgs_firmware_info *info)
@@ -737,6 +752,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 
                if (!adev->pm.fw) {
                        switch (adev->asic_type) {
+                       case CHIP_TOPAZ:
+                               strcpy(fw_name, "amdgpu/topaz_smc.bin");
+                               break;
                        case CHIP_TONGA:
                                strcpy(fw_name, "amdgpu/tonga_smc.bin");
                                break;
@@ -776,6 +794,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                }
 
                hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+               amdgpu_ucode_print_smc_hdr(&hdr->header);
                adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
                ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
                ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
@@ -784,6 +803,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 
                info->version = adev->pm.fw_version;
                info->image_size = ucode_size;
+               info->ucode_start_address = ucode_start_address;
                info->kptr = (void *)src;
        }
        return 0;
@@ -819,6 +839,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
        case CGS_SYSTEM_INFO_GFX_CU_INFO:
                sys_info->value = adev->gfx.cu_info.number;
                break;
+       case CGS_SYSTEM_INFO_GFX_SE_INFO:
+               sys_info->value = adev->gfx.config.max_shader_engines;
+               break;
        default:
                return -ENODEV;
        }
@@ -898,7 +921,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
        struct cgs_acpi_method_argument *argument = NULL;
        uint32_t i, count;
        acpi_status status;
-       int result;
+       int result = 0;
        uint32_t func_no = 0xFFFFFFFF;
 
        handle = ACPI_HANDLE(&adev->pdev->dev);
@@ -961,11 +984,11 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
                                params->integer.value = argument->value;
                                break;
                        case ACPI_TYPE_STRING:
-                               params->string.length = argument->method_length;
+                               params->string.length = argument->data_length;
                                params->string.pointer = argument->pointer;
                                break;
                        case ACPI_TYPE_BUFFER:
-                               params->buffer.length = argument->method_length;
+                               params->buffer.length = argument->data_length;
                                params->buffer.pointer = argument->pointer;
                                break;
                        default:
@@ -1068,17 +1091,14 @@ int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
        struct cgs_acpi_method_info info = {0};
 
        acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
-       acpi_input[0].method_length = sizeof(uint32_t);
        acpi_input[0].data_length = sizeof(uint32_t);
        acpi_input[0].value = acpi_function;
 
        acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
-       acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
        acpi_input[1].data_length = input_size;
        acpi_input[1].pointer = pinput;
 
        acpi_output.type = CGS_ACPI_TYPE_BUFFER;
-       acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
        acpi_output.data_length = output_size;
        acpi_output.pointer = poutput;
 
@@ -1125,6 +1145,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
        amdgpu_cgs_pm_query_clock_limits,
        amdgpu_cgs_set_camera_voltages,
        amdgpu_cgs_get_firmware_info,
+       amdgpu_cgs_rel_firmware,
        amdgpu_cgs_set_powergating_state,
        amdgpu_cgs_set_clockgating_state,
        amdgpu_cgs_get_active_displays_info,
index cb07da41152b7f8f7ef3229d56832fc1fb881c75..ff0b55a65ca3fcb29d51a2b11bf46b14c4a848a0 100644 (file)
@@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
                                                   DRM_MODE_SCALE_NONE);
                        /* no HPD on analog connectors */
                        amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
                        connector->interlace_allowed = true;
                        connector->doublescan_allowed = true;
                        break;
@@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
        }
 
        if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
-               if (i2c_bus->valid)
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+               if (i2c_bus->valid) {
+                       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                                           DRM_CONNECTOR_POLL_DISCONNECT;
+               }
        } else
                connector->polled = DRM_CONNECTOR_POLL_HPD;
 
index 9bc8f1d99733e6817ba35efe55e179aa8999e9f1..0307ff5887c593a079d8fd63fa3245fb683162f3 100644 (file)
@@ -216,11 +216,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        if (ret)
                goto free_all_kdata;
 
-       if (p->uf_entry.robj) {
-               p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
-               p->job->uf_offset = uf_offset;
-       }
-
+       if (p->uf_entry.robj)
+               p->job->uf_addr = uf_offset;
        kfree(chunk_array);
        return 0;
 
@@ -459,7 +456,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                list_splice(&need_pages, &p->validated);
        }
 
-       amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
+       amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
 
        p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
        p->bytes_moved = 0;
@@ -472,6 +469,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        if (r)
                goto error_validate;
 
+       fpriv->vm.last_eviction_counter =
+               atomic64_read(&p->adev->num_evictions);
+
        if (p->bo_list) {
                struct amdgpu_bo *gds = p->bo_list->gds_obj;
                struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -499,6 +499,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                }
        }
 
+       if (p->uf_entry.robj)
+               p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+
 error_validate:
        if (r) {
                amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
@@ -653,18 +656,21 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 
        /* Only for UVD/VCE VM emulation */
        if (ring->funcs->parse_cs) {
+               p->job->vm = NULL;
                for (i = 0; i < p->job->num_ibs; i++) {
                        r = amdgpu_ring_parse_cs(ring, p, i);
                        if (r)
                                return r;
                }
-       }
+       } else {
+               p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 
-       r = amdgpu_bo_vm_update_pte(p, vm);
-       if (!r)
-               amdgpu_cs_sync_rings(p);
+               r = amdgpu_bo_vm_update_pte(p, vm);
+               if (r)
+                       return r;
+       }
 
-       return r;
+       return amdgpu_cs_sync_rings(p);
 }
 
 static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
@@ -761,7 +767,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
        }
 
        /* UVD & VCE fw doesn't support user fences */
-       if (parser->job->uf_bo && (
+       if (parser->job->uf_addr && (
            parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
            parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
                return -EINVAL;
@@ -830,17 +836,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 {
        struct amdgpu_ring *ring = p->job->ring;
        struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
-       struct fence *fence;
        struct amdgpu_job *job;
        int r;
 
        job = p->job;
        p->job = NULL;
 
-       r = amd_sched_job_init(&job->base, &ring->sched,
-                              entity, amdgpu_job_timeout_func,
-                              amdgpu_job_free_func,
-                              p->filp, &fence);
+       r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
        if (r) {
                amdgpu_job_free(job);
                return r;
@@ -848,9 +850,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        job->owner = p->filp;
        job->ctx = entity->fence_context;
-       p->fence = fence_get(fence);
-       cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
+       p->fence = fence_get(&job->base.s_fence->finished);
+       cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
        job->uf_sequence = cs->out.handle;
+       amdgpu_job_free_resources(job);
 
        trace_amdgpu_cs_ioctl(job);
        amd_sched_entity_push_job(&job->base);
index bb8b149786d708de30a0143bb273f6be8af0390b..614fb026436d347f62cd6debe35f3152ebd76df8 100644 (file)
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
@@ -35,6 +36,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 #include "amdgpu_i2c.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
@@ -79,24 +81,27 @@ bool amdgpu_device_is_px(struct drm_device *dev)
 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        bool always_indirect)
 {
+       uint32_t ret;
+
        if ((reg * 4) < adev->rmmio_size && !always_indirect)
-               return readl(((void __iomem *)adev->rmmio) + (reg * 4));
+               ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;
-               uint32_t ret;
 
                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
-               return ret;
        }
+       trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+       return ret;
 }
 
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    bool always_indirect)
 {
+       trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
        if ((reg * 4) < adev->rmmio_size && !always_indirect)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
@@ -827,8 +832,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
  */
 static void amdgpu_atombios_fini(struct amdgpu_device *adev)
 {
-       if (adev->mode_info.atom_context)
+       if (adev->mode_info.atom_context) {
                kfree(adev->mode_info.atom_context->scratch);
+               kfree(adev->mode_info.atom_context->iio);
+       }
        kfree(adev->mode_info.atom_context);
        adev->mode_info.atom_context = NULL;
        kfree(adev->mode_info.atom_card_info);
@@ -1068,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
        int i, r = 0;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
                                                                            state);
                        if (r)
                                return r;
+                       break;
                }
        }
        return r;
@@ -1085,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
        int i, r = 0;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
                                                                            state);
                        if (r)
                                return r;
+                       break;
                }
        }
        return r;
 }
 
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+                        enum amd_ip_block_type block_type)
+{
+       int i, r;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if (adev->ip_blocks[i].type == block_type) {
+                       r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+                       if (r)
+                               return r;
+                       break;
+               }
+       }
+       return 0;
+
+}
+
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+                   enum amd_ip_block_type block_type)
+{
+       int i;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if (adev->ip_blocks[i].type == block_type)
+                       return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+       }
+       return true;
+
+}
+
 const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
                                        struct amdgpu_device *adev,
                                        enum amd_ip_block_type type)
@@ -1207,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
                }
        }
 
+       adev->cg_flags &= amdgpu_cg_mask;
+       adev->pg_flags &= amdgpu_pg_mask;
+
        return 0;
 }
 
@@ -1325,6 +1375,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                adev->ip_block_status[i].valid = false;
        }
 
+       for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+               if (adev->ip_blocks[i].funcs->late_fini)
+                       adev->ip_blocks[i].funcs->late_fini((void *)adev);
+       }
+
        return 0;
 }
 
@@ -1378,6 +1433,15 @@ static int amdgpu_resume(struct amdgpu_device *adev)
        return 0;
 }
 
+static bool amdgpu_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+       return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+       return false;
+#endif
+}
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -1424,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
        adev->didt_rreg = &amdgpu_invalid_rreg;
        adev->didt_wreg = &amdgpu_invalid_wreg;
+       adev->gc_cac_rreg = &amdgpu_invalid_rreg;
+       adev->gc_cac_wreg = &amdgpu_invalid_wreg;
        adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
        adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
 
        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
                 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
                 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -1451,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        spin_lock_init(&adev->pcie_idx_lock);
        spin_lock_init(&adev->uvd_ctx_idx_lock);
        spin_lock_init(&adev->didt_idx_lock);
+       spin_lock_init(&adev->gc_cac_idx_lock);
        spin_lock_init(&adev->audio_endpt_idx_lock);
 
        adev->rmmio_base = pci_resource_start(adev->pdev, 5);
@@ -1495,29 +1563,38 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
 
        /* Read BIOS */
-       if (!amdgpu_get_bios(adev))
-               return -EINVAL;
+       if (!amdgpu_get_bios(adev)) {
+               r = -EINVAL;
+               goto failed;
+       }
        /* Must be an ATOMBIOS */
        if (!adev->is_atom_bios) {
                dev_err(adev->dev, "Expecting atombios for GPU\n");
-               return -EINVAL;
+               r = -EINVAL;
+               goto failed;
        }
        r = amdgpu_atombios_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_atombios_init failed\n");
-               return r;
+               goto failed;
        }
 
        /* See if the asic supports SR-IOV */
        adev->virtualization.supports_sr_iov =
                amdgpu_atombios_has_gpu_virtualization_table(adev);
 
+       /* Check if we are executing in a virtualized environment */
+       adev->virtualization.is_virtual = amdgpu_device_is_virtual();
+       adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+
        /* Post card if necessary */
        if (!amdgpu_card_posted(adev) ||
-           adev->virtualization.supports_sr_iov) {
+           (adev->virtualization.is_virtual &&
+            !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
                if (!adev->bios) {
                        dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto failed;
                }
                DRM_INFO("GPU not posted. posting now...\n");
                amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1527,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_atombios_get_clock_info(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
-               return r;
+               goto failed;
        }
        /* init i2c buses */
        amdgpu_atombios_i2c_init(adev);
@@ -1536,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_fence_driver_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
-               return r;
+               goto failed;
        }
 
        /* init the mode config */
@@ -1546,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (r) {
                dev_err(adev->dev, "amdgpu_init failed\n");
                amdgpu_fini(adev);
-               return r;
+               goto failed;
        }
 
        adev->accel_working = true;
@@ -1556,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_ib_pool_init(adev);
        if (r) {
                dev_err(adev->dev, "IB initialization failed (%d).\n", r);
-               return r;
+               goto failed;
        }
 
        r = amdgpu_ib_ring_tests(adev);
@@ -1573,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                DRM_ERROR("registering register debugfs failed (%d).\n", r);
        }
 
+       r = amdgpu_debugfs_firmware_init(adev);
+       if (r) {
+               DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+               goto failed;
+       }
+
        if ((amdgpu_testing & 1)) {
                if (adev->accel_working)
                        amdgpu_test_moves(adev);
@@ -1598,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_late_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_late_init failed\n");
-               return r;
+               goto failed;
        }
 
        return 0;
+
+failed:
+       if (runtime)
+               vga_switcheroo_fini_domain_pm_ops(adev->dev);
+       return r;
 }
 
 static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
@@ -1624,6 +1712,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        amdgpu_bo_evict_vram(adev);
        amdgpu_ib_pool_fini(adev);
        amdgpu_fence_driver_fini(adev);
+       drm_crtc_force_disable_all(adev->ddev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
        kfree(adev->ip_block_status);
@@ -1635,6 +1724,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        kfree(adev->bios);
        adev->bios = NULL;
        vga_switcheroo_unregister_client(adev->pdev);
+       if (adev->flags & AMD_IS_PX)
+               vga_switcheroo_fini_domain_pm_ops(adev->dev);
        vga_client_register(adev->pdev, NULL, NULL, NULL);
        if (adev->rio_mem)
                pci_iounmap(adev->pdev, adev->rio_mem);
@@ -1840,11 +1931,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
  */
 int amdgpu_gpu_reset(struct amdgpu_device *adev)
 {
-       unsigned ring_sizes[AMDGPU_MAX_RINGS];
-       uint32_t *ring_data[AMDGPU_MAX_RINGS];
-
-       bool saved = false;
-
        int i, r;
        int resched;
 
@@ -1853,22 +1939,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-       r = amdgpu_suspend(adev);
-
+       /* block scheduler */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
+
                if (!ring)
                        continue;
-
-               ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
-               if (ring_sizes[i]) {
-                       saved = true;
-                       dev_info(adev->dev, "Saved %d dwords of commands "
-                                "on ring %d.\n", ring_sizes[i], i);
-               }
+               kthread_park(ring->sched.thread);
+               amd_sched_hw_job_reset(&ring->sched);
        }
+       /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+       amdgpu_fence_driver_force_completion(adev);
+
+       /* save scratch */
+       amdgpu_atombios_scratch_regs_save(adev);
+       r = amdgpu_suspend(adev);
 
 retry:
+       /* Disable fb access */
+       if (adev->mode_info.num_crtc) {
+               struct amdgpu_mode_mc_save save;
+               amdgpu_display_stop_mc_access(adev, &save);
+               amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+       }
+
        r = amdgpu_asic_reset(adev);
        /* post card */
        amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1877,32 +1971,29 @@ retry:
                dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
                r = amdgpu_resume(adev);
        }
-
+       /* restore scratch */
+       amdgpu_atombios_scratch_regs_restore(adev);
        if (!r) {
+               r = amdgpu_ib_ring_tests(adev);
+               if (r) {
+                       dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+                       r = amdgpu_suspend(adev);
+                       goto retry;
+               }
+
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = adev->rings[i];
                        if (!ring)
                                continue;
-
-                       amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
-                       ring_sizes[i] = 0;
-                       ring_data[i] = NULL;
-               }
-
-               r = amdgpu_ib_ring_tests(adev);
-               if (r) {
-                       dev_err(adev->dev, "ib ring test failed (%d).\n", r);
-                       if (saved) {
-                               saved = false;
-                               r = amdgpu_suspend(adev);
-                               goto retry;
-                       }
+                       amd_sched_job_recovery(&ring->sched);
+                       kthread_unpark(ring->sched.thread);
                }
        } else {
-               amdgpu_fence_driver_force_completion(adev);
+               dev_err(adev->dev, "asic resume failed (%d).\n", r);
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-                       if (adev->rings[i])
-                               kfree(ring_data[i]);
+                       if (adev->rings[i])
+                               kthread_unpark(adev->rings[i]->sched.thread);
                }
        }
 
@@ -1913,13 +2004,11 @@ retry:
                /* bad news, how to tell it to userspace ? */
                dev_info(adev->dev, "GPU reset failed\n");
        }
+       amdgpu_irq_gpu_reset_resume_helper(adev);
 
        return r;
 }
 
-#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007  /* gen: chipset 1/2, asic 1/2/3 */
-#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
-
 void amdgpu_get_pcie_info(struct amdgpu_device *adev)
 {
        u32 mask;
@@ -2073,20 +2162,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;
+       bool use_bank;
+       unsigned instance_bank, sh_bank, se_bank;
 
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
+       if (*pos & (1ULL << 62)) {
+               se_bank = (*pos >> 24) & 0x3FF;
+               sh_bank = (*pos >> 34) & 0x3FF;
+               instance_bank = (*pos >> 44) & 0x3FF;
+               use_bank = 1;
+               *pos &= 0xFFFFFF;
+       } else {
+               use_bank = 0;
+       }
+
+       if (use_bank) {
+               if (sh_bank >= adev->gfx.config.max_sh_per_se ||
+                   se_bank >= adev->gfx.config.max_shader_engines)
+                       return -EINVAL;
+               mutex_lock(&adev->grbm_idx_mutex);
+               amdgpu_gfx_select_se_sh(adev, se_bank,
+                                       sh_bank, instance_bank);
+       }
+
        while (size) {
                uint32_t value;
 
                if (*pos > adev->rmmio_size)
-                       return result;
+                       goto end;
 
                value = RREG32(*pos >> 2);
                r = put_user(value, (uint32_t *)buf);
-               if (r)
-                       return r;
+               if (r) {
+                       result = r;
+                       goto end;
+               }
 
                result += 4;
                buf += 4;
@@ -2094,6 +2206,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
                size -= 4;
        }
 
+end:
+       if (use_bank) {
+               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+               mutex_unlock(&adev->grbm_idx_mutex);
+       }
+
        return result;
 }
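
Note on the bank-select encoding added above: bit 62 of the file offset enables banked access, with se_bank in bits 24-33, sh_bank in bits 34-43 and instance_bank in bits 44-53, while the low 24 bits remain the register byte offset. A minimal userspace sketch of a banked read (the debugfs path, register offset and bank indices are illustrative assumptions, not values from this patch):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t off = 0x2000;              /* register byte offset (low 24 bits), assumed */

            off |= 1ULL << 62;                  /* enable bank selection */
            off |= (uint64_t)1 << 24;           /* se_bank */
            off |= (uint64_t)0 << 34;           /* sh_bank */
            off |= (uint64_t)2 << 44;           /* instance_bank */

            int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            uint32_t val;
            if (pread(fd, &val, sizeof(val), off) == sizeof(val))
                    printf("reg = 0x%08x\n", val);
            close(fd);
            return 0;
    }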
 
@@ -2293,6 +2411,68 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
        return result;
 }
 
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+                                       size_t size, loff_t *pos)
+{
+       struct amdgpu_device *adev = f->f_inode->i_private;
+       ssize_t result = 0;
+       int r;
+       uint32_t *config, no_regs = 0;
+
+       if (size & 0x3 || *pos & 0x3)
+               return -EINVAL;
+
+       config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+       if (!config)
+               return -ENOMEM;
+
+       /* version, increment each time something is added */
+       config[no_regs++] = 0;
+       config[no_regs++] = adev->gfx.config.max_shader_engines;
+       config[no_regs++] = adev->gfx.config.max_tile_pipes;
+       config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+       config[no_regs++] = adev->gfx.config.max_sh_per_se;
+       config[no_regs++] = adev->gfx.config.max_backends_per_se;
+       config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+       config[no_regs++] = adev->gfx.config.max_gprs;
+       config[no_regs++] = adev->gfx.config.max_gs_threads;
+       config[no_regs++] = adev->gfx.config.max_hw_contexts;
+       config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+       config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+       config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+       config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+       config[no_regs++] = adev->gfx.config.num_tile_pipes;
+       config[no_regs++] = adev->gfx.config.backend_enable_mask;
+       config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+       config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+       config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+       config[no_regs++] = adev->gfx.config.num_gpus;
+       config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+       config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+       config[no_regs++] = adev->gfx.config.gb_addr_config;
+       config[no_regs++] = adev->gfx.config.num_rbs;
+
+       while (size && (*pos < no_regs * 4)) {
+               uint32_t value;
+
+               value = config[*pos >> 2];
+               r = put_user(value, (uint32_t *)buf);
+               if (r) {
+                       kfree(config);
+                       return r;
+               }
+
+               result += 4;
+               buf += 4;
+               *pos += 4;
+               size -= 4;
+       }
+
+       kfree(config);
+       return result;
+}
+
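
The amdgpu_gca_config blob starts with a version word (0 here) followed by the GFX configuration fields in the fixed order built above. A short reader, assuming the usual debugfs location:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint32_t buf[24];
            int fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            ssize_t n = read(fd, buf, sizeof(buf));
            close(fd);
            if (n < (ssize_t)(3 * sizeof(uint32_t)))
                    return 1;
            printf("version:            %u\n", buf[0]);
            printf("max_shader_engines: %u\n", buf[1]);
            printf("max_tile_pipes:     %u\n", buf[2]);
            return 0;
    }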
 static const struct file_operations amdgpu_debugfs_regs_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_regs_read,
@@ -2318,11 +2498,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
        .llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+       .owner = THIS_MODULE,
+       .read = amdgpu_debugfs_gca_config_read,
+       .llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
        &amdgpu_debugfs_regs_fops,
        &amdgpu_debugfs_regs_didt_fops,
        &amdgpu_debugfs_regs_pcie_fops,
        &amdgpu_debugfs_regs_smc_fops,
+       &amdgpu_debugfs_gca_config_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -2330,6 +2517,7 @@ static const char *debugfs_regs_names[] = {
        "amdgpu_regs_didt",
        "amdgpu_regs_pcie",
        "amdgpu_regs_smc",
+       "amdgpu_gca_config",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
index b0832da2ef7e463ab8fe1c15d3234dcc657d14a7..7dbe8d02c5a637d42f9ea345b3cfc087966555c6 100644 (file)
@@ -220,19 +220,17 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 
        r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
        if (unlikely(r != 0)) {
-               amdgpu_bo_unreserve(new_rbo);
                r = -EINVAL;
                DRM_ERROR("failed to pin new rbo buffer before flip\n");
-               goto cleanup;
+               goto unreserve;
        }
 
        r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
                                              &work->shared_count,
                                              &work->shared);
        if (unlikely(r != 0)) {
-               amdgpu_bo_unreserve(new_rbo);
                DRM_ERROR("failed to get fences for buffer\n");
-               goto cleanup;
+               goto unpin;
        }
 
        amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
@@ -240,7 +238,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 
        work->base = base;
 
-       r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id);
+       r = drm_crtc_vblank_get(crtc);
        if (r) {
                DRM_ERROR("failed to get vblank before flip\n");
                goto pflip_cleanup;
@@ -268,16 +266,18 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
        return 0;
 
 vblank_cleanup:
-       drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id);
+       drm_crtc_vblank_put(crtc);
 
 pflip_cleanup:
        if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
                DRM_ERROR("failed to reserve new rbo in error path\n");
                goto cleanup;
        }
+unpin:
        if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
                DRM_ERROR("failed to unpin new rbo in error path\n");
        }
+unreserve:
        amdgpu_bo_unreserve(new_rbo);
 
 cleanup:
index f888c015f76c2e945e0b5d0fbcb20dbd538eb25c..015f1f4aae53263a66b008aeb49b43c0fdc2ce7c 100644 (file)
@@ -82,8 +82,12 @@ int amdgpu_exp_hw_support = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_powerplay = -1;
+int amdgpu_powercontainment = 1;
 unsigned amdgpu_pcie_gen_cap = 0;
 unsigned amdgpu_pcie_lane_cap = 0;
+unsigned amdgpu_cg_mask = 0xffffffff;
+unsigned amdgpu_pg_mask = 0xffffffff;
+char *amdgpu_disable_cu = NULL;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -160,6 +164,9 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 #ifdef CONFIG_DRM_AMD_POWERPLAY
 MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(powerplay, amdgpu_powerplay, int, 0444);
+
+MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
+module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
 #endif
 
 MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
@@ -168,6 +175,15 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
 MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
 module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
 
+MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
+module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+
+MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
+module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+
+MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
+module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+
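Usage note: cg_mask and pg_mask are ANDed into adev->cg_flags/adev->pg_flags in amdgpu_early_init() (see the hunk above), so booting with amdgpu.cg_mask=0 amdgpu.pg_mask=0 disables all clock- and powergating for debugging. disable_cu takes a comma-separated list of se.sh.cu triples; its parser is added in the amdgpu_gfx.c hunk further below.
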
 static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
        /* Kaveri */
@@ -413,7 +429,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_ignore_hotplug(pdev);
-       pci_set_power_state(pdev, PCI_D3cold);
+       if (amdgpu_is_atpx_hybrid())
+               pci_set_power_state(pdev, PCI_D3cold);
+       else if (!amdgpu_has_atpx_dgpu_power_cntl())
+               pci_set_power_state(pdev, PCI_D3hot);
        drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 
        return 0;
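
The intent of the split above: on ATPX hybrid platforms the dGPU can safely enter D3cold through the platform, whereas on boards where the ATPX dGPU power control method cuts power itself no PCI power-state change is wanted; all other PX systems fall back to D3hot.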
@@ -430,7 +449,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 
        drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-       pci_set_power_state(pdev, PCI_D0);
+       if (amdgpu_is_atpx_hybrid() ||
+           !amdgpu_has_atpx_dgpu_power_cntl())
+               pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        ret = pci_enable_device(pdev);
        if (ret)
@@ -515,7 +536,7 @@ static struct drm_driver kms_driver = {
        .driver_features =
            DRIVER_USE_AGP |
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
-           DRIVER_PRIME | DRIVER_RENDER,
+           DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
        .dev_priv_size = 0,
        .load = amdgpu_driver_load_kms,
        .open = amdgpu_driver_open_kms,
@@ -590,7 +611,6 @@ static int __init amdgpu_init(void)
        DRM_INFO("amdgpu kernel modesetting enabled.\n");
        driver = &kms_driver;
        pdriver = &amdgpu_kms_pci_driver;
-       driver->driver_features |= DRIVER_MODESET;
        driver->num_ioctls = amdgpu_max_kms_ioctl;
        amdgpu_register_atpx_handler();
        /* let modprobe override vga console setting */
index d1558768cfb73d57e30618b971a5edb5d1f1d54f..0b109aebfec6ddbd003aac9d782d4c9a2aede8df 100644 (file)
@@ -204,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
        if (seq != ring->fence_drv.sync_seq)
                amdgpu_fence_schedule_fallback(ring);
 
-       while (last_seq != seq) {
+       if (unlikely(seq == last_seq))
+               return;
+
+       last_seq &= drv->num_fences_mask;
+       seq &= drv->num_fences_mask;
+
+       do {
                struct fence *fence, **ptr;
 
-               ptr = &drv->fences[++last_seq & drv->num_fences_mask];
+               ++last_seq;
+               last_seq &= drv->num_fences_mask;
+               ptr = &drv->fences[last_seq];
 
                /* There is always exactly one thread signaling this fence slot */
                fence = rcu_dereference_protected(*ptr, 1);
                RCU_INIT_POINTER(*ptr, NULL);
 
-               BUG_ON(!fence);
+               if (!fence)
+                       continue;
 
                r = fence_signal(fence);
                if (!r)
@@ -222,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
                        BUG();
 
                fence_put(fence);
-       }
+       } while (last_seq != seq);
 }
 
 /**
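
The rewritten loop walks fence slots modulo the power-of-two ring size instead of comparing raw sequence numbers, and now tolerates NULL slots (e.g. after a reset). A tiny sketch of the masking (the slot count is assumed) shows how the wraparound behaves:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint32_t num_fences_mask = 8 - 1;  /* 8 slots, power of two */
            uint32_t last_seq = 6, seq = 10;         /* four fences signaled, crossing the wrap */

            last_seq &= num_fences_mask;
            seq &= num_fences_mask;
            do {
                    ++last_seq;
                    last_seq &= num_fences_mask;
                    printf("signal slot %u\n", last_seq);  /* prints 7, 0, 1, 2 */
            } while (last_seq != seq);
            return 0;
    }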
index 8fab6486064f507c219872851ef4cd6a7fe98e10..88fbed2389c0a181d69c7918d340ec82010dc811 100644 (file)
@@ -503,7 +503,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
        if (r)
                goto error_print;
 
-       amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
+       amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
        list_for_each_entry(entry, &list, head) {
                domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
index 9f95da4f053616f396418d7b794cf033620ff7cb..a074edd95c701c2705e2323bfaf713692a852654 100644 (file)
@@ -70,3 +70,47 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
                }
        }
 }
+
+/**
+ * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
+ *
+ * @mask: array in which the per-shader array disable masks will be stored
+ * @max_se: number of SEs
+ * @max_sh: number of SHs
+ *
+ * The bitmask of CUs to be disabled in the shader array determined by se and
+ * sh is stored in mask[se * max_sh + sh].
+ */
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+{
+       unsigned se, sh, cu;
+       const char *p;
+
+       memset(mask, 0, sizeof(*mask) * max_se * max_sh);
+
+       if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
+               return;
+
+       p = amdgpu_disable_cu;
+       for (;;) {
+               char *next;
+               int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+               if (ret < 3) {
+                       DRM_ERROR("amdgpu: could not parse disable_cu\n");
+                       return;
+               }
+
+               if (se < max_se && sh < max_sh && cu < 16) {
+                       DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
+                       mask[se * max_sh + sh] |= 1u << cu;
+               } else {
+                       DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
+                                 se, sh, cu);
+               }
+
+               next = strchr(p, ',');
+               if (!next)
+                       break;
+               p = next + 1;
+       }
+}
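
A standalone sketch of the same parse loop (the SE/SH counts and input string here are assumptions for illustration, not values from this patch) shows the resulting per-shader-array masks:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *p = "0.0.3,1.1.15";   /* example disable_cu value */
            unsigned max_se = 2, max_sh = 2;  /* assumed GPU topology */
            unsigned mask[4] = { 0 };
            unsigned se, sh, cu;

            for (;;) {
                    const char *next;

                    if (sscanf(p, "%u.%u.%u", &se, &sh, &cu) < 3) {
                            fprintf(stderr, "could not parse disable_cu\n");
                            return 1;
                    }
                    if (se < max_se && sh < max_sh && cu < 16)
                            mask[se * max_sh + sh] |= 1u << cu;
                    next = strchr(p, ',');
                    if (!next)
                            break;
                    p = next + 1;
            }
            for (unsigned i = 0; i < max_se * max_sh; i++)
                    printf("mask[%u] = 0x%04x\n", i, mask[i]);
            return 0;
    }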
index dc06cbda7be6c4a6c9e67008d7bc69b39044b86d..51321e154c09997037cf51ffa8bad5a0a90f5e67 100644 (file)
@@ -27,4 +27,6 @@
 int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
 void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
 
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+
 #endif
index 34e35423b78e81d3d91102e2acacf889fa9792c0..428ebf3a43873445c8fda72e737d24034e7daa78 100644 (file)
@@ -122,7 +122,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
-       struct fence *hwf;
        uint64_t ctx;
 
        unsigned i;
@@ -160,10 +159,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                patch_offset = amdgpu_ring_init_cond_exec(ring);
 
        if (vm) {
-               r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
-                                   job->gds_base, job->gds_size,
-                                   job->gws_base, job->gws_size,
-                                   job->oa_base, job->oa_size);
+               r = amdgpu_vm_flush(ring, job);
                if (r) {
                        amdgpu_ring_undo(ring);
                        return r;
@@ -193,7 +189,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (ring->funcs->emit_hdp_invalidate)
                amdgpu_ring_emit_hdp_invalidate(ring);
 
-       r = amdgpu_fence_emit(ring, &hwf);
+       r = amdgpu_fence_emit(ring, f);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vm_id)
@@ -203,17 +199,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        }
 
        /* wrap the last IB with fence */
-       if (job && job->uf_bo) {
-               uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
-
-               addr += job->uf_offset;
-               amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
+       if (job && job->uf_addr) {
+               amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
                                       AMDGPU_FENCE_FLAG_64BIT);
        }
 
-       if (f)
-               *f = fence_get(hwf);
-
        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
index 835a3fa8d8dfde320d73ba3af8f8fa306bd758e1..278708f5a744eebb69f0d719bfcb198efcf2ec11 100644 (file)
@@ -383,6 +383,18 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
        return r;
 }
 
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+{
+       int i, j;
+       for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
+               struct amdgpu_irq_src *src = adev->irq.sources[i];
+               if (!src)
+                       continue;
+               for (j = 0; j < src->num_types; j++)
+                       amdgpu_irq_update(adev, src, j);
+       }
+}
+
 /**
  * amdgpu_irq_get - enable interrupt
  *
index e124b59f39c146dadf7d34effe307d3376349c81..7ef09352e5347c9ce761b07328bb8a3c8c7d9514 100644 (file)
@@ -94,6 +94,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type);
 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                        unsigned type);
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
 
 int amdgpu_irq_add_domain(struct amdgpu_device *adev);
 void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
index f0dafa514fe48f785b5dbf53cc3367e214732414..6674d40eb3abb251a1e9baae117f0c1a1850f940 100644 (file)
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-static void amdgpu_job_free_handler(struct work_struct *ws)
+static void amdgpu_job_timedout(struct amd_sched_job *s_job)
 {
-       struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
-       amd_sched_job_put(&job->base);
-}
+       struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-void amdgpu_job_timeout_func(struct work_struct *work)
-{
-       struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
        DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-                               job->base.sched->name,
-                               (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
-                               job->ring->fence_drv.sync_seq);
-
-       amd_sched_job_put(&job->base);
+                 job->base.sched->name,
+                 atomic_read(&job->ring->fence_drv.last_seq),
+                 job->ring->fence_drv.sync_seq);
+       amdgpu_gpu_reset(job->adev);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;
-       INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
 
        amdgpu_sync_create(&(*job)->sync);
 
@@ -86,27 +79,33 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
        return r;
 }
 
-void amdgpu_job_free(struct amdgpu_job *job)
+void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
-       unsigned i;
        struct fence *f;
+       unsigned i;
+
        /* use sched fence if available */
-       f = (job->base.s_fence)? &job->base.s_fence->base : job->fence;
+       f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
        for (i = 0; i < job->num_ibs; ++i)
-               amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
-       fence_put(job->fence);
+               amdgpu_ib_free(job->adev, &job->ibs[i], f);
+}
 
-       amdgpu_bo_unref(&job->uf_bo);
-       amdgpu_sync_free(&job->sync);
+void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+{
+       struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-       if (!job->base.use_sched)
-               kfree(job);
+       fence_put(job->fence);
+       amdgpu_sync_free(&job->sync);
+       kfree(job);
 }
 
-void amdgpu_job_free_func(struct kref *refcount)
+void amdgpu_job_free(struct amdgpu_job *job)
 {
-       struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
+       amdgpu_job_free_resources(job);
+
+       fence_put(job->fence);
+       amdgpu_sync_free(&job->sync);
        kfree(job);
 }
 
@@ -114,22 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f)
 {
-       struct fence *fence;
        int r;
        job->ring = ring;
 
        if (!f)
                return -EINVAL;
 
-       r = amd_sched_job_init(&job->base, &ring->sched,
-                              entity, amdgpu_job_timeout_func,
-                              amdgpu_job_free_func, owner, &fence);
+       r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
        if (r)
                return r;
 
        job->owner = owner;
        job->ctx = entity->fence_context;
-       *f = fence_get(fence);
+       *f = fence_get(&job->base.s_fence->finished);
+       amdgpu_job_free_resources(job);
        amd_sched_entity_push_job(&job->base);
 
        return 0;
@@ -147,8 +144,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
                int r;
 
                r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-                                     &job->base.s_fence->base,
-                                     &job->vm_id, &job->vm_pd_addr);
+                                     &job->base.s_fence->finished,
+                                     job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);
 
@@ -170,29 +167,24 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
        }
        job = to_amdgpu_job(sched_job);
 
-       r = amdgpu_sync_wait(&job->sync);
-       if (r) {
-               DRM_ERROR("failed to sync wait (%d)\n", r);
-               return NULL;
-       }
+       BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
        trace_amdgpu_sched_run_job(job);
        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
                               job->sync.last_vm_update, job, &fence);
-       if (r) {
+       if (r)
                DRM_ERROR("Error scheduling IBs (%d)\n", r);
-               goto err;
-       }
 
-err:
-       job->fence = fence;
-       amdgpu_job_free(job);
+       /* if gpu reset, hw fence will be replaced here */
+       fence_put(job->fence);
+       job->fence = fence_get(fence);
+       amdgpu_job_free_resources(job);
        return fence;
 }
 
 const struct amd_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
-       .begin_job = amd_sched_job_begin,
-       .finish_job = amd_sched_job_finish,
+       .timedout_job = amdgpu_job_timedout,
+       .free_job = amdgpu_job_free_cb
 };
index 40a23704a981072472877fd14ccc2a8393391e3b..a8efbb54423f60740fb197f4bcd9a1d28761c9f1 100644 (file)
@@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
        if (adev->rmmio == NULL)
                goto done_free;
 
-       pm_runtime_get_sync(dev->dev);
+       if (amdgpu_device_is_px(dev)) {
+               pm_runtime_get_sync(dev->dev);
+               pm_runtime_forbid(dev->dev);
+       }
 
        amdgpu_amdkfd_device_fini(adev);
 
@@ -135,13 +138,75 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
        }
 
 out:
-       if (r)
+       if (r) {
+               /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
+               if (adev->rmmio && amdgpu_device_is_px(dev))
+                       pm_runtime_put_noidle(dev->dev);
                amdgpu_driver_unload_kms(dev);
-
+       }
 
        return r;
 }
 
+static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
+                               struct drm_amdgpu_query_fw *query_fw,
+                               struct amdgpu_device *adev)
+{
+       switch (query_fw->fw_type) {
+       case AMDGPU_INFO_FW_VCE:
+               fw_info->ver = adev->vce.fw_version;
+               fw_info->feature = adev->vce.fb_version;
+               break;
+       case AMDGPU_INFO_FW_UVD:
+               fw_info->ver = adev->uvd.fw_version;
+               fw_info->feature = 0;
+               break;
+       case AMDGPU_INFO_FW_GMC:
+               fw_info->ver = adev->mc.fw_version;
+               fw_info->feature = 0;
+               break;
+       case AMDGPU_INFO_FW_GFX_ME:
+               fw_info->ver = adev->gfx.me_fw_version;
+               fw_info->feature = adev->gfx.me_feature_version;
+               break;
+       case AMDGPU_INFO_FW_GFX_PFP:
+               fw_info->ver = adev->gfx.pfp_fw_version;
+               fw_info->feature = adev->gfx.pfp_feature_version;
+               break;
+       case AMDGPU_INFO_FW_GFX_CE:
+               fw_info->ver = adev->gfx.ce_fw_version;
+               fw_info->feature = adev->gfx.ce_feature_version;
+               break;
+       case AMDGPU_INFO_FW_GFX_RLC:
+               fw_info->ver = adev->gfx.rlc_fw_version;
+               fw_info->feature = adev->gfx.rlc_feature_version;
+               break;
+       case AMDGPU_INFO_FW_GFX_MEC:
+               if (query_fw->index == 0) {
+                       fw_info->ver = adev->gfx.mec_fw_version;
+                       fw_info->feature = adev->gfx.mec_feature_version;
+               } else if (query_fw->index == 1) {
+                       fw_info->ver = adev->gfx.mec2_fw_version;
+                       fw_info->feature = adev->gfx.mec2_feature_version;
+               } else
+                       return -EINVAL;
+               break;
+       case AMDGPU_INFO_FW_SMC:
+               fw_info->ver = adev->pm.fw_version;
+               fw_info->feature = 0;
+               break;
+       case AMDGPU_INFO_FW_SDMA:
+               if (query_fw->index >= adev->sdma.num_instances)
+                       return -EINVAL;
+               fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
+               fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
 /*
  * Userspace get information ioctl
  */
@@ -288,67 +353,20 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
        }
        case AMDGPU_INFO_TIMESTAMP:
-               ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
+               ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
        case AMDGPU_INFO_FW_VERSION: {
                struct drm_amdgpu_info_firmware fw_info;
+               int ret;
 
                /* We only support one instance of each IP block right now. */
                if (info->query_fw.ip_instance != 0)
                        return -EINVAL;
 
-               switch (info->query_fw.fw_type) {
-               case AMDGPU_INFO_FW_VCE:
-                       fw_info.ver = adev->vce.fw_version;
-                       fw_info.feature = adev->vce.fb_version;
-                       break;
-               case AMDGPU_INFO_FW_UVD:
-                       fw_info.ver = adev->uvd.fw_version;
-                       fw_info.feature = 0;
-                       break;
-               case AMDGPU_INFO_FW_GMC:
-                       fw_info.ver = adev->mc.fw_version;
-                       fw_info.feature = 0;
-                       break;
-               case AMDGPU_INFO_FW_GFX_ME:
-                       fw_info.ver = adev->gfx.me_fw_version;
-                       fw_info.feature = adev->gfx.me_feature_version;
-                       break;
-               case AMDGPU_INFO_FW_GFX_PFP:
-                       fw_info.ver = adev->gfx.pfp_fw_version;
-                       fw_info.feature = adev->gfx.pfp_feature_version;
-                       break;
-               case AMDGPU_INFO_FW_GFX_CE:
-                       fw_info.ver = adev->gfx.ce_fw_version;
-                       fw_info.feature = adev->gfx.ce_feature_version;
-                       break;
-               case AMDGPU_INFO_FW_GFX_RLC:
-                       fw_info.ver = adev->gfx.rlc_fw_version;
-                       fw_info.feature = adev->gfx.rlc_feature_version;
-                       break;
-               case AMDGPU_INFO_FW_GFX_MEC:
-                       if (info->query_fw.index == 0) {
-                               fw_info.ver = adev->gfx.mec_fw_version;
-                               fw_info.feature = adev->gfx.mec_feature_version;
-                       } else if (info->query_fw.index == 1) {
-                               fw_info.ver = adev->gfx.mec2_fw_version;
-                               fw_info.feature = adev->gfx.mec2_feature_version;
-                       } else
-                               return -EINVAL;
-                       break;
-               case AMDGPU_INFO_FW_SMC:
-                       fw_info.ver = adev->pm.fw_version;
-                       fw_info.feature = 0;
-                       break;
-               case AMDGPU_INFO_FW_SDMA:
-                       if (info->query_fw.index >= adev->sdma.num_instances)
-                               return -EINVAL;
-                       fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
-                       fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
-                       break;
-               default:
-                       return -EINVAL;
-               }
+               ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
+               if (ret)
+                       return ret;
+
                return copy_to_user(out, &fw_info,
                                    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
        }
@@ -447,7 +465,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        dev_info.max_memory_clock = adev->pm.default_mclk * 10;
                }
                dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
-               dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
+               dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
+                       adev->gfx.config.max_shader_engines;
                dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
                dev_info._pad = 0;
                dev_info.ids_flags = 0;
@@ -755,3 +774,130 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
        DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct amdgpu_device *adev = dev->dev_private;
+       struct drm_amdgpu_info_firmware fw_info;
+       struct drm_amdgpu_query_fw query_fw;
+       int ret, i;
+
+       /* VCE */
+       query_fw.fw_type = AMDGPU_INFO_FW_VCE;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* UVD */
+       query_fw.fw_type = AMDGPU_INFO_FW_UVD;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* GMC */
+       query_fw.fw_type = AMDGPU_INFO_FW_GMC;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* ME */
+       query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* PFP */
+       query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* CE */
+       query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* RLC */
+       query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* MEC */
+       query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
+       query_fw.index = 0;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* MEC2 */
+       if (adev->asic_type == CHIP_KAVERI ||
+           (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
+               query_fw.index = 1;
+               ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+               if (ret)
+                       return ret;
+               seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
+                          fw_info.feature, fw_info.ver);
+       }
+
+       /* SMC */
+       query_fw.fw_type = AMDGPU_INFO_FW_SMC;
+       ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+       if (ret)
+               return ret;
+       seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
+                  fw_info.feature, fw_info.ver);
+
+       /* SDMA */
+       query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               query_fw.index = i;
+               ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+               if (ret)
+                       return ret;
+               seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
+                          i, fw_info.feature, fw_info.ver);
+       }
+
+       return 0;
+}
+
+static const struct drm_info_list amdgpu_firmware_info_list[] = {
+       {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
+};
+#endif
+
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+       return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
+                                       ARRAY_SIZE(amdgpu_firmware_info_list));
+#else
+       return 0;
+#endif
+}
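
Once registered, the same data the INFO ioctl exposes becomes readable directly, e.g. via cat /sys/kernel/debug/dri/0/amdgpu_firmware_info (the DRM minor number varies per system); each line is produced by the shared amdgpu_firmware_info() helper above.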
index 7ecea83ce453cae868306b95b1600a01653a6de7..6f0873c75a25650feb9c65330f1a3b133ab64f50 100644 (file)
@@ -589,6 +589,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem)
 {
        struct amdgpu_bo *rbo;
+       struct ttm_mem_reg *old_mem = &bo->mem;
 
        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return;
@@ -602,6 +603,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 
        /* move_notify is called before move happens */
        amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
+
+       trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
 }
 
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
index 589b36e8c5cf4c1ac3373da7352bd57b0fd5758d..ff63b88b0ffaf721a58d63f81bb47ab227ac93b9 100644 (file)
@@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type state = 0;
-       long idx;
+       unsigned long idx;
        int ret;
 
        if (strlen(buf) == 1)
                adev->pp_force_state_enabled = false;
-       else {
-               ret = kstrtol(buf, 0, &idx);
+       else if (adev->pp_enabled) {
+               struct pp_states_info data;
 
-               if (ret) {
+               ret = kstrtoul(buf, 0, &idx);
+               if (ret || idx >= ARRAY_SIZE(data.states)) {
                        count = -EINVAL;
                        goto fail;
                }
 
-               if (adev->pp_enabled) {
-                       struct pp_states_info data;
-                       amdgpu_dpm_get_pp_num_states(adev, &data);
-                       state = data.states[idx];
-                       /* only set user selected power states */
-                       if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
-                               state != POWER_STATE_TYPE_DEFAULT) {
-                               amdgpu_dpm_dispatch_task(adev,
-                                               AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
-                               adev->pp_force_state_enabled = true;
-                       }
+               amdgpu_dpm_get_pp_num_states(adev, &data);
+               state = data.states[idx];
+               /* only set user selected power states */
+               if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+                   state != POWER_STATE_TYPE_DEFAULT) {
+                       amdgpu_dpm_dispatch_task(adev,
+                                       AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+                       adev->pp_force_state_enabled = true;
                }
        }
 fail:
@@ -349,6 +347,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 
        if (adev->pp_enabled)
                size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+       else if (adev->pm.funcs->print_clock_levels)
+               size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
 
        return size;
 }
@@ -365,7 +365,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
        uint32_t i, mask = 0;
        char sub_str[2];
 
-       for (i = 0; i < strlen(buf) - 1; i++) {
+       for (i = 0; i < strlen(buf); i++) {
+               if (*(buf + i) == '\n')
+                       continue;
                sub_str[0] = *(buf + i);
                sub_str[1] = '\0';
                ret = kstrtol(sub_str, 0, &level);
@@ -379,6 +381,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 
        if (adev->pp_enabled)
                amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+       else if (adev->pm.funcs->force_clock_level)
+               adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
 fail:
        return count;
 }
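
With the trailing newline now skipped, each digit character written to the file selects a DPM level by bit position. A minimal sketch of forcing sclk levels 0 and 2 (the sysfs path is the usual location, assumed here; the card index varies):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/class/drm/card0/device/pp_dpm_sclk", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* characters '0' and '2' -> mask = 0x5 (levels 0 and 2) */
            if (write(fd, "02\n", 3) != 3)
                    perror("write");
            close(fd);
            return 0;
    }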
@@ -393,6 +397,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
 
        if (adev->pp_enabled)
                size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+       else if (adev->pm.funcs->print_clock_levels)
+               size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
 
        return size;
 }
@@ -409,7 +415,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
        uint32_t i, mask = 0;
        char sub_str[2];
 
-       for (i = 0; i < strlen(buf) - 1; i++) {
+       for (i = 0; i < strlen(buf); i++) {
+               if (*(buf + i) == '\n')
+                       continue;
                sub_str[0] = *(buf + i);
                sub_str[1] = '\0';
                ret = kstrtol(sub_str, 0, &level);
@@ -423,6 +431,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 
        if (adev->pp_enabled)
                amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+       else if (adev->pm.funcs->force_clock_level)
+               adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
 fail:
        return count;
 }
@@ -437,6 +447,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 
        if (adev->pp_enabled)
                size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+       else if (adev->pm.funcs->print_clock_levels)
+               size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
 
        return size;
 }
@@ -453,7 +465,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
        uint32_t i, mask = 0;
        char sub_str[2];
 
-       for (i = 0; i < strlen(buf) - 1; i++) {
+       for (i = 0; i < strlen(buf); i++) {
+               if (*(buf + i) == '\n')
+                       continue;
                sub_str[0] = *(buf + i);
                sub_str[1] = '\0';
                ret = kstrtol(sub_str, 0, &level);
@@ -467,6 +481,100 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 
        if (adev->pp_enabled)
                amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+       else if (adev->pm.funcs->force_clock_level)
+               adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+fail:
+       return count;
+}
+
+static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
+               struct device_attribute *attr,
+               char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       uint32_t value = 0;
+
+       if (adev->pp_enabled)
+               value = amdgpu_dpm_get_sclk_od(adev);
+       else if (adev->pm.funcs->get_sclk_od)
+               value = adev->pm.funcs->get_sclk_od(adev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
+               struct device_attribute *attr,
+               const char *buf,
+               size_t count)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       int ret;
+       long int value;
+
+       ret = kstrtol(buf, 0, &value);
+
+       if (ret) {
+               count = -EINVAL;
+               goto fail;
+       }
+
+       if (adev->pp_enabled) {
+               amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+               amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+       } else if (adev->pm.funcs->set_sclk_od) {
+               adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+               adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+               amdgpu_pm_compute_clocks(adev);
+       }
+
+fail:
+       return count;
+}
+
+static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
+               struct device_attribute *attr,
+               char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       uint32_t value = 0;
+
+       if (adev->pp_enabled)
+               value = amdgpu_dpm_get_mclk_od(adev);
+       else if (adev->pm.funcs->get_mclk_od)
+               value = adev->pm.funcs->get_mclk_od(adev);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
+               struct device_attribute *attr,
+               const char *buf,
+               size_t count)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = ddev->dev_private;
+       int ret;
+       long int value;
+
+       ret = kstrtol(buf, 0, &value);
+
+       if (ret) {
+               count = -EINVAL;
+               goto fail;
+       }
+
+       if (adev->pp_enabled) {
+               amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+               amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
+       } else if (adev->pm.funcs->set_mclk_od) {
+               adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+               adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+               amdgpu_pm_compute_clocks(adev);
+       }
+
 fail:
        return count;
 }
@@ -492,6 +600,12 @@ static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
 static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
                amdgpu_get_pp_dpm_pcie,
                amdgpu_set_pp_dpm_pcie);
+static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
+               amdgpu_get_pp_sclk_od,
+               amdgpu_set_pp_sclk_od);
+static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
+               amdgpu_get_pp_mclk_od,
+               amdgpu_set_pp_mclk_od);
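The new pp_sclk_od and pp_mclk_od files expose the engine and memory clock overdrive settings as a single integer (a percentage above the default clock on the powerplay path). A hedged userspace example; the sysfs path assumes card0 is the amdgpu device:

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/class/drm/card0/device/pp_sclk_od";
            int value = 0;
            FILE *f = fopen(path, "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &value) == 1)
                    printf("sclk overdrive: %d\n", value);
            fclose(f);

            /* writes land in amdgpu_set_pp_sclk_od(); needs root,
             * and 0 restores the default clock
             */
            f = fopen(path, "w");
            if (f) {
                    fprintf(f, "0\n");
                    fclose(f);
            }
            return 0;
    }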
 
 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
@@ -1110,22 +1224,34 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
                        DRM_ERROR("failed to create device file pp_table\n");
                        return ret;
                }
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_sclk\n");
-                       return ret;
-               }
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_mclk\n");
-                       return ret;
-               }
-               ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
-               if (ret) {
-                       DRM_ERROR("failed to create device file pp_dpm_pcie\n");
-                       return ret;
-               }
        }
+
+       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
+       if (ret) {
+               DRM_ERROR("failed to create device file pp_dpm_sclk\n");
+               return ret;
+       }
+       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
+       if (ret) {
+               DRM_ERROR("failed to create device file pp_dpm_mclk\n");
+               return ret;
+       }
+       ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
+       if (ret) {
+               DRM_ERROR("failed to create device file pp_dpm_pcie\n");
+               return ret;
+       }
+       ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
+       if (ret) {
+               DRM_ERROR("failed to create device file pp_sclk_od\n");
+               return ret;
+       }
+       ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
+       if (ret) {
+               DRM_ERROR("failed to create device file pp_mclk_od\n");
+               return ret;
+       }
+
        ret = amdgpu_debugfs_pm_init(adev);
        if (ret) {
                DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1148,10 +1274,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
                device_remove_file(adev->dev, &dev_attr_pp_cur_state);
                device_remove_file(adev->dev, &dev_attr_pp_force_state);
                device_remove_file(adev->dev, &dev_attr_pp_table);
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
-               device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
        }
+       device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
+       device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
+       device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
+       device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
+       device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
 }
 
 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
index 6bd961fb43dcc7c7e3562d994e5fc0e79515ed01..c5738a22b69041356db2ef7e488defce76db6f30 100644 (file)
@@ -52,6 +52,7 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
                pp_init->chip_family = adev->family;
                pp_init->chip_id = adev->asic_type;
                pp_init->device = amdgpu_cgs_create_device(adev);
+               pp_init->powercontainment_enabled = amdgpu_powercontainment;
 
                ret = amd_powerplay_init(pp_init, amd_pp);
                kfree(pp_init);
@@ -183,13 +184,6 @@ static int amdgpu_pp_sw_fini(void *handle)
        if (ret)
                return ret;
 
-#ifdef CONFIG_DRM_AMD_POWERPLAY
-       if (adev->pp_enabled) {
-               amdgpu_pm_sysfs_fini(adev);
-               amd_powerplay_fini(adev->powerplay.pp_handle);
-       }
-#endif
-
        return ret;
 }
 
@@ -223,6 +217,22 @@ static int amdgpu_pp_hw_fini(void *handle)
        return ret;
 }
 
+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       if (adev->pp_enabled) {
+               amdgpu_pm_sysfs_fini(adev);
+               amd_powerplay_fini(adev->powerplay.pp_handle);
+       }
+
+       if (adev->powerplay.ip_funcs->late_fini)
+               adev->powerplay.ip_funcs->late_fini(
+                         adev->powerplay.pp_handle);
+#endif
+}
+
 static int amdgpu_pp_suspend(void *handle)
 {
        int ret = 0;
@@ -311,6 +321,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
        .sw_fini = amdgpu_pp_sw_fini,
        .hw_init = amdgpu_pp_hw_init,
        .hw_fini = amdgpu_pp_hw_fini,
+       .late_fini = amdgpu_pp_late_fini,
        .suspend = amdgpu_pp_suspend,
        .resume = amdgpu_pp_resume,
        .is_idle = amdgpu_pp_is_idle,
index 3b02272db678e173f2caaffd96527aa332326dc0..3b885e3e9b56c006e3881bc966b3f969c8c244cb 100644 (file)
@@ -28,6 +28,7 @@
  */
 #include <linux/seq_file.h>
 #include <linux/slab.h>
+#include <linux/debugfs.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -48,6 +49,7 @@
  */
 static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
                                    struct amdgpu_ring *ring);
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
 
 /**
  * amdgpu_ring_alloc - allocate space on the ring buffer
@@ -139,78 +141,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
        ring->wptr = ring->wptr_old;
 }
 
-/**
- * amdgpu_ring_backup - Back up the content of a ring
- *
- * @ring: the ring we want to back up
- *
- * Saves all unprocessed commits from a ring, returns the number of dwords saved.
- */
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
-                           uint32_t **data)
-{
-       unsigned size, ptr, i;
-
-       *data = NULL;
-
-       if (ring->ring_obj == NULL)
-               return 0;
-
-       /* it doesn't make sense to save anything if all fences are signaled */
-       if (!amdgpu_fence_count_emitted(ring))
-               return 0;
-
-       ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
-       size = ring->wptr + (ring->ring_size / 4);
-       size -= ptr;
-       size &= ring->ptr_mask;
-       if (size == 0)
-               return 0;
-
-       /* and then save the content of the ring */
-       *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
-       if (!*data)
-               return 0;
-       for (i = 0; i < size; ++i) {
-               (*data)[i] = ring->ring[ptr++];
-               ptr &= ring->ptr_mask;
-       }
-
-       return size;
-}
-
-/**
- * amdgpu_ring_restore - append saved commands to the ring again
- *
- * @ring: ring to append commands to
- * @size: number of dwords we want to write
- * @data: saved commands
- *
- * Allocates space on the ring and restore the previously saved commands.
- */
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
-                       unsigned size, uint32_t *data)
-{
-       int i, r;
-
-       if (!size || !data)
-               return 0;
-
-       /* restore the saved ring content */
-       r = amdgpu_ring_alloc(ring, size);
-       if (r)
-               return r;
-
-       for (i = 0; i < size; ++i) {
-               amdgpu_ring_write(ring, data[i]);
-       }
-
-       amdgpu_ring_commit(ring);
-       kfree(data);
-       return 0;
-}
-
 /**
  * amdgpu_ring_init - init driver ring struct.
  *
@@ -260,14 +190,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                return r;
        }
 
-       r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
-       if (r) {
-               dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
-               return r;
-       }
-       ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
-       ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
-
        r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
        if (r) {
                dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
@@ -310,6 +232,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                }
                r = amdgpu_bo_kmap(ring->ring_obj,
                                       (void **)&ring->ring);
+
+               memset((void *)ring->ring, 0, ring->ring_size);
+
                amdgpu_bo_unreserve(ring->ring_obj);
                if (r) {
                        dev_err(adev->dev, "(%d) ring map failed\n", r);
@@ -343,10 +268,10 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
        ring->ring = NULL;
        ring->ring_obj = NULL;
 
+       amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
        amdgpu_wb_free(ring->adev, ring->fence_offs);
        amdgpu_wb_free(ring->adev, ring->rptr_offs);
        amdgpu_wb_free(ring->adev, ring->wptr_offs);
-       amdgpu_wb_free(ring->adev, ring->next_rptr_offs);
 
        if (ring_obj) {
                r = amdgpu_bo_reserve(ring_obj, false);
@@ -357,6 +282,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
                }
                amdgpu_bo_unref(&ring_obj);
        }
+       amdgpu_debugfs_ring_fini(ring);
 }
 
 /*
@@ -364,57 +290,62 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
  */
 #if defined(CONFIG_DEBUG_FS)
 
-static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
+/* Layout of file is 12 bytes consisting of
+ * - rptr
+ * - wptr
+ * - driver's copy of wptr
+ *
+ * followed by n words of ring data
+ */
+static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
+                                       size_t size, loff_t *pos)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct amdgpu_device *adev = dev->dev_private;
-       int roffset = (unsigned long)node->info_ent->data;
-       struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
-       uint32_t rptr, wptr, rptr_next;
-       unsigned i;
-
-       wptr = amdgpu_ring_get_wptr(ring);
-       seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr);
-
-       rptr = amdgpu_ring_get_rptr(ring);
-       rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
-
-       seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr);
-
-       seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
-                  ring->wptr, ring->wptr);
-
-       if (!ring->ready)
-               return 0;
-
-       /* print 8 dw before current rptr as often it's the last executed
-        * packet that is the root issue
-        */
-       i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
-       while (i != rptr) {
-               seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
-               if (i == rptr)
-                       seq_puts(m, " *");
-               if (i == rptr_next)
-                       seq_puts(m, " #");
-               seq_puts(m, "\n");
-               i = (i + 1) & ring->ptr_mask;
+       struct amdgpu_ring *ring = (struct amdgpu_ring *)f->f_inode->i_private;
+       int r, i;
+       uint32_t value, result, early[3];
+
+       if (*pos & 3 || size & 3)
+               return -EINVAL;
+
+       result = 0;
+
+       if (*pos < 12) {
+               early[0] = amdgpu_ring_get_rptr(ring);
+               early[1] = amdgpu_ring_get_wptr(ring);
+               early[2] = ring->wptr;
+               for (i = *pos / 4; i < 3 && size; i++) {
+                       r = put_user(early[i], (uint32_t *)buf);
+                       if (r)
+                               return r;
+                       buf += 4;
+                       result += 4;
+                       size -= 4;
+                       *pos += 4;
+               }
        }
-       while (i != wptr) {
-               seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
-               if (i == rptr)
-                       seq_puts(m, " *");
-               if (i == rptr_next)
-                       seq_puts(m, " #");
-               seq_puts(m, "\n");
-               i = (i + 1) & ring->ptr_mask;
+
+       while (size) {
+               if (*pos >= (ring->ring_size + 12))
+                       return result;
+
+               value = ring->ring[(*pos - 12)/4];
+               r = put_user(value, (uint32_t *)buf);
+               if (r)
+                       return r;
+               buf += 4;
+               result += 4;
+               size -= 4;
+               *pos += 4;
        }
-       return 0;
+
+       return result;
 }
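A hedged userspace reader for the new per-ring file, decoding the 12-byte header described in the comment above; the debugfs path and ring name are assumptions for a typical single-GPU setup:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint32_t hdr[3];
            int fd = open("/sys/kernel/debug/dri/0/amdgpu_ring_gfx",
                          O_RDONLY);

            if (fd < 0)
                    return 1;
            /* offsets and sizes must stay 4-byte aligned, matching
             * the (*pos & 3 || size & 3) check in the kernel
             */
            if (read(fd, hdr, sizeof(hdr)) != sizeof(hdr)) {
                    close(fd);
                    return 1;
            }
            printf("rptr=0x%08x wptr=0x%08x driver wptr=0x%08x\n",
                   hdr[0], hdr[1], hdr[2]);
            /* the ring contents follow as 32-bit words */
            close(fd);
            return 0;
    }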
 
-static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
-static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];
+static const struct file_operations amdgpu_debugfs_ring_fops = {
+       .owner = THIS_MODULE,
+       .read = amdgpu_debugfs_ring_read,
+       .llseek = default_llseek
+};
 
 #endif
 
@@ -422,28 +353,27 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
                                    struct amdgpu_ring *ring)
 {
 #if defined(CONFIG_DEBUG_FS)
-       unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
-       unsigned i;
-       struct drm_info_list *info;
-       char *name;
-
-       for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
-               info = &amdgpu_debugfs_ring_info_list[i];
-               if (!info->data)
-                       break;
-       }
+       struct drm_minor *minor = adev->ddev->primary;
+       struct dentry *ent, *root = minor->debugfs_root;
+       char name[32];
 
-       if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
-               return -ENOSPC;
-
-       name = &amdgpu_debugfs_ring_names[i][0];
        sprintf(name, "amdgpu_ring_%s", ring->name);
-       info->name = name;
-       info->show = amdgpu_debugfs_ring_info;
-       info->driver_features = 0;
-       info->data = (void*)(uintptr_t)offset;
 
-       return amdgpu_debugfs_add_files(adev, info, 1);
+       ent = debugfs_create_file(name,
+                                 S_IFREG | S_IRUGO, root,
+                                 ring, &amdgpu_debugfs_ring_fops);
+       if (IS_ERR(ent))
+               return PTR_ERR(ent);
+
+       i_size_write(ent->d_inode, ring->ring_size + 12);
+       ring->ent = ent;
 #endif
        return 0;
 }
+
+static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
+{
+#if defined(CONFIG_DEBUG_FS)
+       debugfs_remove(ring->ent);
+#endif
+}
index 8bf84efafb049cd693e156a8dbf2dff396c70164..d8af37a845f4ee3d80ffdf2b27f00565150817f3 100644 (file)
@@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                return r;
        }
        r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+       memset(sa_manager->cpu_ptr, 0, sa_manager->size);
        amdgpu_bo_unreserve(sa_manager->bo);
        return r;
 }
@@ -427,7 +428,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                           soffset, eoffset, eoffset - soffset);
 
                if (i->fence)
-                       seq_printf(m, " protected by 0x%08x on context %d",
+                       seq_printf(m, " protected by 0x%08x on context %llu",
                                   i->fence->seqno, i->fence->context);
 
                seq_printf(m, "\n");
index 34a92808bbd4dabc7ad7f62e4d5beec259bb6f82..5c8d3022fb8705acaf643519e27b2e80a187f0eb 100644 (file)
@@ -223,13 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_sync_is_idle - test if all fences are signaled
+ * amdgpu_sync_peek_fence - get the next fence not signaled yet
  *
  * @sync: the sync object
+ * @ring: optional ring to use for test
  *
- * Returns true if all fences in the sync object are signaled.
+ * Returns the next fence not signaled yet without removing it from the sync
+ * object.
  */
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+                                    struct amdgpu_ring *ring)
 {
        struct amdgpu_sync_entry *e;
        struct hlist_node *tmp;
@@ -237,6 +240,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
 
        hash_for_each_safe(sync->fences, i, tmp, e, node) {
                struct fence *f = e->fence;
+               struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+
+               if (ring && s_fence) {
+                       /* For fences from the same ring it is sufficient
+                        * when they are scheduled.
+                        */
+                       if (s_fence->sched == &ring->sched) {
+                               if (fence_is_signaled(&s_fence->scheduled))
+                                       continue;
+
+                               return &s_fence->scheduled;
+                       }
+               }
 
                if (fence_is_signaled(f)) {
                        hash_del(&e->node);
@@ -245,58 +261,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
                        continue;
                }
 
-               return false;
+               return f;
        }
 
-       return true;
+       return NULL;
 }
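The same-ring shortcut above relies on the scheduler's two-stage fences: a job's "scheduled" fence signals when it is handed to the ring, its "finished" fence when the hardware completes it. Because a single ring executes jobs in order, a same-ring dependency only needs the earlier job to be scheduled. A toy model of that rule (not the kernel types):

    #include <stdbool.h>

    struct toy_sched_fence {
            bool scheduled;   /* job picked and pushed to its ring */
            bool finished;    /* hardware executed the job */
    };

    /* FIFO ordering within one ring makes "scheduled" a sufficient
     * wait target; cross-ring dependencies must wait for "finished".
     */
    static bool dependency_satisfied(const struct toy_sched_fence *f,
                                     bool same_ring)
    {
            return same_ring ? f->scheduled : f->finished;
    }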
 
 /**
- * amdgpu_sync_cycle_fences - move fences from one sync object into another
+ * amdgpu_sync_get_fence - get the next fence from the sync object
  *
- * @dst: the destination sync object
- * @src: the source sync object
- * @fence: fence to add to source
+ * @sync: sync object to use
  *
- * Remove all fences from source and put them into destination and add
- * fence as new one into source.
+ * Get and removes the next fence from the sync object not signaled yet.
  */
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
-                            struct fence *fence)
-{
-       struct amdgpu_sync_entry *e, *newone;
-       struct hlist_node *tmp;
-       int i;
-
-       /* Allocate the new entry before moving the old ones */
-       newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
-       if (!newone)
-               return -ENOMEM;
-
-       hash_for_each_safe(src->fences, i, tmp, e, node) {
-               struct fence *f = e->fence;
-
-               hash_del(&e->node);
-               if (fence_is_signaled(f)) {
-                       fence_put(f);
-                       kmem_cache_free(amdgpu_sync_slab, e);
-                       continue;
-               }
-
-               if (amdgpu_sync_add_later(dst, f)) {
-                       kmem_cache_free(amdgpu_sync_slab, e);
-                       continue;
-               }
-
-               hash_add(dst->fences, &e->node, f->context);
-       }
-
-       hash_add(src->fences, &newone->node, fence->context);
-       newone->fence = fence_get(fence);
-
-       return 0;
-}
-
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
 {
        struct amdgpu_sync_entry *e;
@@ -319,25 +296,6 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
        return NULL;
 }
 
-int amdgpu_sync_wait(struct amdgpu_sync *sync)
-{
-       struct amdgpu_sync_entry *e;
-       struct hlist_node *tmp;
-       int i, r;
-
-       hash_for_each_safe(sync->fences, i, tmp, e, node) {
-               r = fence_wait(e->fence, false);
-               if (r)
-                       return r;
-
-               hash_del(&e->node);
-               fence_put(e->fence);
-               kmem_cache_free(amdgpu_sync_slab, e);
-       }
-
-       return 0;
-}
-
 /**
  * amdgpu_sync_free - free the sync object
  *
index 26a5f4acf5840ed6218a49cd654b678005431481..0d8d65eb46cde8206824a12619eeeb5177a7cc16 100644 (file)
 #define TRACE_SYSTEM amdgpu
 #define TRACE_INCLUDE_FILE amdgpu_trace
 
+TRACE_EVENT(amdgpu_mm_rreg,
+           TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+           TP_ARGS(did, reg, value),
+           TP_STRUCT__entry(
+                               __field(unsigned, did)
+                               __field(uint32_t, reg)
+                               __field(uint32_t, value)
+                           ),
+           TP_fast_assign(
+                          __entry->did = did;
+                          __entry->reg = reg;
+                          __entry->value = value;
+                          ),
+           TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+                     (unsigned long)__entry->did,
+                     (unsigned long)__entry->reg,
+                     (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_mm_wreg,
+           TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
+           TP_ARGS(did, reg, value),
+           TP_STRUCT__entry(
+                               __field(unsigned, did)
+                               __field(uint32_t, reg)
+                               __field(uint32_t, value)
+                           ),
+           TP_fast_assign(
+                          __entry->did = did;
+                          __entry->reg = reg;
+                          __entry->value = value;
+                          ),
+           TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
+                     (unsigned long)__entry->did,
+                     (unsigned long)__entry->reg,
+                     (unsigned long)__entry->value)
+);
+
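These two events are intended to be emitted by the MMIO register accessors; the call sites live outside this hunk. Once the kernel is built with them, userspace can toggle them through tracefs. A small sketch, assuming debugfs is mounted at /sys/kernel/debug:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/debug/tracing/events/amdgpu/"
                            "amdgpu_mm_rreg/enable", "w");

            if (!f)
                    return 1;
            fputs("1\n", f);
            fclose(f);
            /* register reads now log "did, reg, value" triples to
             * /sys/kernel/debug/tracing/trace
             */
            return 0;
    }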
 TRACE_EVENT(amdgpu_bo_create,
            TP_PROTO(struct amdgpu_bo *bo),
            TP_ARGS(bo),
            TP_STRUCT__entry(
                             __field(struct amdgpu_bo *, bo)
                             __field(u32, pages)
+                            __field(u32, type)
+                            __field(u32, prefer)
+                            __field(u32, allow)
+                            __field(u32, visible)
                             ),
 
            TP_fast_assign(
                           __entry->bo = bo;
                           __entry->pages = bo->tbo.num_pages;
+                          __entry->type = bo->tbo.mem.mem_type;
+                          __entry->prefer = bo->prefered_domains;
+                          __entry->allow = bo->allowed_domains;
+                          __entry->visible = bo->flags;
                           ),
-           TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+
+           TP_printk("bo=%p,pages=%u,type=%d,preferred=%d,allowed=%d,visible=%d",
+                      __entry->bo, __entry->pages, __entry->type,
+                      __entry->prefer, __entry->allow, __entry->visible)
 );
 
 TRACE_EVENT(amdgpu_cs,
@@ -64,7 +113,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
                           __entry->adev = job->adev;
                           __entry->sched_job = &job->base;
                           __entry->ib = job->ibs;
-                          __entry->fence = &job->base.s_fence->base;
+                          __entry->fence = &job->base.s_fence->finished;
                           __entry->ring_name = job->ring->name;
                           __entry->num_ibs = job->num_ibs;
                           ),
@@ -89,7 +138,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
                           __entry->adev = job->adev;
                           __entry->sched_job = &job->base;
                           __entry->ib = job->ibs;
-                          __entry->fence = &job->base.s_fence->base;
+                          __entry->fence = &job->base.s_fence->finished;
                           __entry->ring_name = job->ring->name;
                           __entry->num_ibs = job->num_ibs;
                           ),
@@ -100,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job,
 
 
 TRACE_EVENT(amdgpu_vm_grab_id,
-           TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
-                    uint64_t pd_addr),
-           TP_ARGS(vm, ring, vmid, pd_addr),
+           TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
+           TP_ARGS(vm, ring, job),
            TP_STRUCT__entry(
                             __field(struct amdgpu_vm *, vm)
                             __field(u32, ring)
                             __field(u32, vmid)
                             __field(u64, pd_addr)
+                            __field(u32, needs_flush)
                             ),
 
            TP_fast_assign(
                           __entry->vm = vm;
                           __entry->ring = ring;
-                          __entry->vmid = vmid;
-                          __entry->pd_addr = pd_addr;
+                          __entry->vmid = job->vm_id;
+                          __entry->pd_addr = job->vm_pd_addr;
+                          __entry->needs_flush = job->vm_needs_flush;
                           ),
-           TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
-                     __entry->ring, __entry->vmid, __entry->pd_addr)
+           TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
+                     __entry->vm, __entry->ring, __entry->vmid,
+                     __entry->pd_addr, __entry->needs_flush)
 );
 
 TRACE_EVENT(amdgpu_vm_bo_map,
@@ -244,13 +295,55 @@ TRACE_EVENT(amdgpu_bo_list_set,
            TP_STRUCT__entry(
                             __field(struct amdgpu_bo_list *, list)
                             __field(struct amdgpu_bo *, bo)
+                            __field(u64, bo_size)
                             ),
 
            TP_fast_assign(
                           __entry->list = list;
                           __entry->bo = bo;
+                          __entry->bo_size = amdgpu_bo_size(bo);
                           ),
-           TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
+           TP_printk("list=%p, bo=%p, bo_size = %Ld",
+                     __entry->list,
+                     __entry->bo,
+                     __entry->bo_size)
+);
+
+TRACE_EVENT(amdgpu_cs_bo_status,
+           TP_PROTO(uint64_t total_bo, uint64_t total_size),
+           TP_ARGS(total_bo, total_size),
+           TP_STRUCT__entry(
+                       __field(u64, total_bo)
+                       __field(u64, total_size)
+                       ),
+
+           TP_fast_assign(
+                       __entry->total_bo = total_bo;
+                       __entry->total_size = total_size;
+                       ),
+           TP_printk("total bo count = %Ld, total bo size = %Ld",
+                       __entry->total_bo, __entry->total_size)
+);
+
+TRACE_EVENT(amdgpu_ttm_bo_move,
+           TP_PROTO(struct amdgpu_bo *bo, uint32_t new_placement, uint32_t old_placement),
+           TP_ARGS(bo, new_placement, old_placement),
+           TP_STRUCT__entry(
+                       __field(struct amdgpu_bo *, bo)
+                       __field(u64, bo_size)
+                       __field(u32, new_placement)
+                       __field(u32, old_placement)
+                       ),
+
+           TP_fast_assign(
+                       __entry->bo      = bo;
+                       __entry->bo_size = amdgpu_bo_size(bo);
+                       __entry->new_placement = new_placement;
+                       __entry->old_placement = old_placement;
+                       ),
+           TP_printk("bo=%p from:%d to %d with size = %Ld",
+                       __entry->bo, __entry->old_placement,
+                       __entry->new_placement, __entry->bo_size)
 );
 
 #endif
index 3b9053af47629617544f8f55d6ec5b714063146c..b7742e62972a61aae553dca11477e89ab8be6e2e 100644 (file)
@@ -286,9 +286,10 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        r = amdgpu_copy_buffer(ring, old_start, new_start,
                               new_mem->num_pages * PAGE_SIZE, /* bytes */
                               bo->resv, &fence);
-       /* FIXME: handle copy error */
-       r = ttm_bo_move_accel_cleanup(bo, fence,
-                                     evict, no_wait_gpu, new_mem);
+       if (r)
+               return r;
+
+       r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
        fence_put(fence);
        return r;
 }
@@ -396,6 +397,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
                return -EINVAL;
 
        adev = amdgpu_get_adev(bo->bdev);
+
+       /* remember the eviction */
+       if (evict)
+               atomic64_inc(&adev->num_evictions);
+
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                amdgpu_move_null(bo, new_mem);
                return 0;
@@ -429,7 +435,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 
        if (r) {
 memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+               r = ttm_bo_move_memcpy(bo, evict, interruptible,
+                                      no_wait_gpu, new_mem);
                if (r) {
                        return r;
                }
index 01abfc21b4a2249c43b5b0ce747b36ac111940c2..a46a64c125d1f2200727b7aa0e886c8b0aae0abb 100644 (file)
@@ -40,7 +40,7 @@
 #include "uvd/uvd_4_2_d.h"
 
 /* 1 second timeout */
-#define UVD_IDLE_TIMEOUT_MS    1000
+#define UVD_IDLE_TIMEOUT       msecs_to_jiffies(1000)
 /* Polaris10/11 firmware version */
 #define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
 
@@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
        int r;
 
-       if (adev->uvd.vcpu_bo == NULL)
-               return 0;
+       kfree(adev->uvd.saved_bo);
 
        amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
 
-       r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
-       if (!r) {
-               amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
-               amdgpu_bo_unpin(adev->uvd.vcpu_bo);
-               amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
-       }
+       if (adev->uvd.vcpu_bo) {
+               r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+               if (!r) {
+                       amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+                       amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+                       amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+               }
 
-       amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+               amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+       }
 
        amdgpu_ring_fini(&adev->uvd.ring);
 
@@ -661,7 +662,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
                }
 
                DRM_ERROR("No more free UVD handles!\n");
-               return -EINVAL;
+               return -ENOSPC;
 
        case 1:
                /* it's a decode msg, calc buffer sizes */
@@ -967,7 +968,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
        if (direct) {
                r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-               job->fence = f;
+               job->fence = fence_get(f);
                if (r)
                        goto err_free;
 
@@ -1105,12 +1106,15 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
        if (fences == 0 && handles == 0) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, false);
+                       /* Workaround: on Polaris10 the UVD clock can
+                        * remain high even after UVD DPM is disabled */
+                       if (adev->asic_type == CHIP_POLARIS10)
+                               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
                } else {
                        amdgpu_asic_set_uvd_clocks(adev, 0, 0);
                }
        } else {
-               schedule_delayed_work(&adev->uvd.idle_work,
-                                     msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+               schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
        }
 }
 
@@ -1118,7 +1122,7 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
 {
        bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
        set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
-                                           msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
+                                           UVD_IDLE_TIMEOUT);
 
        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
index 875626a2eccbd912371e12f4f09e4573521e427c..aeeeb72ebbc4052dc45d22935dea9f66b4870225 100644 (file)
@@ -36,7 +36,7 @@
 #include "cikd.h"
 
 /* 1 second timeout */
-#define VCE_IDLE_TIMEOUT_MS    1000
+#define VCE_IDLE_TIMEOUT       msecs_to_jiffies(1000)
 
 /* Firmware Names */
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -310,8 +310,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
                        amdgpu_asic_set_vce_clocks(adev, 0, 0);
                }
        } else {
-               schedule_delayed_work(&adev->vce.idle_work,
-                                     msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
+               schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
        }
 }
 
@@ -324,17 +323,12 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
  */
 static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
 {
-       bool streams_changed = false;
        bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
-       set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
-                                           msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
 
-       if (adev->pm.dpm_enabled) {
-               /* XXX figure out if the streams changed */
-               streams_changed = false;
-       }
+       set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
+                                           VCE_IDLE_TIMEOUT);
 
-       if (set_clocks || streams_changed) {
+       if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_vce(adev, true);
                } else {
@@ -357,6 +351,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
        int i, r;
        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                uint32_t handle = atomic_read(&adev->vce.handles[i]);
+
                if (!handle || adev->vce.filp[i] != filp)
                        continue;
 
@@ -437,7 +432,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                ib->ptr[i] = 0x0;
 
        r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-       job->fence = f;
+       job->fence = fence_get(f);
        if (r)
                goto err;
 
@@ -499,7 +494,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
        if (direct) {
                r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
-               job->fence = f;
+               job->fence = fence_get(f);
                if (r)
                        goto err;
 
@@ -580,12 +575,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
 * if we don't have another free session index.
  */
 static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
-                                     uint32_t handle, bool *allocated)
+                                     uint32_t handle, uint32_t *allocated)
 {
        unsigned i;
 
-       *allocated = false;
-
        /* validate the handle */
        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                if (atomic_read(&p->adev->vce.handles[i]) == handle) {
@@ -602,7 +595,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
                if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
                        p->adev->vce.filp[i] = p->filp;
                        p->adev->vce.img_size[i] = 0;
-                       *allocated = true;
+                       *allocated |= 1 << i;
                        return i;
                }
        }
@@ -622,9 +615,9 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
        struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
        unsigned fb_idx = 0, bs_idx = 0;
        int session_idx = -1;
-       bool destroyed = false;
-       bool created = false;
-       bool allocated = false;
+       uint32_t destroyed = 0;
+       uint32_t created = 0;
+       uint32_t allocated = 0;
        uint32_t tmp, handle = 0;
        uint32_t *size = &tmp;
        int i, r = 0, idx = 0;
@@ -641,30 +634,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                        goto out;
                }
 
-               if (destroyed) {
-                       DRM_ERROR("No other command allowed after destroy!\n");
-                       r = -EINVAL;
-                       goto out;
-               }
-
                switch (cmd) {
-               case 0x00000001: // session
+               case 0x00000001: /* session */
                        handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
                        session_idx = amdgpu_vce_validate_handle(p, handle,
                                                                 &allocated);
-                       if (session_idx < 0)
-                               return session_idx;
+                       if (session_idx < 0) {
+                               r = session_idx;
+                               goto out;
+                       }
                        size = &p->adev->vce.img_size[session_idx];
                        break;
 
-               case 0x00000002: // task info
+               case 0x00000002: /* task info */
                        fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
                        bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
                        break;
 
-               case 0x01000001: // create
-                       created = true;
-                       if (!allocated) {
+               case 0x01000001: /* create */
+                       created |= 1 << session_idx;
+                       if (destroyed & (1 << session_idx)) {
+                               destroyed &= ~(1 << session_idx);
+                               allocated |= 1 << session_idx;
+                       } else if (!(allocated & (1 << session_idx))) {
                                DRM_ERROR("Handle already in use!\n");
                                r = -EINVAL;
                                goto out;
@@ -675,16 +668,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                                8 * 3 / 2;
                        break;
 
-               case 0x04000001: // config extension
-               case 0x04000002: // pic control
-               case 0x04000005: // rate control
-               case 0x04000007: // motion estimation
-               case 0x04000008: // rdo
-               case 0x04000009: // vui
-               case 0x05000002: // auxiliary buffer
+               case 0x04000001: /* config extension */
+               case 0x04000002: /* pic control */
+               case 0x04000005: /* rate control */
+               case 0x04000007: /* motion estimation */
+               case 0x04000008: /* rdo */
+               case 0x04000009: /* vui */
+               case 0x05000002: /* auxiliary buffer */
                        break;
 
-               case 0x03000001: // encode
+               case 0x03000001: /* encode */
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
                                                *size, 0);
                        if (r)
@@ -696,18 +689,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                                goto out;
                        break;
 
-               case 0x02000001: // destroy
-                       destroyed = true;
+               case 0x02000001: /* destroy */
+                       destroyed |= 1 << session_idx;
                        break;
 
-               case 0x05000001: // context buffer
+               case 0x05000001: /* context buffer */
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
                                                *size * 2, 0);
                        if (r)
                                goto out;
                        break;
 
-               case 0x05000004: // video bitstream buffer
+               case 0x05000004: /* video bitstream buffer */
                        tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
                                                tmp, bs_idx);
@@ -715,7 +708,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                                goto out;
                        break;
 
-               case 0x05000005: // feedback buffer
+               case 0x05000005: /* feedback buffer */
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
                                                4096, fb_idx);
                        if (r)
@@ -737,21 +730,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
                idx += len / 4;
        }
 
-       if (allocated && !created) {
+       if (allocated & ~created) {
                DRM_ERROR("New session without create command!\n");
                r = -ENOENT;
        }
 
 out:
-       if ((!r && destroyed) || (r && allocated)) {
-               /*
-                * IB contains a destroy msg or we have allocated an
-                * handle and got an error, anyway free the handle
-                */
-               for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
-                       atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
+       if (!r) {
+               /* No error, free all destroyed handle slots */
+               tmp = destroyed;
+       } else {
+               /* Error during parsing, free all allocated handle slots */
+               tmp = allocated;
        }
 
+       for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+               if (tmp & (1 << i))
+                       atomic_set(&p->adev->vce.handles[i], 0);
+
        return r;
 }
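Switching the create/destroy/allocated tracking from bools to per-session bitmasks lets a single command stream manage several sessions and still clean up precisely: on success only the destroyed slots are freed, on error only the slots this stream allocated. A small worked example of the bookkeeping, with illustrative values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t allocated = (1u << 0) | (1u << 2); /* new handles */
            uint32_t created   = (1u << 0) | (1u << 2); /* create cmds */
            uint32_t destroyed = 1u << 2;               /* destroy cmd */

            /* the "New session without create command" check passes */
            assert((allocated & ~created) == 0);

            /* success frees only slot 2; an error would instead free
             * every slot in 'allocated'
             */
            assert(destroyed == (1u << 2));
            return 0;
    }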
 
index 9f36ed30ba115b9be28ee3dff2eea75c2fe796f4..8e642fc48df45c665053a3b835d0abe803d7124b 100644 (file)
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/fence-array.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -114,16 +115,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 /**
  * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
  *
+ * @adev: amdgpu device pointer
  * @vm: vm providing the BOs
  * @duplicates: head of duplicates list
  *
  * Add the page directory to the BO duplicates list
  * for command submission.
  */
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                         struct list_head *duplicates)
 {
+       uint64_t num_evictions;
        unsigned i;
 
+       /* We only need to validate the page tables
+        * if they aren't already valid.
+        */
+       num_evictions = atomic64_read(&adev->num_evictions);
+       if (num_evictions == vm->last_eviction_counter)
+               return;
+
        /* add the vm page table to the list */
        for (i = 0; i <= vm->max_pde_used; ++i) {
                struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
@@ -162,6 +173,13 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
        spin_unlock(&glob->lru_lock);
 }
 
+static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
+                                  struct amdgpu_vm_id *id)
+{
+       return id->current_gpu_reset_count !=
+               atomic_read(&adev->gpu_reset_counter);
+}
+
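The helper compares the reset-counter value snapshotted when the VMID was last assigned against the current global counter; any mismatch means a GPU reset happened in between, so the ID's cached state can no longer be trusted. The pattern in a self-contained, hedged sketch:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_uint gpu_reset_counter;

    struct vm_id {
            unsigned int current_gpu_reset_count; /* snapshot at assign */
    };

    static bool id_is_stale(const struct vm_id *id)
    {
            return id->current_gpu_reset_count !=
                   atomic_load(&gpu_reset_counter);
    }

    int main(void)
    {
            struct vm_id id = { atomic_load(&gpu_reset_counter) };

            assert(!id_is_stale(&id));
            atomic_fetch_add(&gpu_reset_counter, 1); /* a reset happened */
            assert(id_is_stale(&id));
            return 0;
    }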
 /**
  * amdgpu_vm_grab_id - allocate the next free VMID
  *
@@ -174,18 +192,67 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
  */
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync, struct fence *fence,
-                     unsigned *vm_id, uint64_t *vm_pd_addr)
+                     struct amdgpu_job *job)
 {
-       uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
        struct amdgpu_device *adev = ring->adev;
+       uint64_t fence_context = adev->fence_context + ring->idx;
        struct fence *updates = sync->last_vm_update;
-       struct amdgpu_vm_id *id;
-       unsigned i = ring->idx;
-       int r;
+       struct amdgpu_vm_id *id, *idle;
+       struct fence **fences;
+       unsigned i;
+       int r = 0;
+
+       fences = kmalloc_array(adev->vm_manager.num_ids, sizeof(void *),
+                              GFP_KERNEL);
+       if (!fences)
+               return -ENOMEM;
 
        mutex_lock(&adev->vm_manager.lock);
 
+       /* Check if we have an idle VMID */
+       i = 0;
+       list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
+               fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+               if (!fences[i])
+                       break;
+               ++i;
+       }
+
+       /* If we can't find an idle VMID to use, wait until one becomes available */
+       if (&idle->list == &adev->vm_manager.ids_lru) {
+               u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+               unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+               struct fence_array *array;
+               unsigned j;
+
+               for (j = 0; j < i; ++j)
+                       fence_get(fences[j]);
+
+               array = fence_array_create(i, fences, fence_context,
+                                          seqno, true);
+               if (!array) {
+                       for (j = 0; j < i; ++j)
+                               fence_put(fences[j]);
+                       kfree(fences);
+                       r = -ENOMEM;
+                       goto error;
+               }
+
+               r = amdgpu_sync_fence(ring->adev, sync, &array->base);
+               fence_put(&array->base);
+               if (r)
+                       goto error;
+
+               mutex_unlock(&adev->vm_manager.lock);
+               return 0;
+       }
+       kfree(fences);
+
+       job->vm_needs_flush = true;
        /* Check if we can use a VMID already assigned to this VM */
+       i = ring->idx;
        do {
                struct fence *flushed;
 
@@ -196,67 +263,52 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                /* Check all the prerequisites to using this VMID */
                if (!id)
                        continue;
+               if (amdgpu_vm_is_gpu_reset(adev, id))
+                       continue;
 
                if (atomic64_read(&id->owner) != vm->client_id)
                        continue;
 
-               if (pd_addr != id->pd_gpu_addr)
+               if (job->vm_pd_addr != id->pd_gpu_addr)
                        continue;
 
-               if (id->last_user != ring &&
-                   (!id->last_flush || !fence_is_signaled(id->last_flush)))
+               if (!id->last_flush)
                        continue;
 
-               flushed  = id->flushed_updates;
-               if (updates && (!flushed || fence_is_later(updates, flushed)))
+               if (id->last_flush->context != fence_context &&
+                   !fence_is_signaled(id->last_flush))
                        continue;
 
-               /* Good we can use this VMID */
-               if (id->last_user == ring) {
-                       r = amdgpu_sync_fence(ring->adev, sync,
-                                             id->first);
-                       if (r)
-                               goto error;
-               }
+               flushed  = id->flushed_updates;
+               if (updates &&
+                   (!flushed || fence_is_later(updates, flushed)))
+                       continue;
 
-               /* And remember this submission as user of the VMID */
+               /* Good we can use this VMID. Remember this submission as
+                * user of the VMID.
+                */
                r = amdgpu_sync_fence(ring->adev, &id->active, fence);
                if (r)
                        goto error;
 
+               id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
                list_move_tail(&id->list, &adev->vm_manager.ids_lru);
                vm->ids[ring->idx] = id;
 
-               *vm_id = id - adev->vm_manager.ids;
-               *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
-               trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+               job->vm_id = id - adev->vm_manager.ids;
+               job->vm_needs_flush = false;
+               trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
                mutex_unlock(&adev->vm_manager.lock);
                return 0;
 
        } while (i != ring->idx);
 
-       id = list_first_entry(&adev->vm_manager.ids_lru,
-                             struct amdgpu_vm_id,
-                             list);
-
-       if (!amdgpu_sync_is_idle(&id->active)) {
-               struct list_head *head = &adev->vm_manager.ids_lru;
-               struct amdgpu_vm_id *tmp;
+       /* Still no ID to use? Then use the idle one found earlier */
+       id = idle;
 
-               list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
-                                        list) {
-                       if (amdgpu_sync_is_idle(&id->active)) {
-                               list_move(&id->list, head);
-                               head = &id->list;
-                       }
-               }
-               id = list_first_entry(&adev->vm_manager.ids_lru,
-                                     struct amdgpu_vm_id,
-                                     list);
-       }
-
-       r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
+       /* Remember this submission as user of the VMID */
+       r = amdgpu_sync_fence(ring->adev, &id->active, fence);
        if (r)
                goto error;
 
@@ -269,22 +321,46 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        fence_put(id->flushed_updates);
        id->flushed_updates = fence_get(updates);
 
-       id->pd_gpu_addr = pd_addr;
-
+       id->pd_gpu_addr = job->vm_pd_addr;
+       id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
        list_move_tail(&id->list, &adev->vm_manager.ids_lru);
-       id->last_user = ring;
        atomic64_set(&id->owner, vm->client_id);
        vm->ids[ring->idx] = id;
 
-       *vm_id = id - adev->vm_manager.ids;
-       *vm_pd_addr = pd_addr;
-       trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+       job->vm_id = id - adev->vm_manager.ids;
+       trace_amdgpu_vm_grab_id(vm, ring->idx, job);
 
 error:
        mutex_unlock(&adev->vm_manager.lock);
        return r;
 }
 
+static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       const struct amdgpu_ip_block_version *ip_block;
+
+       if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
+               /* only compute rings */
+               return false;
+
+       ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+       if (!ip_block)
+               return false;
+
+       if (ip_block->major <= 7) {
+               /* gfx7 has no workaround */
+               return true;
+       } else if (ip_block->major == 8) {
+               if (adev->gfx.mec_fw_version >= 673)
+                       /* gfx8 is fixed in MEC firmware 673 */
+                       return false;
+               else
+                       return true;
+       }
+       return false;
+}
+
 /**
  * amdgpu_vm_flush - hardware flush the vm
  *
@@ -294,59 +370,52 @@ error:
  *
  * Emit a VM flush when it is necessary.
  */
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
-                   unsigned vm_id, uint64_t pd_addr,
-                   uint32_t gds_base, uint32_t gds_size,
-                   uint32_t gws_base, uint32_t gws_size,
-                   uint32_t oa_base, uint32_t oa_size)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+       struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
        bool gds_switch_needed = ring->funcs->emit_gds_switch && (
-               id->gds_base != gds_base ||
-               id->gds_size != gds_size ||
-               id->gws_base != gws_base ||
-               id->gws_size != gws_size ||
-               id->oa_base != oa_base ||
-               id->oa_size != oa_size);
+               id->gds_base != job->gds_base ||
+               id->gds_size != job->gds_size ||
+               id->gws_base != job->gws_base ||
+               id->gws_size != job->gws_size ||
+               id->oa_base != job->oa_base ||
+               id->oa_size != job->oa_size);
        int r;
 
        if (ring->funcs->emit_pipeline_sync && (
-           pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
-                   ring->type == AMDGPU_RING_TYPE_COMPUTE))
+           job->vm_needs_flush || gds_switch_needed ||
+           amdgpu_vm_ring_has_compute_vm_bug(ring)))
                amdgpu_ring_emit_pipeline_sync(ring);
 
-       if (ring->funcs->emit_vm_flush &&
-           pd_addr != AMDGPU_VM_NO_FLUSH) {
+       if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
+           amdgpu_vm_is_gpu_reset(adev, id))) {
                struct fence *fence;
 
-               trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
-               amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+               trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
+               amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+
+               r = amdgpu_fence_emit(ring, &fence);
+               if (r)
+                       return r;
 
                mutex_lock(&adev->vm_manager.lock);
-               if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
-                       r = amdgpu_fence_emit(ring, &fence);
-                       if (r) {
-                               mutex_unlock(&adev->vm_manager.lock);
-                               return r;
-                       }
-                       fence_put(id->last_flush);
-                       id->last_flush = fence;
-               }
+               fence_put(id->last_flush);
+               id->last_flush = fence;
                mutex_unlock(&adev->vm_manager.lock);
        }
 
        if (gds_switch_needed) {
-               id->gds_base = gds_base;
-               id->gds_size = gds_size;
-               id->gws_base = gws_base;
-               id->gws_size = gws_size;
-               id->oa_base = oa_base;
-               id->oa_size = oa_size;
-               amdgpu_ring_emit_gds_switch(ring, vm_id,
-                                           gds_base, gds_size,
-                                           gws_base, gws_size,
-                                           oa_base, oa_size);
+               id->gds_base = job->gds_base;
+               id->gds_size = job->gds_size;
+               id->gws_base = job->gws_base;
+               id->gws_size = job->gws_size;
+               id->oa_base = job->oa_base;
+               id->oa_size = job->oa_size;
+               amdgpu_ring_emit_gds_switch(ring, job->vm_id,
+                                           job->gds_base, job->gds_size,
+                                           job->gws_base, job->gws_size,
+                                           job->oa_base, job->oa_size);
        }
 
        return 0;
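
The rework above folds the flush parameters into struct amdgpu_job, so the decision logic only has to compare cached per-VMID state against the job. A minimal standalone sketch of that comparison, using simplified stand-in types rather than the driver's real definitions:

    struct gds_state {
            unsigned gds_base, gds_size;
            unsigned gws_base, gws_size;
            unsigned oa_base, oa_size;
    };

    /* a GDS switch is needed whenever any cached range differs */
    static int gds_switch_needed(const struct gds_state *cached,
                                 const struct gds_state *job)
    {
            return cached->gds_base != job->gds_base ||
                   cached->gds_size != job->gds_size ||
                   cached->gws_base != job->gws_base ||
                   cached->gws_size != job->gws_size ||
                   cached->oa_base  != job->oa_base  ||
                   cached->oa_size  != job->oa_size;
    }
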
@@ -723,7 +792,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
- * @dst: destination address to map to
+ * @dst: destination address to map to; advanced to the next destination inside the function
  * @flags: mapping flags
  *
  * Update the page tables in the range @start - @end.
@@ -737,49 +806,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 {
        const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
 
-       uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0;
-       uint64_t addr;
+       uint64_t cur_pe_start, cur_pe_end, cur_dst;
+       uint64_t addr; /* next GPU address to be updated */
+       uint64_t pt_idx;
+       struct amdgpu_bo *pt;
+       unsigned nptes; /* number of PTEs to update next */
+       uint64_t next_pe_start;
+
+       /* initialize the variables */
+       addr = start;
+       pt_idx = addr >> amdgpu_vm_block_size;
+       pt = vm->page_tables[pt_idx].entry.robj;
+
+       if ((addr & ~mask) == (end & ~mask))
+               nptes = end - addr;
+       else
+               nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+
+       cur_pe_start = amdgpu_bo_gpu_offset(pt);
+       cur_pe_start += (addr & mask) * 8;
+       cur_pe_end = cur_pe_start + 8 * nptes;
+       cur_dst = dst;
+
+       /* advance to the next page-table block */
+       addr += nptes;
+       dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 
        /* walk over the address space and update the page tables */
-       for (addr = start; addr < end; ) {
-               uint64_t pt_idx = addr >> amdgpu_vm_block_size;
-               struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
-               unsigned nptes;
-               uint64_t pe_start;
+       while (addr < end) {
+               pt_idx = addr >> amdgpu_vm_block_size;
+               pt = vm->page_tables[pt_idx].entry.robj;
 
                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
                else
                        nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
 
-               pe_start = amdgpu_bo_gpu_offset(pt);
-               pe_start += (addr & mask) * 8;
-
-               if (last_pe_end != pe_start) {
+               next_pe_start = amdgpu_bo_gpu_offset(pt);
+               next_pe_start += (addr & mask) * 8;
 
+               if (cur_pe_end == next_pe_start) {
+                       /* The next PTB is contiguous with the current one:
+                        * don't call amdgpu_vm_frag_ptes yet, just extend
+                        * the range so both PTBs are updated together.
+                        */
+                       cur_pe_end += 8 * nptes;
+               } else {
                        amdgpu_vm_frag_ptes(adev, vm_update_params,
-                                           last_pe_start, last_pe_end,
-                                           last_dst, flags);
+                                           cur_pe_start, cur_pe_end,
+                                           cur_dst, flags);
 
-                       last_pe_start = pe_start;
-                       last_pe_end = pe_start + 8 * nptes;
-                       last_dst = dst;
-               } else {
-                       last_pe_end += 8 * nptes;
+                       cur_pe_start = next_pe_start;
+                       cur_pe_end = next_pe_start + 8 * nptes;
+                       cur_dst = dst;
                }
 
+               /* advance to the next page-table block */
                addr += nptes;
                dst += nptes * AMDGPU_GPU_PAGE_SIZE;
        }
 
-       amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
-                           last_pe_end, last_dst, flags);
+       amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
+                           cur_pe_end, cur_dst, flags);
 }
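
The loop restructuring above exists to merge physically adjacent page-table blocks into a single amdgpu_vm_frag_ptes call. The same interval-coalescing idea on plain integers, as a standalone sketch where flush_range() is a hypothetical stand-in for amdgpu_vm_frag_ptes and at least one range is assumed:

    #include <stdio.h>

    static void flush_range(unsigned long start, unsigned long end)
    {
            printf("update [%lx, %lx)\n", start, end);
    }

    static void coalesce(const unsigned long *starts,
                         const unsigned long *lens, int n)
    {
            unsigned long cur_start = starts[0];
            unsigned long cur_end = starts[0] + lens[0];
            int i;

            for (i = 1; i < n; i++) {
                    if (starts[i] == cur_end) {
                            /* adjacent: grow the current window */
                            cur_end += lens[i];
                    } else {
                            /* gap: flush what we have, open a new window */
                            flush_range(cur_start, cur_end);
                            cur_start = starts[i];
                            cur_end = starts[i] + lens[i];
                    }
            }
            flush_range(cur_start, cur_end); /* final window */
    }
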
 
 /**
  * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
  *
  * @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
  * @src: address where to copy page table entries from
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
@@ -793,6 +888,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+                                      struct fence *exclusive,
                                       uint64_t src,
                                       dma_addr_t *pages_addr,
                                       struct amdgpu_vm *vm,
@@ -853,6 +949,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        vm_update_params.ib = &job->ibs[0];
 
+       r = amdgpu_sync_fence(adev, &job->sync, exclusive);
+       if (r)
+               goto error_free;
+
        r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
                             owner);
        if (r)
@@ -889,6 +989,7 @@ error_free:
  * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
  *
  * @adev: amdgpu_device pointer
+ * @exclusive: fence we need to sync to
  * @gtt_flags: flags as they are used for GTT
  * @pages_addr: DMA addresses to use for mapping
  * @vm: requested vm
@@ -902,6 +1003,7 @@ error_free:
  * Returns 0 for success, -EINVAL for failure.
  */
 static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
+                                     struct fence *exclusive,
                                      uint32_t gtt_flags,
                                      dma_addr_t *pages_addr,
                                      struct amdgpu_vm *vm,
@@ -932,7 +1034,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
        addr += mapping->offset;
 
        if (!pages_addr || src)
-               return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+               return amdgpu_vm_bo_update_mapping(adev, exclusive,
+                                                  src, pages_addr, vm,
                                                   start, mapping->it.last,
                                                   flags, addr, fence);
 
@@ -940,7 +1043,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                uint64_t last;
 
                last = min((uint64_t)mapping->it.last, start + max_size - 1);
-               r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
+               r = amdgpu_vm_bo_update_mapping(adev, exclusive,
+                                               src, pages_addr, vm,
                                                start, last, flags, addr,
                                                fence);
                if (r)
@@ -973,6 +1077,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        struct amdgpu_bo_va_mapping *mapping;
        dma_addr_t *pages_addr = NULL;
        uint32_t gtt_flags, flags;
+       struct fence *exclusive;
        uint64_t addr;
        int r;
 
@@ -994,8 +1099,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                default:
                        break;
                }
+
+               exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
        } else {
                addr = 0;
+               exclusive = NULL;
        }
 
        flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
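
The exclusive fence fetched above is the BO's pending write/move fence; threading it into amdgpu_vm_bo_update_mapping makes the page-table update job wait for the move to complete before rewriting PTEs. A standalone model of that dependency, with hypothetical stand-in types for the driver's fence/sync machinery:

    struct fence { int signaled; };
    struct sync  { struct fence *deps[8]; int n; };

    static int sync_add_fence(struct sync *s, struct fence *f)
    {
            if (!f)
                    return 0;      /* CPU-filled BOs have no exclusive fence */
            if (s->n >= 8)
                    return -1;
            s->deps[s->n++] = f;   /* the job now waits for the move */
            return 0;
    }
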
@@ -1007,7 +1115,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        spin_unlock(&vm->status_lock);
 
        list_for_each_entry(mapping, &bo_va->invalids, list) {
-               r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
+               r = amdgpu_vm_bo_split_mapping(adev, exclusive,
+                                              gtt_flags, pages_addr, vm,
                                               mapping, flags, addr,
                                               &bo_va->last_pt_update);
                if (r)
@@ -1054,7 +1163,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                        struct amdgpu_bo_va_mapping, list);
                list_del(&mapping->list);
 
-               r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
+               r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
                                               0, 0, NULL);
                kfree(mapping);
                if (r)
@@ -1445,6 +1554,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        amdgpu_bo_unreserve(vm->page_directory);
        if (r)
                goto error_free_page_directory;
+       vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
 
        return 0;
 
@@ -1516,6 +1626,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
                              &adev->vm_manager.ids_lru);
        }
 
+       adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+               adev->vm_manager.seqno[i] = 0;
+
        atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
        atomic64_set(&adev->vm_manager.client_counter, 0);
 }
index 48b6bd671cda80fbed1fa1dc6efe85ddc686eec3..c32eca26155c7be0c57d53c2763b015676269e41 100644 (file)
@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
                        if (dig->backlight_level == 0)
                                amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
                                                                       ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
index 13cdb01e9b45022aebf6d052523d718d52ceaa91..bc56c8a181e628b575861233bd705dd986f9f061 100644 (file)
@@ -156,3 +156,18 @@ u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap)
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev, u8 slave_addr, u8 line_number, u8 offset, u8 data)
+{
+       PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+
+       args.ucRegIndex = offset;
+       args.lpI2CDataOut = data;
+       args.ucFlag = 1;
+       args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+       args.ucTransBytes = 1;
+       args.ucSlaveAddr = slave_addr;
+       args.ucLineNumber = line_number;
+
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
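
The new helper issues a single-byte write through the AtomBIOS ProcessI2cChannelTransaction command table. A hedged usage sketch with purely illustrative values; whether slave_addr is expected in pre-shifted 8-bit form is an assumption here:

    /* write 0x2c to register 0x04 of a device at 7-bit address 0x40
     * behind BIOS i2c line 1 -- illustrative values only */
    amdgpu_atombios_i2c_channel_trans(adev, 0x40 << 1, 1, 0x04, 0x2c);
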
index d6128d9de56e4b9db096de8e07d30d51e2e4a4f2..251aaf41f65d5a277104c3b9fda29fc71bbca680 100644 (file)
@@ -27,5 +27,7 @@
 int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
                      struct i2c_msg *msgs, int num);
 u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap);
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev,
+               u8 slave_addr, u8 line_number, u8 offset, u8 data);
 
 #endif
index ea407db1fbcfaaa54c844e7f41e06eb3a04bb1a2..573bc973d6921a7f11dc96ab698fabb43fc81fce 100644 (file)
@@ -50,7 +50,9 @@
 #include "gmc/gmc_7_1_sh_mask.h"
 
 MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
 MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
 
 #define MC_CG_ARB_FREQ_F0           0x0a
 #define MC_CG_ARB_FREQ_F1           0x0b
@@ -84,12 +86,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
 };
 
+#if 0
 static const struct ci_pt_defaults defaults_bonaire_pro =
 {
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
        { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
        { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
 };
+#endif
 
 static const struct ci_pt_defaults defaults_saturn_xt =
 {
@@ -736,19 +740,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
 
        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
            pi->caps_td_ramping || pi->caps_tcp_ramping) {
-               gfx_v7_0_enter_rlc_safe_mode(adev);
+               adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
                if (enable) {
                        ret = ci_program_pt_config_registers(adev, didt_config_ci);
                        if (ret) {
-                               gfx_v7_0_exit_rlc_safe_mode(adev);
+                               adev->gfx.rlc.funcs->exit_safe_mode(adev);
                                return ret;
                        }
                }
 
                ci_do_enable_didt(adev, enable);
 
-               gfx_v7_0_exit_rlc_safe_mode(adev);
+               adev->gfx.rlc.funcs->exit_safe_mode(adev);
        }
 
        return 0;
@@ -3636,6 +3640,10 @@ static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
 
        ci_setup_default_pcie_tables(adev);
 
+       /* save a copy of the default DPM table */
+       memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
+                       sizeof(struct ci_dpm_table));
+
        return 0;
 }
 
@@ -5754,10 +5762,18 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
 
        switch (adev->asic_type) {
        case CHIP_BONAIRE:
-               chip_name = "bonaire";
+               if ((adev->pdev->revision == 0x80) ||
+                   (adev->pdev->revision == 0x81) ||
+                   (adev->pdev->device == 0x665f))
+                       chip_name = "bonaire_k";
+               else
+                       chip_name = "bonaire";
                break;
        case CHIP_HAWAII:
-               chip_name = "hawaii";
+               if (adev->pdev->revision == 0x80)
+                       chip_name = "hawaii_k";
+               else
+                       chip_name = "hawaii";
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
@@ -6221,6 +6237,9 @@ static int ci_dpm_sw_fini(void *handle)
        ci_dpm_fini(adev);
        mutex_unlock(&adev->pm.mutex);
 
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
 
@@ -6401,6 +6420,186 @@ static int ci_dpm_set_powergating_state(void *handle,
        return 0;
 }
 
+static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+               enum pp_clock_type type, char *buf)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+       struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+       struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+       struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+
+       int i, now, size = 0;
+       uint32_t clock, pcie_speed;
+
+       switch (type) {
+       case PP_SCLK:
+               amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
+               clock = RREG32(mmSMC_MSG_ARG_0);
+
+               for (i = 0; i < sclk_table->count; i++) {
+                       if (clock > sclk_table->dpm_levels[i].value)
+                               continue;
+                       break;
+               }
+               now = i;
+
+               for (i = 0; i < sclk_table->count; i++)
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                                       i, sclk_table->dpm_levels[i].value / 100,
+                                       (i == now) ? "*" : "");
+               break;
+       case PP_MCLK:
+               amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
+               clock = RREG32(mmSMC_MSG_ARG_0);
+
+               for (i = 0; i < mclk_table->count; i++) {
+                       if (clock > mclk_table->dpm_levels[i].value)
+                               continue;
+                       break;
+               }
+               now = i;
+
+               for (i = 0; i < mclk_table->count; i++)
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                                       i, mclk_table->dpm_levels[i].value / 100,
+                                       (i == now) ? "*" : "");
+               break;
+       case PP_PCIE:
+               pcie_speed = ci_get_current_pcie_speed(adev);
+               for (i = 0; i < pcie_table->count; i++) {
+                       if (pcie_speed != pcie_table->dpm_levels[i].value)
+                               continue;
+                       break;
+               }
+               now = i;
+
+               for (i = 0; i < pcie_table->count; i++)
+                       size += sprintf(buf + size, "%d: %s %s\n", i,
+                                       (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
+                                       (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+                                       (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+                                       (i == now) ? "*" : "");
+               break;
+       default:
+               break;
+       }
+
+       return size;
+}
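
Each case above queries the SMC for the current clock, picks the first DPM level at or above it, and prints one line per level with the active one starred. With hypothetical levels, the buffer produced by the sprintf format above would read:

    0: 300Mhz
    1: 600Mhz *
    2: 1000Mhz
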
+
+static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+               enum pp_clock_type type, uint32_t mask)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+
+       if (adev->pm.dpm.forced_level
+                       != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
+               return -EINVAL;
+
+       switch (type) {
+       case PP_SCLK:
+               if (!pi->sclk_dpm_key_disabled)
+                       amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+                                       PPSMC_MSG_SCLKDPM_SetEnabledMask,
+                                       pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+               break;
+
+       case PP_MCLK:
+               if (!pi->mclk_dpm_key_disabled)
+                       amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+                                       PPSMC_MSG_MCLKDPM_SetEnabledMask,
+                                       pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+               break;
+
+       case PP_PCIE:
+       {
+               uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+               uint32_t level = 0;
+
+               while (tmp >>= 1)
+                       level++;
+
+               if (!pi->pcie_dpm_key_disabled)
+                       amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+                                       PPSMC_MSG_PCIeDPM_ForceLevel,
+                                       level);
+               break;
+       }
+       default:
+               break;
+       }
+
+       return 0;
+}
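
The PP_PCIE branch reduces the requested mask to a single forced level by taking the index of its highest set bit. The same idiom as a standalone helper, with a worked value in the comment:

    #include <stdint.h>

    static unsigned int highest_level(uint32_t mask)
    {
            unsigned int level = 0;

            /* e.g. mask = 0x4: 0x4 -> 0x2 -> 0x1 -> 0, so level == 2 */
            while (mask >>= 1)
                    level++;
            return level;
    }
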
+
+static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+       struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
+       struct ci_single_dpm_table *golden_sclk_table =
+                       &(pi->golden_dpm_table.sclk_table);
+       int value;
+
+       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+                       100 /
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return value;
+}
+
+static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+       struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+       struct ci_single_dpm_table *golden_sclk_table =
+                       &(pi->golden_dpm_table.sclk_table);
+
+       if (value > 20)
+               value = 20;
+
+       ps->performance_levels[ps->performance_level_count - 1].sclk =
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+                       value / 100 +
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return 0;
+}
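
get_sclk_od/set_sclk_od express the overdrive as a percentage above the golden (default) top level. An illustrative round trip with hypothetical level values:

    /*   golden top level = 1000, current top = 1100
     *   get: (1100 - 1000) * 100 / 1000 = 10    -> 10% overdrive
     *   set(10): 1000 * 10 / 100 + 1000 = 1100  -> same top sclk restored
     * requests above 20 are clamped to 20%.
     */
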
+
+static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+       struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
+       struct ci_single_dpm_table *golden_mclk_table =
+                       &(pi->golden_dpm_table.mclk_table);
+       int value;
+
+       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+                       100 /
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return value;
+}
+
+static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+       struct ci_power_info *pi = ci_get_pi(adev);
+       struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
+       struct ci_single_dpm_table *golden_mclk_table =
+                       &(pi->golden_dpm_table.mclk_table);
+
+       if (value > 20)
+               value = 20;
+
+       ps->performance_levels[ps->performance_level_count - 1].mclk =
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+                       value / 100 +
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return 0;
+}
+
 const struct amd_ip_funcs ci_dpm_ip_funcs = {
        .name = "ci_dpm",
        .early_init = ci_dpm_early_init,
@@ -6435,6 +6634,12 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
        .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
        .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
        .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
+       .print_clock_levels = ci_dpm_print_clock_levels,
+       .force_clock_level = ci_dpm_force_clock_level,
+       .get_sclk_od = ci_dpm_get_sclk_od,
+       .set_sclk_od = ci_dpm_set_sclk_od,
+       .get_mclk_od = ci_dpm_get_mclk_od,
+       .set_mclk_od = ci_dpm_set_mclk_od,
 };
 
 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
index faccc30c93bf9a023546cbdc97733688e07f6a45..91be2996ae7cee447703cd47e82568cac241a160 100644 (file)
@@ -193,6 +193,7 @@ struct ci_pt_defaults {
 
 struct ci_power_info {
        struct ci_dpm_table dpm_table;
+       struct ci_dpm_table golden_dpm_table;
        u32 voltage_control;
        u32 mvdd_control;
        u32 vddci_control;
index 07bc795a4ca9f87662bdaaa6ebd34f6846bb932b..a7de4d18ac946c2f8da67ba16664ed6f956c985f 100644 (file)
@@ -962,6 +962,12 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
+static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
+{
+       /* CIK does not support SR-IOV */
+       return 0;
+}
+
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
        {mmGRBM_STATUS, false},
        {mmGB_ADDR_CONFIG, false},
@@ -1029,12 +1035,12 @@ static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
 
        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               gfx_v7_0_select_se_sh(adev, se_num, sh_num);
+               amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
        val = RREG32(reg_offset);
 
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
 }
@@ -1152,10 +1158,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
        WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
 }
 
-static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 {
        struct kv_reset_save_regs kv_save = { 0 };
        u32 i;
+       int r = -EINVAL;
 
        dev_info(adev->dev, "GPU pci config reset\n");
 
@@ -1171,14 +1178,20 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
-               if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
+               if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+                       /* enable BM */
+                       pci_set_master(adev->pdev);
+                       r = 0;
                        break;
+               }
                udelay(1);
        }
 
        /* does asic init need to be run first??? */
        if (adev->flags & AMD_IS_APU)
                kv_restore_regs_for_reset(adev, &kv_save);
+
+       return r;
 }
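
cik_gpu_pci_config_reset now reports whether the ASIC actually came back: it polls CONFIG_MEMSIZE until it leaves the all-ones reset value, re-enables bus mastering on success, and otherwise returns -EINVAL. The generic poll-with-timeout shape, as a kernel-style sketch in which read_ready() is hypothetical:

    extern int read_ready(void);
    extern void udelay(unsigned long usecs);

    static int wait_for_ready(unsigned int usec_timeout)
    {
            unsigned int i;

            for (i = 0; i < usec_timeout; i++) {
                    if (read_ready())
                            return 0;       /* out of reset */
                    udelay(1);
            }
            return -EINVAL;                 /* timed out, still in reset */
    }
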
 
 static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -1204,13 +1217,14 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
  */
 static int cik_asic_reset(struct amdgpu_device *adev)
 {
+       int r;
        cik_set_bios_scratch_engine_hung(adev, true);
 
-       cik_gpu_pci_config_reset(adev);
+       r = cik_gpu_pci_config_reset(adev);
 
        cik_set_bios_scratch_engine_hung(adev, false);
 
-       return 0;
+       return r;
 }
 
 static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -2007,9 +2021,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
        .get_xclk = &cik_get_xclk,
        .set_uvd_clocks = &cik_set_uvd_clocks,
        .set_vce_clocks = &cik_set_vce_clocks,
-       /* these should be moved to their own ip modules */
-       .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
-       .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
+       .get_virtual_caps = &cik_get_virtual_caps,
 };
 
 static int cik_common_early_init(void *handle)
index 518dca43b133a2eaa638031be93c4af9522c5b81..6507a7ee75e4c09457772183c9a88217f5d79392 100644 (file)
@@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
 
 u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
 
+
+static void cik_sdma_free_microcode(struct amdgpu_device *adev)
+{
+       int i;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               release_firmware(adev->sdma.instance[i].fw);
+               adev->sdma.instance[i].fw = NULL;
+       }
+}
+
 /*
  * sDMA - System DMA
  * Starting with CIK, the GPU has new asynchronous
@@ -214,17 +224,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
                                  unsigned vm_id, bool ctx_switch)
 {
        u32 extra_bits = vm_id & 0xf;
-       u32 next_rptr = ring->wptr + 5;
-
-       while ((next_rptr & 7) != 4)
-               next_rptr++;
-
-       next_rptr += 4;
-       amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
-       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
-       amdgpu_ring_write(ring, 1); /* number of DWs to follow */
-       amdgpu_ring_write(ring, next_rptr);
 
        /* IB packet must end on a 8 DW boundary */
        cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
@@ -419,6 +418,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -446,7 +447,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
+
+       cik_sdma_enable(adev, true);
 
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -529,8 +535,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       /* unhalt the MEs */
-       cik_sdma_enable(adev, true);
+       /* halt the engine before programming */
+       cik_sdma_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = cik_sdma_gfx_resume(adev);
@@ -616,7 +622,6 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct fence *f = NULL;
-       unsigned i;
        unsigned index;
        int r;
        u32 tmp = 0;
@@ -638,7 +643,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
                goto err0;
        }
 
-       ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+       ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+                               SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib.ptr[1] = lower_32_bits(gpu_addr);
        ib.ptr[2] = upper_32_bits(gpu_addr);
        ib.ptr[3] = 1;
@@ -653,23 +659,15 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                goto err1;
        }
-       for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = le32_to_cpu(adev->wb.wb[index]);
-               if (tmp == 0xDEADBEEF)
-                       break;
-               DRM_UDELAY(1);
-       }
-       if (i < adev->usec_timeout) {
-               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-                        ring->idx, i);
-               goto err1;
+       tmp = le32_to_cpu(adev->wb.wb[index]);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
        } else {
                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
 
 err1:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 err0:
@@ -998,6 +996,7 @@ static int cik_sdma_sw_fini(void *handle)
        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+       cik_sdma_free_microcode(adev);
        return 0;
 }
 
index 933e425a81545c22d80945f08da923f964fd0d16..8ba07e79d4cb17317cebd37811b24bcf046ef847 100644 (file)
@@ -2219,6 +2219,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
                        }
                }
        } else { /*pi->caps_vce_pg*/
+               pi->vce_power_gated = gate;
                cz_update_vce_dpm(adev);
                cz_enable_vce_dpm(adev, !gate);
        }
index 8227344d2ff6e8ce43d0ce41e4c1179b57a2b6d1..c1b04e9aab574301acd7594537a9b397d9a5cf7c 100644 (file)
@@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
        }
 }
 
-static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-                                   u16 *blue, uint32_t start, uint32_t size)
+static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                   u16 *blue, uint32_t size)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       int end = (start + size > 256) ? 256 : start + size, i;
+       int i;
 
        /* userspace palettes are always correct as is */
-       for (i = start; i < end; i++) {
+       for (i = 0; i < size; i++) {
                amdgpu_crtc->lut_r[i] = red[i] >> 6;
                amdgpu_crtc->lut_g[i] = green[i] >> 6;
                amdgpu_crtc->lut_b[i] = blue[i] >> 6;
        }
        dce_v10_0_crtc_load_lut(crtc);
+
+       return 0;
 }
 
 static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2717,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
                amdgpu_irq_update(adev, &adev->pageflip_irq, type);
-               drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+               drm_crtc_vblank_on(crtc);
                dce_v10_0_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+               drm_crtc_vblank_off(crtc);
                if (amdgpu_crtc->enabled) {
                        dce_v10_0_vga_enable(crtc, true);
                        amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
 
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-       drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+       drm_crtc_vblank_put(&amdgpu_crtc->base);
        schedule_work(&works->unpin_work);
 
        return 0;
index af26ec0bc59d35b1c09b00cb7be2fb4c5ca5ede0..d4bf133908b17ae2c58e5156acf2175d493f6b58 100644 (file)
@@ -307,11 +307,10 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev,
        struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
        u32 tmp;
 
-       /* flip at hsync for async, default is vsync */
-       /* use UPDATE_IMMEDIATE_EN instead for async? */
+       /* flip immediately for async, default is on vsync */
        tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
-                           GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
+                           GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
        WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        /* update the scanout addresses */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2678,19 +2677,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
        }
 }
 
-static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-                                   u16 *blue, uint32_t start, uint32_t size)
+static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                   u16 *blue, uint32_t size)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       int end = (start + size > 256) ? 256 : start + size, i;
+       int i;
 
        /* userspace palettes are always correct as is */
-       for (i = start; i < end; i++) {
+       for (i = 0; i < size; i++) {
                amdgpu_crtc->lut_r[i] = red[i] >> 6;
                amdgpu_crtc->lut_g[i] = green[i] >> 6;
                amdgpu_crtc->lut_b[i] = blue[i] >> 6;
        }
        dce_v11_0_crtc_load_lut(crtc);
+
+       return 0;
 }
 
 static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2728,13 +2729,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
                amdgpu_irq_update(adev, &adev->pageflip_irq, type);
-               drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+               drm_crtc_vblank_on(crtc);
                dce_v11_0_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+               drm_crtc_vblank_off(crtc);
                if (amdgpu_crtc->enabled) {
                        dce_v11_0_vga_enable(crtc, true);
                        amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3433,7 +3434,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
 
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-       drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+       drm_crtc_vblank_put(&amdgpu_crtc->base);
        schedule_work(&works->unpin_work);
 
        return 0;
index 3fb65e41a6ef5e9402a4c2a3df4b30e1372ac5c8..4fdfab1e920012b4e770071af2c05e0357753c07 100644 (file)
@@ -526,36 +526,16 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
                crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
                                             CRTC_CONTROL, CRTC_MASTER_EN);
                if (crtc_enabled) {
-#if 0
-                       u32 frame_count;
-                       int j;
-
+#if 1
                        save->crtc_enabled[i] = true;
                        tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
                        if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
-                               amdgpu_display_vblank_wait(adev, i);
-                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+                               /* correct only for RGB; black is 0 */
+                               WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
                                tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
                                WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-                               WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-                       }
-                       /* wait for the next frame */
-                       frame_count = amdgpu_display_vblank_get_counter(adev, i);
-                       for (j = 0; j < adev->usec_timeout; j++) {
-                               if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
-                                       break;
-                               udelay(1);
-                       }
-                       tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
-                               tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
-                               WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-                       }
-                       tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
-                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
-                               WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
                        }
+                       mdelay(20);
 #else
                        /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
                        WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
@@ -575,55 +555,22 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
 static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
                                      struct amdgpu_mode_mc_save *save)
 {
-       u32 tmp, frame_count;
-       int i, j;
+       u32 tmp;
+       int i;
 
        /* update crtc base addresses */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
                       upper_32_bits(adev->mc.vram_start));
-               WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
-                      upper_32_bits(adev->mc.vram_start));
                WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
                       (u32)adev->mc.vram_start);
-               WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
-                      (u32)adev->mc.vram_start);
 
                if (save->crtc_enabled[i]) {
-                       tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
-                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
-                               WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
-                       }
-                       tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
-                               tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
-                               WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
-                       }
-                       tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
-                       if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
-                               tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
-                               WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
-                       }
-                       for (j = 0; j < adev->usec_timeout; j++) {
-                               tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
-                               if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
-                                       break;
-                               udelay(1);
-                       }
                        tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
                        tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
-                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                        WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
-                       WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
-                       /* wait for the next frame */
-                       frame_count = amdgpu_display_vblank_get_counter(adev, i);
-                       for (j = 0; j < adev->usec_timeout; j++) {
-                               if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
-                                       break;
-                               udelay(1);
-                       }
                }
+               mdelay(20);
        }
 
        WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
@@ -2574,19 +2521,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
        }
 }
 
-static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-                                   u16 *blue, uint32_t start, uint32_t size)
+static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                  u16 *blue, uint32_t size)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       int end = (start + size > 256) ? 256 : start + size, i;
+       int i;
 
        /* userspace palettes are always correct as is */
-       for (i = start; i < end; i++) {
+       for (i = 0; i < size; i++) {
                amdgpu_crtc->lut_r[i] = red[i] >> 6;
                amdgpu_crtc->lut_g[i] = green[i] >> 6;
                amdgpu_crtc->lut_b[i] = blue[i] >> 6;
        }
        dce_v8_0_crtc_load_lut(crtc);
+
+       return 0;
 }
 
 static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2624,13 +2573,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
                amdgpu_irq_update(adev, &adev->crtc_irq, type);
                amdgpu_irq_update(adev, &adev->pageflip_irq, type);
-               drm_vblank_on(dev, amdgpu_crtc->crtc_id);
+               drm_crtc_vblank_on(crtc);
                dce_v8_0_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               drm_vblank_off(dev, amdgpu_crtc->crtc_id);
+               drm_crtc_vblank_off(crtc);
                if (amdgpu_crtc->enabled) {
                        dce_v8_0_vga_enable(crtc, true);
                        amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3376,7 +3325,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
 
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-       drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+       drm_crtc_vblank_put(&amdgpu_crtc->base);
        schedule_work(&works->unpin_work);
 
        return 0;
index 245cabf06575c73e1e9427011d878237e2e96fdf..ed03b75175d48b1ba24754f2e12499ac1041bb17 100644 (file)
@@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle)
 
 static int fiji_dpm_sw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
 
index 7f18a53ab53ac8bc18c992d7fdc76d10a283c3d4..1ac5ad01bf58df2764bc14642b0fcb2d809a115e 100644 (file)
@@ -991,6 +991,22 @@ out:
        return err;
 }
 
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+       release_firmware(adev->gfx.pfp_fw);
+       adev->gfx.pfp_fw = NULL;
+       release_firmware(adev->gfx.me_fw);
+       adev->gfx.me_fw = NULL;
+       release_firmware(adev->gfx.ce_fw);
+       adev->gfx.ce_fw = NULL;
+       release_firmware(adev->gfx.mec_fw);
+       adev->gfx.mec_fw = NULL;
+       release_firmware(adev->gfx.mec2_fw);
+       adev->gfx.mec2_fw = NULL;
+       release_firmware(adev->gfx.rlc_fw);
+       adev->gfx.rlc_fw = NULL;
+}
+
 /**
  * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
  *
@@ -1567,9 +1583,15 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
  * registers are instanced per SE or SH.  0xffffffff means
  * broadcast to all SEs or SHs (CIK).
  */
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
+                                 u32 se_num, u32 sh_num, u32 instance)
 {
-       u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK;
+       u32 data;
+
+       if (instance == 0xffffffff)
+               data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+       else
+               data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
 
        if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
                data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
@@ -1643,13 +1665,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-                       gfx_v7_0_select_se_sh(adev, i, j);
+                       gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
                        data = gfx_v7_0_get_rb_active_bitmap(adev);
                        active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
                                               rb_bitmap_width_per_sh);
                }
        }
-       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        adev->gfx.config.backend_enable_mask = active_rbs;
@@ -1730,7 +1752,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
         * making sure that the following register writes will be broadcasted
         * to all the shaders
         */
-       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -2034,17 +2056,6 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                      unsigned vm_id, bool ctx_switch)
 {
        u32 header, control = 0;
-       u32 next_rptr = ring->wptr + 5;
-
-       if (ctx_switch)
-               next_rptr += 2;
-
-       next_rptr += 4;
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
-       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
-       amdgpu_ring_write(ring, next_rptr);
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
        if (ctx_switch) {
@@ -2073,22 +2084,9 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                          struct amdgpu_ib *ib,
                                          unsigned vm_id, bool ctx_switch)
 {
-       u32 header, control = 0;
-       u32 next_rptr = ring->wptr + 5;
+       u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
 
-       control |= INDIRECT_BUFFER_VALID;
-       next_rptr += 4;
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
-       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
-       amdgpu_ring_write(ring, next_rptr);
-
-       header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-
-       control |= ib->length_dw | (vm_id << 24);
-
-       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
                                          (2 << 0) |
@@ -2114,7 +2112,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
        struct fence *f = NULL;
        uint32_t scratch;
        uint32_t tmp = 0;
-       unsigned i;
        int r;
 
        r = amdgpu_gfx_scratch_get(adev, &scratch);
@@ -2143,16 +2140,9 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                goto err2;
        }
-       for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32(scratch);
-               if (tmp == 0xDEADBEEF)
-                       break;
-               DRM_UDELAY(1);
-       }
-       if (i < adev->usec_timeout) {
-               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-                        ring->idx, i);
-               goto err2;
+       tmp = RREG32(scratch);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
        } else {
                DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
@@ -2160,7 +2150,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
        }
 
 err2:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 err1:
@@ -3205,7 +3194,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
                }
        }
        adev->gfx.rlc.cs_data = ci_cs_data;
-       adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+       adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+       adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
 
        src_ptr = adev->gfx.rlc.reg_list;
        dws = adev->gfx.rlc.reg_list_size;
@@ -3363,7 +3353,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-                       gfx_v7_0_select_se_sh(adev, i, j);
+                       gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
                        for (k = 0; k < adev->usec_timeout; k++) {
                                if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
                                        break;
@@ -3371,7 +3361,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
                        }
                }
        }
-       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3418,7 +3408,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
        return orig;
 }
 
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
 {
        u32 tmp, i, mask;
 
@@ -3440,7 +3430,7 @@ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
        }
 }
 
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
 {
        u32 tmp;
 
@@ -3455,7 +3445,7 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
  *
  * Halt the RLC ME (MicroEngine) (CIK).
  */
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
+static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
 {
        WREG32(mmRLC_CNTL, 0);
 
@@ -3531,7 +3521,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
        WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
 
        mutex_lock(&adev->grbm_idx_mutex);
-       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
        WREG32(mmRLC_LB_PARAMS, 0x00600408);
        WREG32(mmRLC_LB_CNTL, 0x80000004);
@@ -3571,7 +3561,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
                tmp = gfx_v7_0_halt_rlc(adev);
 
                mutex_lock(&adev->grbm_idx_mutex);
-               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3622,7 +3612,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
                tmp = gfx_v7_0_halt_rlc(adev);
 
                mutex_lock(&adev->grbm_idx_mutex);
-               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3673,7 +3663,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
                tmp = gfx_v7_0_halt_rlc(adev);
 
                mutex_lock(&adev->grbm_idx_mutex);
-               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+               gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
                WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
                data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
@@ -3851,6 +3841,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
        }
 }
 
+static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+                                                u32 bitmap)
+{
+       u32 data;
+
+       if (!bitmap)
+               return;
+
+       data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+       data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+       WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
 static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
 {
        u32 data, mask;
@@ -4107,7 +4111,7 @@ static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
  * Fetches a GPU clock counter snapshot (SI).
  * Returns the 64 bit clock counter snapshot.
  */
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
        uint64_t clock;
 
@@ -4167,12 +4171,24 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
+static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
+       .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
+       .select_se_sh = &gfx_v7_0_select_se_sh,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
+       .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
+       .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+};
+
 static int gfx_v7_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
        adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
+       adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
+       adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
        gfx_v7_0_set_ring_funcs(adev);
        gfx_v7_0_set_irq_funcs(adev);
        gfx_v7_0_set_gds_init(adev);
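
Note on the two hunks above: the formerly exported gfx_v7 helpers are now reached through the amdgpu_gfx_funcs and amdgpu_rlc_funcs tables wired up in gfx_v7_0_early_init(), so other IP blocks no longer link against gfx_v7_0 symbols directly. A minimal sketch of how a caller outside gfx_v7_0.c uses the new indirection (hypothetical function, not part of the patch):

	/* Read a per-SE/SH register via the new function-pointer hook,
	 * then restore broadcast mode before releasing the mutex. */
	static u32 read_per_sh_register(struct amdgpu_device *adev,
					u32 se, u32 sh, u32 reg)
	{
		u32 val;

		mutex_lock(&adev->grbm_idx_mutex);
		adev->gfx.funcs->select_se_sh(adev, se, sh, 0xffffffff);
		val = RREG32(reg);
		adev->gfx.funcs->select_se_sh(adev, 0xffffffff,
					      0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	}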
@@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle)
        gfx_v7_0_cp_compute_fini(adev);
        gfx_v7_0_rlc_fini(adev);
        gfx_v7_0_mec_fini(adev);
+       gfx_v7_0_free_microcode(adev);
 
        return 0;
 }
@@ -4816,7 +4833,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
        case 2:
                for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                        ring = &adev->gfx.compute_ring[i];
-                       if ((ring->me == me_id) & (ring->pipe == pipe_id))
+                       if ((ring->me == me_id) && (ring->pipe == pipe_id))
                                amdgpu_fence_process(ring);
                }
                break;
@@ -5015,16 +5032,22 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
        int i, j, k, counter, active_cu_number = 0;
        u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
        struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+       unsigned disable_masks[4 * 2];
 
        memset(cu_info, 0, sizeof(*cu_info));
 
+       amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        mask = 1;
                        ao_bitmap = 0;
                        counter = 0;
-                       gfx_v7_0_select_se_sh(adev, i, j);
+                       gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+                       if (i < 4 && j < 2)
+                               gfx_v7_0_set_user_cu_inactive_bitmap(
+                                       adev, disable_masks[i * 2 + j]);
                        bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
                        cu_info->bitmap[i][j] = bitmap;
 
@@ -5040,7 +5063,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
                        ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
                }
        }
-       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        cu_info->number = active_cu_number;
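
Note: gfx_v7_0_get_cu_info() now applies user-requested CU disables before reading the active-CU bitmap. The call site implies that amdgpu_gfx_parse_disable_cu(masks, max_se, max_sh) fills one 32-bit mask per (SE, SH) pair. A rough sketch of that contract, assuming a comma-separated "se.sh.cu" option string (the option format is an assumption, not shown in this diff):

	/* Hypothetical parser: set bit cu in masks[se * max_sh + sh]
	 * for each "se.sh.cu" entry in opt. */
	static void parse_disable_cu(unsigned *masks, unsigned max_se,
				     unsigned max_sh, const char *opt)
	{
		unsigned se, sh, cu;
		const char *p = opt;

		memset(masks, 0, max_se * max_sh * sizeof(*masks));
		while (p && sscanf(p, "%u.%u.%u", &se, &sh, &cu) == 3) {
			if (se < max_se && sh < max_sh && cu < 32)
				masks[se * max_sh + sh] |= 1u << cu;
			p = strchr(p, ',');
			if (p)
				p++;
		}
	}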
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index e747aa935c88dc2342562ed5f80f316f8e952490..94e3ea147c26ebae5e31f81d07b590002f58e79b 100644
 
 extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
 
-/* XXX these shouldn't be exported */
-void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
-void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
-uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index f19bab68fd837ffe3cacb54ef04f9d50dc8fee27..d97b962bc3de1a16c181ba5122eb02829890980f 100644
@@ -28,6 +28,7 @@
 #include "vid.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
 #include "clearstate_vi.h"
 
 #include "gmc/gmc_8_2_d.h"
@@ -47,6 +48,8 @@
 #include "dce/dce_10_0_d.h"
 #include "dce/dce_10_0_sh_mask.h"
 
+#include "smu/smu_7_1_3_d.h"
+
 #define GFX8_NUM_GFX_RINGS     1
 #define GFX8_NUM_COMPUTE_RINGS 8
 
@@ -282,6 +285,7 @@ static const u32 golden_settings_polaris11_a11[] =
        mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
        mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
        mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
+       mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 };
 
 static const u32 polaris11_golden_common_all[] =
@@ -297,7 +301,8 @@ static const u32 polaris11_golden_common_all[] =
 static const u32 golden_settings_polaris10_a11[] =
 {
        mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
-       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
+       mmCB_HW_CONTROL_2, 0, 0x0f000000,
        mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
        mmDB_DEBUG2, 0xf00fffff, 0x00000400,
        mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -311,6 +316,7 @@ static const u32 golden_settings_polaris10_a11[] =
        mmTCC_CTRL, 0x00100000, 0xf31fff7f,
        mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
        mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+       mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 };
 
 static const u32 polaris10_golden_common_all[] =
@@ -692,6 +698,11 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
                amdgpu_program_register_sequence(adev,
                                                 polaris10_golden_common_all,
                                                 (const u32)ARRAY_SIZE(polaris10_golden_common_all));
+               WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
+               if (adev->pdev->revision == 0xc7) {
+                       amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
+                       amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
+               }
                break;
        case CHIP_CARRIZO:
                amdgpu_program_register_sequence(adev,
@@ -783,7 +794,6 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
        struct fence *f = NULL;
        uint32_t scratch;
        uint32_t tmp = 0;
-       unsigned i;
        int r;
 
        r = amdgpu_gfx_scratch_get(adev, &scratch);
@@ -812,23 +822,15 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                goto err2;
        }
-       for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32(scratch);
-               if (tmp == 0xDEADBEEF)
-                       break;
-               DRM_UDELAY(1);
-       }
-       if (i < adev->usec_timeout) {
-               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-                        ring->idx, i);
-               goto err2;
+       tmp = RREG32(scratch);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
        } else {
                DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
                          scratch, tmp);
                r = -EINVAL;
        }
 err2:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 err1:
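
Note: the hunk above fixes a reference-count imbalance: the cleanup path used to call fence_put(f) both before and after amdgpu_ib_free(), dropping the IB test's single fence reference twice. The balanced pattern after the fix (sketch):

	r = fence_wait(f, false);        /* wait; does not consume the ref */
	/* ... check the scratch register ... */
	amdgpu_ib_free(adev, &ib, NULL); /* IB no longer needs the fence */
	fence_put(f);                    /* drop our one and only reference */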
@@ -836,6 +838,27 @@ err1:
        return r;
 }
 
+
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
+{
+       release_firmware(adev->gfx.pfp_fw);
+       adev->gfx.pfp_fw = NULL;
+       release_firmware(adev->gfx.me_fw);
+       adev->gfx.me_fw = NULL;
+       release_firmware(adev->gfx.ce_fw);
+       adev->gfx.ce_fw = NULL;
+       release_firmware(adev->gfx.rlc_fw);
+       adev->gfx.rlc_fw = NULL;
+       release_firmware(adev->gfx.mec_fw);
+       adev->gfx.mec_fw = NULL;
+       if ((adev->asic_type != CHIP_STONEY) &&
+           (adev->asic_type != CHIP_TOPAZ))
+               release_firmware(adev->gfx.mec2_fw);
+       adev->gfx.mec2_fw = NULL;
+
+       kfree(adev->gfx.rlc.register_list_format);
+}
+
 static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 {
        const char *chip_name;
@@ -1129,6 +1151,71 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
        buffer[count++] = cpu_to_le32(0);
 }
 
+static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+{
+       const __le32 *fw_data;
+       volatile u32 *dst_ptr;
+       int me, i, max_me = 4;
+       u32 bo_offset = 0;
+       u32 table_offset, table_size;
+
+       if (adev->asic_type == CHIP_CARRIZO)
+               max_me = 5;
+
+       /* write the cp table buffer */
+       dst_ptr = adev->gfx.rlc.cp_table_ptr;
+       for (me = 0; me < max_me; me++) {
+               if (me == 0) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.ce_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 1) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.pfp_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 2) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.me_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else if (me == 3) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               } else  if (me == 4) {
+                       const struct gfx_firmware_header_v1_0 *hdr =
+                               (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+                       fw_data = (const __le32 *)
+                               (adev->gfx.mec2_fw->data +
+                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+                       table_offset = le32_to_cpu(hdr->jt_offset);
+                       table_size = le32_to_cpu(hdr->jt_size);
+               }
+
+               for (i = 0; i < table_size; i++) {
+                       dst_ptr[bo_offset + i] =
+                               cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+               }
+
+               bo_offset += table_size;
+       }
+}
+
 static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
 {
        int r;
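
Note: cz_init_cp_jump_table() copies each microengine's jump table out of its firmware image; the five if/else branches differ only in which firmware blob they read. A behaviour-preserving, table-driven sketch of the same loop (hypothetical refactor, assuming the same gfx_firmware_header_v1_0 layout):

	static void copy_cp_jump_tables(struct amdgpu_device *adev)
	{
		const struct firmware *fws[] = {
			adev->gfx.ce_fw, adev->gfx.pfp_fw, adev->gfx.me_fw,
			adev->gfx.mec_fw, adev->gfx.mec2_fw,
		};
		int me, i, max_me = (adev->asic_type == CHIP_CARRIZO) ? 5 : 4;
		volatile u32 *dst = adev->gfx.rlc.cp_table_ptr;
		u32 bo_offset = 0;

		for (me = 0; me < max_me; me++) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const void *)fws[me]->data;
			const __le32 *fw = (const void *)(fws[me]->data +
				le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			u32 off = le32_to_cpu(hdr->jt_offset);
			u32 size = le32_to_cpu(hdr->jt_size);

			/* same byte handling as the original loop */
			for (i = 0; i < size; i++)
				dst[bo_offset + i] =
					cpu_to_le32(le32_to_cpu(fw[off + i]));
			bo_offset += size;
		}
	}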
@@ -1144,6 +1231,18 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
                amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
                adev->gfx.rlc.clear_state_obj = NULL;
        }
+
+       /* jump table block */
+       if (adev->gfx.rlc.cp_table_obj) {
+               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+               if (unlikely(r != 0))
+                       dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+               amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+               amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
+               adev->gfx.rlc.cp_table_obj = NULL;
+       }
 }
 
 static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
@@ -1200,6 +1299,46 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
                amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
 
+       if ((adev->asic_type == CHIP_CARRIZO) ||
+           (adev->asic_type == CHIP_STONEY)) {
+               adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
+               if (adev->gfx.rlc.cp_table_obj == NULL) {
+                       r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
+                                            AMDGPU_GEM_DOMAIN_VRAM,
+                                            AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                                            NULL, NULL,
+                                            &adev->gfx.rlc.cp_table_obj);
+                       if (r) {
+                               dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+                               return r;
+                       }
+               }
+
+               r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
+               if (unlikely(r != 0)) {
+                       dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+                       return r;
+               }
+               r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
+                                 &adev->gfx.rlc.cp_table_gpu_addr);
+               if (r) {
+                       amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+                       dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+                       return r;
+               }
+               r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
+               if (r) {
+                       dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+                       return r;
+               }
+
+               cz_init_cp_jump_table(adev);
+
+               amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+               amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+       }
+
        return 0;
 }
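
Note: the Carrizo/Stoney branch above walks the full amdgpu BO lifecycle for the CP jump-table buffer. Condensed sketch of the sequence (error handling trimmed; the buffer stays pinned for the RLC after setup):

	amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
			 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, NULL, NULL, &bo);
	amdgpu_bo_reserve(bo, false);             /* lock the BO */
	amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
	amdgpu_bo_kmap(bo, (void **)&cpu_ptr);    /* CPU-visible mapping */
	/* ... fill the table through cpu_ptr ... */
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);                  /* unlock; still pinned */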
 
@@ -1581,7 +1720,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
                RREG32(sec_ded_counter_registers[i]);
 
 fail:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 
@@ -1983,7 +2121,7 @@ static int gfx_v8_0_sw_fini(void *handle)
 
        gfx_v8_0_rlc_fini(adev);
 
-       kfree(adev->gfx.rlc.register_list_format);
+       gfx_v8_0_free_microcode(adev);
 
        return 0;
 }
@@ -3308,9 +3446,15 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
        }
 }
 
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num)
+static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
+                                 u32 se_num, u32 sh_num, u32 instance)
 {
-       u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+       u32 data;
+
+       if (instance == 0xffffffff)
+               data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+       else
+               data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
 
        if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
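
Note: gfx_v8_0_select_se_sh() (like its gfx_v7 counterpart earlier in this diff) gains an instance argument because GRBM_GFX_INDEX carries separate broadcast bits for SE, SH and CU instance. Passing 0xffffffff at any level selects broadcast for that level (sketch):

	gfx_v8_0_select_se_sh(adev, 1, 0, 0xffffffff);   /* SE1/SH0, all instances */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 3);   /* one instance, all SEs/SHs */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);   /* full broadcast */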
@@ -3360,13 +3504,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-                       gfx_v8_0_select_se_sh(adev, i, j);
+                       gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
                        data = gfx_v8_0_get_rb_active_bitmap(adev);
                        active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
                                               rb_bitmap_width_per_sh);
                }
        }
-       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        adev->gfx.config.backend_enable_mask = active_rbs;
@@ -3470,7 +3614,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
         * making sure that the following register writes will be broadcasted
         * to all the shaders
         */
-       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
        WREG32(mmPA_SC_FIFO_SIZE,
                   (adev->gfx.config.sc_prim_fifo_size_frontend <<
@@ -3493,7 +3637,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
-                       gfx_v8_0_select_se_sh(adev, i, j);
+                       gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
                        for (k = 0; k < adev->usec_timeout; k++) {
                                if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
                                        break;
@@ -3501,7 +3645,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
                        }
                }
        }
-       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3662,13 +3806,13 @@ static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
        WREG32(mmRLC_SRM_CNTL, data);
 }
 
-static void polaris11_init_power_gating(struct amdgpu_device *adev)
+static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
 {
        uint32_t data;
 
        if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
-                       AMD_PG_SUPPORT_GFX_SMG |
-                       AMD_PG_SUPPORT_GFX_DMG)) {
+                             AMD_PG_SUPPORT_GFX_SMG |
+                             AMD_PG_SUPPORT_GFX_DMG)) {
                data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
                data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
                data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
@@ -3693,6 +3837,53 @@ static void polaris11_init_power_gating(struct amdgpu_device *adev)
        }
 }
 
+static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
+                                               bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(mmRLC_PG_CNTL);
+
+       if (enable)
+               data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
+
+       if (orig != data)
+               WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
+                                                 bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(mmRLC_PG_CNTL);
+
+       if (enable)
+               data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
+
+       if (orig != data)
+               WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(mmRLC_PG_CNTL);
+
+       if (enable)
+               data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
+       else
+               data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
+
+       if (orig != data)
+               WREG32(mmRLC_PG_CNTL, data);
+}
+
 static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
 {
        if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
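
Note: each cz_enable_* helper above uses the same read-modify-write idiom, orig = data = RREG32(reg) followed by a conditional write, so the register is only touched when a bit actually changes. The idiom could be factored as (hypothetical helper, not in the patch):

	static void rmw_mask(struct amdgpu_device *adev, u32 reg, u32 mask,
			     bool set)
	{
		u32 orig, data;

		orig = data = RREG32(reg);
		if (set)
			data |= mask;
		else
			data &= ~mask;
		if (orig != data)
			WREG32(reg, data);   /* write back only on change */
	}

	/* e.g.: cz_enable_cp_power_gating(adev, en) ==
	 *       rmw_mask(adev, mmRLC_PG_CNTL,
	 *                RLC_PG_CNTL__CP_PG_DISABLE_MASK, !en); */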
@@ -3705,8 +3896,25 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
                gfx_v8_0_init_save_restore_list(adev);
                gfx_v8_0_enable_save_restore_machine(adev);
 
-               if (adev->asic_type == CHIP_POLARIS11)
-                       polaris11_init_power_gating(adev);
+               if ((adev->asic_type == CHIP_CARRIZO) ||
+                   (adev->asic_type == CHIP_STONEY)) {
+                       WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
+                       gfx_v8_0_init_power_gating(adev);
+                       WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
+                       if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
+                               cz_enable_sck_slow_down_on_power_up(adev, true);
+                               cz_enable_sck_slow_down_on_power_down(adev, true);
+                       } else {
+                               cz_enable_sck_slow_down_on_power_up(adev, false);
+                               cz_enable_sck_slow_down_on_power_down(adev, false);
+                       }
+                       if (adev->pg_flags & AMD_PG_SUPPORT_CP)
+                               cz_enable_cp_power_gating(adev, true);
+                       else
+                               cz_enable_cp_power_gating(adev, false);
+               } else if (adev->asic_type == CHIP_POLARIS11) {
+                       gfx_v8_0_init_power_gating(adev);
+               }
        }
 }
 
@@ -3974,11 +4182,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
                amdgpu_ring_write(ring, 0x3a00161a);
                amdgpu_ring_write(ring, 0x0000002e);
                break;
-       case CHIP_TOPAZ:
        case CHIP_CARRIZO:
                amdgpu_ring_write(ring, 0x00000002);
                amdgpu_ring_write(ring, 0x00000000);
                break;
+       case CHIP_TOPAZ:
+               amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
+                               0x00000000 : 0x00000002);
+               amdgpu_ring_write(ring, 0x00000000);
+               break;
        case CHIP_STONEY:
                amdgpu_ring_write(ring, 0x00000000);
                amdgpu_ring_write(ring, 0x00000000);
@@ -4941,7 +5153,7 @@ static int gfx_v8_0_soft_reset(void *handle)
  * Fetches a GPU clock counter snapshot.
  * Returns the 64 bit clock counter snapshot.
  */
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
+static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 {
        uint64_t clock;
 
@@ -5001,12 +5213,18 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
 }
 
+static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
+       .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
+       .select_se_sh = &gfx_v8_0_select_se_sh,
+};
+
 static int gfx_v8_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
        adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
+       adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
        gfx_v8_0_set_ring_funcs(adev);
        gfx_v8_0_set_irq_funcs(adev);
        gfx_v8_0_set_gds_init(adev);
@@ -5039,51 +5257,43 @@ static int gfx_v8_0_late_init(void *handle)
        return 0;
 }
 
-static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
-               bool enable)
+static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
+                                                      bool enable)
 {
        uint32_t data, temp;
 
-       /* Send msg to SMU via Powerplay */
-       amdgpu_set_powergating_state(adev,
-                       AMD_IP_BLOCK_TYPE_SMC,
-                       enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+       if (adev->asic_type == CHIP_POLARIS11)
+               /* Send msg to SMU via Powerplay */
+               amdgpu_set_powergating_state(adev,
+                                            AMD_IP_BLOCK_TYPE_SMC,
+                                            enable ?
+                                            AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
 
-       if (enable) {
-               /* Enable static MGPG */
-               temp = data = RREG32(mmRLC_PG_CNTL);
+       temp = data = RREG32(mmRLC_PG_CNTL);
+       /* Enable static MGPG */
+       if (enable)
                data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
-       } else {
-               temp = data = RREG32(mmRLC_PG_CNTL);
+       else
                data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
 
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
-       }
+       if (temp != data)
+               WREG32(mmRLC_PG_CNTL, data);
 }
 
-static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
-               bool enable)
+static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
+                                                       bool enable)
 {
        uint32_t data, temp;
 
-       if (enable) {
-               /* Enable dynamic MGPG */
-               temp = data = RREG32(mmRLC_PG_CNTL);
+       temp = data = RREG32(mmRLC_PG_CNTL);
+       /* Enable dynamic MGPG */
+       if (enable)
                data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
-       } else {
-               temp = data = RREG32(mmRLC_PG_CNTL);
+       else
                data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
 
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
-       }
+       if (temp != data)
+               WREG32(mmRLC_PG_CNTL, data);
 }
 
 static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
@@ -5091,19 +5301,63 @@ static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *ade
 {
        uint32_t data, temp;
 
-       if (enable) {
-               /* Enable quick PG */
-               temp = data = RREG32(mmRLC_PG_CNTL);
-               data |= 0x100000;
+       temp = data = RREG32(mmRLC_PG_CNTL);
+       /* Enable quick PG */
+       if (enable)
+               data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
 
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
-       } else {
-               temp = data = RREG32(mmRLC_PG_CNTL);
-               data &= ~0x100000;
+       if (temp != data)
+               WREG32(mmRLC_PG_CNTL, data);
+}
 
-               if (temp != data)
-                       WREG32(mmRLC_PG_CNTL, data);
+static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
+                                         bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(mmRLC_PG_CNTL);
+
+       if (enable)
+               data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+
+       if (orig != data)
+               WREG32(mmRLC_PG_CNTL, data);
+}
+
+static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
+                                               bool enable)
+{
+       u32 data, orig;
+
+       orig = data = RREG32(mmRLC_PG_CNTL);
+
+       if (enable)
+               data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+       else
+               data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+
+       if (orig != data)
+               WREG32(mmRLC_PG_CNTL, data);
+
+       /* Read any GFX register to wake up GFX. */
+       if (!enable)
+               data = RREG32(mmDB_RENDER_CONTROL);
+}
+
+static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
+                                         bool enable)
+{
+       if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
+               cz_enable_gfx_cg_power_gating(adev, true);
+               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
+                       cz_enable_gfx_pipeline_power_gating(adev, true);
+       } else {
+               cz_enable_gfx_cg_power_gating(adev, false);
+               cz_enable_gfx_pipeline_power_gating(adev, false);
        }
 }
 
@@ -5111,21 +5365,42 @@ static int gfx_v8_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       bool enable = (state == AMD_PG_STATE_GATE);
 
        if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
                return 0;
 
        switch (adev->asic_type) {
+       case CHIP_CARRIZO:
+       case CHIP_STONEY:
+               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
+                       cz_update_gfx_cg_power_gating(adev, enable);
+
+               if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+                       gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+               else
+                       gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+               if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+                       gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
+               else
+                       gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+               break;
        case CHIP_POLARIS11:
-               if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)
-                       polaris11_enable_gfx_static_mg_power_gating(adev,
-                                       state == AMD_PG_STATE_GATE ? true : false);
-               else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)
-                       polaris11_enable_gfx_dynamic_mg_power_gating(adev,
-                                       state == AMD_PG_STATE_GATE ? true : false);
+               if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
+                       gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
+               else
+                       gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
+
+               if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
+                       gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
                else
-                       polaris11_enable_gfx_quick_mg_power_gating(adev,
-                                       state == AMD_PG_STATE_GATE ? true : false);
+                       gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
+
+               if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
+                       polaris11_enable_gfx_quick_mg_power_gating(adev, true);
+               else
+                       polaris11_enable_gfx_quick_mg_power_gating(adev, false);
                break;
        default:
                break;
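
Note: the rewritten gfx_v8_0_set_powergating_state() folds the requested state into a single enable flag and then applies every supported feature as an explicit on-or-off, rather than acting only when a flag is set. The per-feature pattern (sketch, reusing the enable flag from the function above):

	/* on only when the hardware supports it AND gating was requested;
	 * otherwise explicitly off */
	bool on = (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable;
	gfx_v8_0_enable_gfx_static_mg_power_gating(adev, on);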
@@ -5139,7 +5414,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
 {
        uint32_t data;
 
-       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 
        WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
        WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
@@ -5527,6 +5802,8 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
                        WREG32(mmRLC_CGCG_CGLS_CTRL, data);
        }
 
+       gfx_v8_0_wait_for_rlc_serdes(adev);
+
        adev->gfx.rlc.funcs->exit_safe_mode(adev);
 }
 static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -5652,17 +5929,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                      unsigned vm_id, bool ctx_switch)
 {
        u32 header, control = 0;
-       u32 next_rptr = ring->wptr + 5;
-
-       if (ctx_switch)
-               next_rptr += 2;
-
-       next_rptr += 4;
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
-       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
-       amdgpu_ring_write(ring, next_rptr);
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
        if (ctx_switch) {
@@ -5691,23 +5957,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                          struct amdgpu_ib *ib,
                                          unsigned vm_id, bool ctx_switch)
 {
-       u32 header, control = 0;
-       u32 next_rptr = ring->wptr + 5;
-
-       control |= INDIRECT_BUFFER_VALID;
-
-       next_rptr += 4;
-       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
-       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
-       amdgpu_ring_write(ring, next_rptr);
+       u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
 
-       header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
-
-       control |= ib->length_dw | (vm_id << 24);
-
-       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        amdgpu_ring_write(ring,
 #ifdef __BIG_ENDIAN
                                          (2 << 0) |
@@ -6160,9 +6412,9 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
 {
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
-       case CHIP_STONEY:
                adev->gfx.rlc.funcs = &iceland_rlc_funcs;
                break;
+       case CHIP_STONEY:
        case CHIP_CARRIZO:
                adev->gfx.rlc.funcs = &cz_rlc_funcs;
                break;
@@ -6200,6 +6452,20 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
        }
 }
 
+static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
+                                                u32 bitmap)
+{
+       u32 data;
+
+       if (!bitmap)
+               return;
+
+       data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
+       data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
+
+       WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
+}
+
 static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
 {
        u32 data, mask;
@@ -6220,16 +6486,22 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
        int i, j, k, counter, active_cu_number = 0;
        u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
        struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+       unsigned disable_masks[4 * 2];
 
        memset(cu_info, 0, sizeof(*cu_info));
 
+       amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
+
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        mask = 1;
                        ao_bitmap = 0;
                        counter = 0;
-                       gfx_v8_0_select_se_sh(adev, i, j);
+                       gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
+                       if (i < 4 && j < 2)
+                               gfx_v8_0_set_user_cu_inactive_bitmap(
+                                       adev, disable_masks[i * 2 + j]);
                        bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
                        cu_info->bitmap[i][j] = bitmap;
 
@@ -6245,7 +6517,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
                        ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
                }
        }
-       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+       gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index 16a49f53a2fa2aecbf78296e87100dcd36426647..bc82c794312ca88c89bdd89cfe665c85aec74214 100644
@@ -26,7 +26,5 @@
 
 extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
 
-uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 1feb6439cb0b7fd4768a232f5ad7520df0716c2a..d24a82bd0c7a119d7b450ee801f3b5ffa1dc51e3 100644
@@ -39,6 +39,7 @@
 
 static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v7_0_wait_for_idle(void *handle);
 
 MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
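
Note: gmc_v7_0_wait_for_idle() is an amd_ip_funcs callback and therefore takes an opaque void *handle; the forward declaration above lets the MC helpers call it internally, with explicit (void *)adev casts at the call sites. A sketch of the callback, modeled on the removed gmc_v7_0_mc_wait_for_idle() below (an assumption; the real callback may check additional SRBM status bits):

	static int gmc_v7_0_wait_for_idle(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
		unsigned i;

		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32(mmSRBM_STATUS) & 0x1F00))  /* MC busy bits */
				return 0;
			udelay(1);
		}
		return -1;
	}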
@@ -73,39 +74,15 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
        }
 }
 
-/**
- * gmc7_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
-       unsigned i;
-       u32 tmp;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               /* read MC_STATUS */
-               tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
-               if (!tmp)
-                       return 0;
-               udelay(1);
-       }
-       return -1;
-}
-
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
-                     struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
+                            struct amdgpu_mode_mc_save *save)
 {
        u32 blackout;
 
        if (adev->mode_info.num_crtc)
                amdgpu_display_stop_mc_access(adev, save);
 
-       amdgpu_asic_wait_for_mc_idle(adev);
+       gmc_v7_0_wait_for_idle((void *)adev);
 
        blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
        if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -120,8 +97,8 @@ void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
        udelay(100);
 }
 
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
-                       struct amdgpu_mode_mc_save *save)
+static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
+                              struct amdgpu_mode_mc_save *save)
 {
        u32 tmp;
 
@@ -311,7 +288,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
                amdgpu_display_set_vga_render_state(adev, false);
 
        gmc_v7_0_mc_stop(adev, &save);
-       if (amdgpu_asic_wait_for_mc_idle(adev)) {
+       if (gmc_v7_0_wait_for_idle((void *)adev)) {
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        /* Update configuration */
@@ -331,7 +308,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
        WREG32(mmMC_VM_AGP_BASE, 0);
        WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
        WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
-       if (amdgpu_asic_wait_for_mc_idle(adev)) {
+       if (gmc_v7_0_wait_for_idle((void *)adev)) {
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        gmc_v7_0_mc_resume(adev, &save);
@@ -1137,7 +1114,7 @@ static int gmc_v7_0_soft_reset(void *handle)
 
        if (srbm_soft_reset) {
                gmc_v7_0_mc_stop(adev, &save);
-               if (gmc_v7_0_wait_for_idle(adev)) {
+               if (gmc_v7_0_wait_for_idle((void *)adev)) {
                        dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
                }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 36fcbbc46adae49eeffe708a2e27dacec34c40d6..0b386b5d2f7aabdc3b2fd89ca625733049fa7a89 100644
 
 extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
 
-/* XXX these shouldn't be exported */
-void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
-                     struct amdgpu_mode_mc_save *save);
-void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
-                       struct amdgpu_mode_mc_save *save);
-int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9945d5bbf1fe06244bf950167f09589b266b2a23..717359d3ba8c506b77b09d46663b7afc598e9df9 100644
@@ -41,6 +41,7 @@
 
 static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
+static int gmc_v8_0_wait_for_idle(void *handle);
 
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
@@ -147,44 +148,15 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
        }
 }
 
-/**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Wait for the MC (memory controller) to be idle.
- * (evergreen+).
- * Returns 0 if the MC is idle, -1 if not.
- */
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
-{
-       unsigned i;
-       u32 tmp;
-
-       for (i = 0; i < adev->usec_timeout; i++) {
-               /* read MC_STATUS */
-               tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
-                                              SRBM_STATUS__MCB_BUSY_MASK |
-                                              SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
-                                              SRBM_STATUS__MCC_BUSY_MASK |
-                                              SRBM_STATUS__MCD_BUSY_MASK |
-                                              SRBM_STATUS__VMC1_BUSY_MASK);
-               if (!tmp)
-                       return 0;
-               udelay(1);
-       }
-       return -1;
-}
-
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
-                     struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
+                            struct amdgpu_mode_mc_save *save)
 {
        u32 blackout;
 
        if (adev->mode_info.num_crtc)
                amdgpu_display_stop_mc_access(adev, save);
 
-       amdgpu_asic_wait_for_mc_idle(adev);
+       gmc_v8_0_wait_for_idle(adev);
 
        blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
        if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -199,8 +171,8 @@ void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
        udelay(100);
 }
 
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
-                       struct amdgpu_mode_mc_save *save)
+static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
+                              struct amdgpu_mode_mc_save *save)
 {
        u32 tmp;
 
@@ -393,7 +365,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
                amdgpu_display_set_vga_render_state(adev, false);
 
        gmc_v8_0_mc_stop(adev, &save);
-       if (amdgpu_asic_wait_for_mc_idle(adev)) {
+       if (gmc_v8_0_wait_for_idle((void *)adev)) {
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        /* Update configuration */
@@ -413,7 +385,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
        WREG32(mmMC_VM_AGP_BASE, 0);
        WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
        WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
-       if (amdgpu_asic_wait_for_mc_idle(adev)) {
+       if (gmc_v8_0_wait_for_idle((void *)adev)) {
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        gmc_v8_0_mc_resume(adev, &save);
@@ -1140,7 +1112,7 @@ static int gmc_v8_0_soft_reset(void *handle)
 
        if (srbm_soft_reset) {
                gmc_v8_0_mc_stop(adev, &save);
-               if (gmc_v8_0_wait_for_idle(adev)) {
+               if (gmc_v8_0_wait_for_idle((void *)adev)) {
                        dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
                }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index 973436086b380f966e83b56ef58b74a665ba21ce..fc5001a8119dabab06e0e45888c6670b48163e77 100644
 
 extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
 
-/* XXX these shouldn't be exported */
-void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
-                     struct amdgpu_mode_mc_save *save);
-void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
-                       struct amdgpu_mode_mc_save *save);
-int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev);
-
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
index 460bc8ad37e6252a910c068e5114bcc687350faf..2f078ad6095caa946393d0de03b40d83e5f622b4 100644
@@ -24,7 +24,7 @@
 #include <linux/firmware.h>
 #include "drmP.h"
 #include "amdgpu.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
 
 MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
 
@@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle)
 
 static int iceland_dpm_sw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 52ee08193295f434c46fb7f89e082b20df46abfd..528571285e69647eb7c73ea89c1b333575420fed 100644
@@ -25,7 +25,7 @@
 #include "drmP.h"
 #include "amdgpu.h"
 #include "ppsmc.h"
-#include "iceland_smumgr.h"
+#include "iceland_smum.h"
 #include "smu_ucode_xfer_vi.h"
 #include "amdgpu_ucode.h"
 
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
new file mode 100644
index 0000000..1e0769e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef ICELAND_SMUMGR_H
+#define ICELAND_SMUMGR_H
+
+#include "ppsmc.h"
+
+extern int iceland_smu_init(struct amdgpu_device *adev);
+extern int iceland_smu_fini(struct amdgpu_device *adev);
+extern int iceland_smu_start(struct amdgpu_device *adev);
+
+struct iceland_smu_private_data
+{
+       uint8_t *header;
+       uint8_t *mec_image;
+       uint32_t header_addr_high;
+       uint32_t header_addr_low;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h
deleted file mode 100644
index 1e0769e..0000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef ICELAND_SMUMGR_H
-#define ICELAND_SMUMGR_H
-
-#include "ppsmc.h"
-
-extern int iceland_smu_init(struct amdgpu_device *adev);
-extern int iceland_smu_fini(struct amdgpu_device *adev);
-extern int iceland_smu_start(struct amdgpu_device *adev);
-
-struct iceland_smu_private_data
-{
-       uint8_t *header;
-       uint8_t *mec_image;
-       uint32_t header_addr_high;
-       uint32_t header_addr_low;
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a789a863d677faf641bb626022123ec20a46082f..a845e883f5fa560a18340517edbaa2df8080cf30 100644
@@ -191,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
        vid_mapping_table->num_entries = i;
 }
 
+#if 0
 static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
 {
        {  0,       4,        1    },
@@ -289,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
 {
        { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
 };
+#endif
 
 static const struct kv_pt_config_reg didt_config_kv[] =
 {
@@ -507,19 +509,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
            pi->caps_db_ramping ||
            pi->caps_td_ramping ||
            pi->caps_tcp_ramping) {
-               gfx_v7_0_enter_rlc_safe_mode(adev);
+               adev->gfx.rlc.funcs->enter_safe_mode(adev);
 
                if (enable) {
                        ret = kv_program_pt_config_registers(adev, didt_config_kv);
                        if (ret) {
-                               gfx_v7_0_exit_rlc_safe_mode(adev);
+                               adev->gfx.rlc.funcs->exit_safe_mode(adev);
                                return ret;
                        }
                }
 
                kv_do_enable_didt(adev, enable);
 
-               gfx_v7_0_exit_rlc_safe_mode(adev);
+               adev->gfx.rlc.funcs->exit_safe_mode(adev);
        }
 
        return 0;
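
Note: kv_enable_didt() now enters and leaves RLC safe mode through the rlc.funcs hooks installed by gfx_v7_0_early_init(), removing the direct dependency on gfx_v7_0 symbols. The enter/exit calls stay balanced on every path (sketch of the guarded section):

	adev->gfx.rlc.funcs->enter_safe_mode(adev);
	ret = kv_program_pt_config_registers(adev, didt_config_kv);
	if (ret) {
		adev->gfx.rlc.funcs->exit_safe_mode(adev);  /* unwind on error */
		return ret;
	}
	kv_do_enable_didt(adev, enable);
	adev->gfx.rlc.funcs->exit_safe_mode(adev);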
diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
index 7837f2ecc3570167392199e2fa886cda6bc86cc0..8463245f424ff75da034dea697cfc9e8f759a29b 100644
@@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_StartFanControl               ((uint8_t)0x5B)
 #define PPSMC_StopFanControl                ((uint8_t)0x5C)
 #define PPSMC_MSG_NoDisplay                 ((uint8_t)0x5D)
+#define PPSMC_NoDisplay                     ((uint8_t)0x5D)
 #define PPSMC_MSG_HasDisplay                ((uint8_t)0x5E)
+#define PPSMC_HasDisplay                    ((uint8_t)0x5E)
 #define PPSMC_MSG_UVDPowerOFF               ((uint8_t)0x60)
 #define PPSMC_MSG_UVDPowerON                ((uint8_t)0x61)
 #define PPSMC_MSG_EnableULV                 ((uint8_t)0x62)
@@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_DisableDTE                ((uint8_t)0x88)
 #define PPSMC_MSG_ThrottleOVRDSCLKDS        ((uint8_t)0x96)
 #define PPSMC_MSG_CancelThrottleOVRDSCLKDS  ((uint8_t)0x97)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt   ((uint16_t) 0x149)
 
 /* CI/KV/KB */
 #define PPSMC_MSG_UVDDPM_SetEnabledMask       ((uint16_t) 0x12D)
@@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result;
 #define PPSMC_MSG_MASTER_DeepSleep_OFF        ((uint16_t) 0x190)
 #define PPSMC_MSG_Remove_DC_Clamp             ((uint16_t) 0x191)
 #define PPSMC_MSG_SetFanPwmMax                ((uint16_t) 0x19A)
+#define PPSMC_MSG_SetFanRpmMax                ((uint16_t) 0x205)
 
 #define PPSMC_MSG_ENABLE_THERMAL_DPM          ((uint16_t) 0x19C)
 #define PPSMC_MSG_DISABLE_THERMAL_DPM         ((uint16_t) 0x19D)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index f4c3130d3fdb20eee732b7c4e6e123ca008906a1..0111d153411b572e156652ec89b2cc6cc89cdaca 100644
@@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
        }
 }
 
+static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
+{
+       int i;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               release_firmware(adev->sdma.instance[i].fw);
+               adev->sdma.instance[i].fw = NULL;
+       }
+}
+
 /**
  * sdma_v2_4_init_microcode - load ucode images from disk
  *
@@ -246,19 +255,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
                                   unsigned vm_id, bool ctx_switch)
 {
        u32 vmid = vm_id & 0xf;
-       u32 next_rptr = ring->wptr + 5;
-
-       while ((next_rptr & 7) != 2)
-               next_rptr++;
-
-       next_rptr += 6;
-
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-                         SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
-       amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
-       amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
-       amdgpu_ring_write(ring, next_rptr);
 
        /* IB packet must end on a 8 DW boundary */
        sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -461,6 +457,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -489,7 +487,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
 
+       sdma_v2_4_enable(adev, true);
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -565,23 +567,25 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
 {
        int r;
 
-       if (!adev->firmware.smu_load) {
-               r = sdma_v2_4_load_microcode(adev);
-               if (r)
-                       return r;
-       } else {
-               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                                               AMDGPU_UCODE_ID_SDMA0);
-               if (r)
-                       return -EINVAL;
-               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                                               AMDGPU_UCODE_ID_SDMA1);
-               if (r)
-                       return -EINVAL;
+       if (!adev->pp_enabled) {
+               if (!adev->firmware.smu_load) {
+                       r = sdma_v2_4_load_microcode(adev);
+                       if (r)
+                               return r;
+               } else {
+                       r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                                       AMDGPU_UCODE_ID_SDMA0);
+                       if (r)
+                               return -EINVAL;
+                       r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                                       AMDGPU_UCODE_ID_SDMA1);
+                       if (r)
+                               return -EINVAL;
+               }
        }
 
-       /* unhalt the MEs */
-       sdma_v2_4_enable(adev, true);
+       /* halt the engine before programing */
+       sdma_v2_4_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v2_4_gfx_resume(adev);
@@ -669,7 +673,6 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct fence *f = NULL;
-       unsigned i;
        unsigned index;
        int r;
        u32 tmp = 0;
@@ -711,23 +714,15 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                goto err1;
        }
-       for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = le32_to_cpu(adev->wb.wb[index]);
-               if (tmp == 0xDEADBEEF)
-                       break;
-               DRM_UDELAY(1);
-       }
-       if (i < adev->usec_timeout) {
-               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-                        ring->idx, i);
-               goto err1;
+       tmp = le32_to_cpu(adev->wb.wb[index]);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
        } else {
                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
 
 err1:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 err0:
@@ -1012,6 +1007,7 @@ static int sdma_v2_4_sw_fini(void *handle)
        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+       sdma_v2_4_free_microcode(adev);
        return 0;
 }
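
The sdma_v2_4 resume path above (and the sdma_v3_0 one below) now follows one
bring-up order: keep the engine halted while every ring is programmed, unhalt
exactly once, and only then run the ring tests. A stand-alone C model of that
ordering (program_ring, test_ring and the engine_unhalted flag are
illustrative names, not driver symbols):

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_RINGS 2

    static bool engine_unhalted;

    static void program_ring(int i) { printf("programmed ring %d\n", i); }

    /* a ring test against a halted engine would hang; model it as failure */
    static int test_ring(int i) { (void)i; return engine_unhalted ? 0 : -1; }

    int main(void)
    {
        int i;

        engine_unhalted = false;        /* halt before programming */
        for (i = 0; i < NUM_RINGS; i++)
            program_ring(i);
        engine_unhalted = true;         /* unhalt once all rings are set up */
        for (i = 0; i < NUM_RINGS; i++)
            if (test_ring(i))
                return 1;
        printf("all rings tested ok\n");
        return 0;
    }
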
 
index 31d99b0010f79a032540b06cf0bc3c04d498f78c..2b8ce5808a14ac36c9a4ee8992116cd4a25040cc 100644 (file)
@@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
        }
 }
 
+static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
+{
+       int i;
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               release_firmware(adev->sdma.instance[i].fw);
+               adev->sdma.instance[i].fw = NULL;
+       }
+}
+
 /**
  * sdma_v3_0_init_microcode - load ucode images from disk
  *
@@ -406,18 +415,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   unsigned vm_id, bool ctx_switch)
 {
        u32 vmid = vm_id & 0xf;
-       u32 next_rptr = ring->wptr + 5;
-
-       while ((next_rptr & 7) != 2)
-               next_rptr++;
-       next_rptr += 6;
-
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
-                         SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
-       amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
-       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
-       amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
-       amdgpu_ring_write(ring, next_rptr);
 
        /* IB packet must end on a 8 DW boundary */
        sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -672,6 +669,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                /* Initialize the ring buffer's read and write pointers */
                WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
                WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+               WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
 
                /* set the wb address whether it's enabled or not */
                WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -711,7 +710,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
                WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
 
                ring->ready = true;
+       }
 
+       /* unhalt the MEs */
+       sdma_v3_0_enable(adev, true);
+       /* enable sdma ring preemption */
+       sdma_v3_0_ctx_switch_enable(adev, true);
+
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               ring = &adev->sdma.instance[i].ring;
                r = amdgpu_ring_test_ring(ring);
                if (r) {
                        ring->ready = false;
@@ -804,10 +811,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       /* unhalt the MEs */
-       sdma_v3_0_enable(adev, true);
-       /* enable sdma ring preemption */
-       sdma_v3_0_ctx_switch_enable(adev, true);
+       /* disable the sdma engine before programming it */
+       sdma_v3_0_ctx_switch_enable(adev, false);
+       sdma_v3_0_enable(adev, false);
 
        /* start the gfx rings and rlc compute queues */
        r = sdma_v3_0_gfx_resume(adev);
@@ -895,7 +901,6 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct fence *f = NULL;
-       unsigned i;
        unsigned index;
        int r;
        u32 tmp = 0;
@@ -937,22 +942,14 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
                DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
                goto err1;
        }
-       for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = le32_to_cpu(adev->wb.wb[index]);
-               if (tmp == 0xDEADBEEF)
-                       break;
-               DRM_UDELAY(1);
-       }
-       if (i < adev->usec_timeout) {
-               DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
-                        ring->idx, i);
-               goto err1;
+       tmp = le32_to_cpu(adev->wb.wb[index]);
+       if (tmp == 0xDEADBEEF) {
+               DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
        } else {
                DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
 err1:
-       fence_put(f);
        amdgpu_ib_free(adev, &ib, NULL);
        fence_put(f);
 err0:
@@ -1247,6 +1244,7 @@ static int sdma_v3_0_sw_fini(void *handle)
        for (i = 0; i < adev->sdma.num_instances; i++)
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
+       sdma_v3_0_free_microcode(adev);
        return 0;
 }
 
index b7615cefcac40b5d2607a194b5184ed2fe3a9bcf..f06f6f4dc3a8a93730a38050d269d6df00e3cc1e 100644 (file)
@@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle)
 
 static int tonga_dpm_sw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       release_firmware(adev->pm.fw);
+       adev->pm.fw = NULL;
+
        return 0;
 }
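
tonga_dpm_sw_fini above releases the SMU firmware and clears adev->pm.fw so a
repeated teardown can never touch a stale pointer. The same release-and-clear
idiom in plain C, with a hypothetical fw_blob type standing in for the
firmware API:

    #include <stdlib.h>

    struct fw_blob { void *data; };

    static void release_blob(struct fw_blob *fw)
    {
        if (!fw)            /* like release_firmware(), NULL is a no-op */
            return;
        free(fw->data);
        free(fw);
    }

    static void sw_fini(struct fw_blob **pm_fw)
    {
        release_blob(*pm_fw);
        *pm_fw = NULL;      /* a second sw_fini now falls through safely */
    }
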
 
index f07551476a70db5ba2fd4b7c6c3eb3f87a2889b1..416c8567d3ed1054c2b4be75b8f83d57b9da5604 100644 (file)
@@ -34,6 +34,8 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 
+#include "bif/bif_4_1_d.h"
+
 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
 static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -438,6 +440,32 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
        amdgpu_ring_write(ring, 2);
 }
 
+/**
+ * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+       amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v4_2_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+       amdgpu_ring_write(ring, 1);
+}
+
 /**
  * uvd_v4_2_ring_test_ring - register write test
  *
@@ -763,6 +791,8 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v4_2_ring_emit_ib,
        .emit_fence = uvd_v4_2_ring_emit_fence,
+       .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
        .test_ring = uvd_v4_2_ring_test_ring,
        .test_ib = uvd_v4_2_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
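
Each of the new emit_hdp_flush/emit_hdp_invalidate hooks above is just a
PACKET0-style register write placed on the ring: one header dword naming the
register, one payload dword. A simplified, compilable model of that two-dword
emission (the packet0() encoding below is made up for illustration and is not
the real packet layout):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 16

    static uint32_t ring[RING_SIZE];
    static unsigned int wptr;

    static void ring_write(uint32_t dw) { ring[wptr++ % RING_SIZE] = dw; }

    /* toy header: register offset in the low half, dword count above it */
    static uint32_t packet0(uint32_t reg, uint32_t count)
    {
        return (reg & 0xffff) | (count << 16);
    }

    static void emit_reg_write(uint32_t reg, uint32_t val)
    {
        ring_write(packet0(reg, 0));    /* count 0 == a single register */
        ring_write(val);
    }

    int main(void)
    {
        emit_reg_write(0x1234, 0);      /* "flush" register, value 0 */
        emit_reg_write(0x5678, 1);      /* "debug" register, value 1 */
        printf("emitted %u dwords\n", wptr);
        return 0;
    }
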
index e0a76a883d46bab43513df7b3e1b5f9e3cda90dd..dd636c4c4b089d68593f1ffbddd05fab452d662c 100644 (file)
@@ -31,6 +31,7 @@
 #include "uvd/uvd_5_0_sh_mask.h"
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
+#include "bif/bif_5_0_d.h"
 #include "vi.h"
 
 static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -488,6 +489,32 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
        amdgpu_ring_write(ring, 2);
 }
 
+/**
+ * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+       amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v5_0_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+       amdgpu_ring_write(ring, 1);
+}
+
 /**
  * uvd_v5_0_ring_test_ring - register write test
  *
@@ -815,6 +842,8 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v5_0_ring_emit_ib,
        .emit_fence = uvd_v5_0_ring_emit_fence,
+       .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v5_0_ring_test_ring,
        .test_ib = uvd_v5_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
index c9929d665c015195837cf1dc587b4e8c5ee371ba..07e9a987fbee413f6d21d9323dbd8045b7ac4d97 100644 (file)
@@ -33,6 +33,7 @@
 #include "oss/oss_2_0_sh_mask.h"
 #include "smu/smu_7_1_3_d.h"
 #include "smu/smu_7_1_3_sh_mask.h"
+#include "bif/bif_5_1_d.h"
 #include "vi.h"
 
 static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -385,8 +386,8 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        uint32_t mp_swap_cntl;
        int i, j, r;
 
-       /*disable DPG */
-       WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));
+       /* disable DPG */
+       WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
 
        /* disable byte swapping */
        lmi_swap_cntl = 0;
@@ -405,17 +406,21 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        }
 
        /* disable interupt */
-       WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
+       WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
 
        /* stall UMC and register bus before resetting VCPU */
-       WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+       WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
        mdelay(1);
 
        /* put LMI, VCPU, RBC etc... into reset */
-       WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
-               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
-               UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
-               UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
+       WREG32(mmUVD_SOFT_RESET,
+               UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
+               UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);
 
@@ -424,8 +429,13 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        mdelay(5);
 
        /* initialize UVD memory controller */
-       WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
-                            (1 << 21) | (1 << 9) | (1 << 20));
+       WREG32(mmUVD_LMI_CTRL,
+               (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
+               UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
+               UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
+               UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
+               UVD_LMI_CTRL__REQ_MODE_MASK |
+               UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
 
 #ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
@@ -447,10 +457,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        mdelay(5);
 
        /* enable VCPU clock */
-       WREG32(mmUVD_VCPU_CNTL,  1 << 9);
+       WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
 
        /* enable UMC */
-       WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+       WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
 
        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
@@ -484,10 +494,12 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
                return r;
        }
        /* enable master interrupt */
-       WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));
+       WREG32_P(mmUVD_MASTINT_EN,
+               (UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
+               ~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));
 
        /* clear the bit 4 of UVD_STATUS */
-       WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));
+       WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
 
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = 0;
@@ -580,6 +592,32 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
        amdgpu_ring_write(ring, 2);
 }
 
+/**
+ * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp flush.
+ */
+static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+       amdgpu_ring_write(ring, 0);
+}
+
+/**
+ * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Emits an hdp invalidate.
+ */
+static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
+{
+       amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
+       amdgpu_ring_write(ring, 1);
+}
+
 /**
  * uvd_v6_0_ring_test_ring - register write test
  *
@@ -847,7 +885,8 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
        bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
        static int curstate = -1;
 
-       if (adev->asic_type == CHIP_FIJI)
+       if (adev->asic_type == CHIP_FIJI ||
+                       adev->asic_type == CHIP_POLARIS10)
                uvd_v6_set_bypass_mode(adev, enable);
 
        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
@@ -919,6 +958,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_ib = uvd_v6_0_ring_emit_ib,
        .emit_fence = uvd_v6_0_ring_emit_fence,
+       .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
+       .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v6_0_ring_test_ring,
        .test_ib = uvd_v6_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
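
The uvd_v6_0_start rewrite above replaces raw bit arithmetic such as 1 << 9
with the generated *_MASK/*__SHIFT names, so each register write documents
the fields it touches. The same style with invented field names:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical field layout in the MASK/__SHIFT convention */
    #define CTRL__TIMER_MASK        0x000000ffU
    #define CTRL__TIMER__SHIFT      0x0
    #define CTRL__TIMER_EN_MASK     0x00000100U
    #define CTRL__COHERENCY_MASK    0x00002000U

    int main(void)
    {
        /* 0x40 | (1 << 8) | (1 << 13) says nothing; the named form does */
        uint32_t ctrl = (0x40 << CTRL__TIMER__SHIFT) |
                        CTRL__TIMER_EN_MASK |
                        CTRL__COHERENCY_MASK;

        printf("ctrl = 0x%08x\n", ctrl);
        return 0;
    }
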
index 30e8099e94c5629ca6d7703526b20bf0a715267c..d7b8da433fe29505b242e3fa81c0694b3f43fd80 100644 (file)
@@ -43,6 +43,7 @@
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0        0x8616
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1        0x8617
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2        0x8618
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK  0x02
 
 #define VCE_V3_0_FW_SIZE       (384 * 1024)
 #define VCE_V3_0_STACK_SIZE    (64 * 1024)
@@ -51,6 +52,7 @@
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
+static int vce_v3_0_wait_for_idle(void *handle);
 
 /**
  * vce_v3_0_ring_get_rptr - get read pointer
@@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
        vce_v3_0_override_vce_clock_gating(adev, false);
 }
 
+static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
+{
+       int i, j;
+       uint32_t status = 0;
+
+       for (i = 0; i < 10; ++i) {
+               for (j = 0; j < 100; ++j) {
+                       status = RREG32(mmVCE_STATUS);
+                       if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+                               return 0;
+                       mdelay(10);
+               }
+
+               DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
+               WREG32_P(mmVCE_SOFT_RESET,
+                       VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+                       ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               mdelay(10);
+               WREG32_P(mmVCE_SOFT_RESET, 0,
+                       ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+               mdelay(10);
+       }
+
+       return -ETIMEDOUT;
+}
+
 /**
  * vce_v3_0_start - start VCE block
  *
@@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
 static int vce_v3_0_start(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring;
-       int idx, i, j, r;
+       int idx, r;
+
+       ring = &adev->vce.ring[0];
+       WREG32(mmVCE_RB_RPTR, ring->wptr);
+       WREG32(mmVCE_RB_WPTR, ring->wptr);
+       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+       ring = &adev->vce.ring[1];
+       WREG32(mmVCE_RB_RPTR2, ring->wptr);
+       WREG32(mmVCE_RB_WPTR2, ring->wptr);
+       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
-
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
@@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 
                vce_v3_0_mc_resume(adev, idx);
 
-               /* set BUSY flag */
-               WREG32_P(mmVCE_STATUS, 1, ~1);
+               WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
+                        ~VCE_STATUS__JOB_BUSY_MASK);
+
                if (adev->asic_type >= CHIP_STONEY)
                        WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
                else
                        WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
                                ~VCE_VCPU_CNTL__CLK_EN_MASK);
 
-               WREG32_P(mmVCE_SOFT_RESET,
-                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-
-               mdelay(100);
-
                WREG32_P(mmVCE_SOFT_RESET, 0,
                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
 
-               for (i = 0; i < 10; ++i) {
-                       uint32_t status;
-                       for (j = 0; j < 100; ++j) {
-                               status = RREG32(mmVCE_STATUS);
-                               if (status & 2)
-                                       break;
-                               mdelay(10);
-                       }
-                       r = 0;
-                       if (status & 2)
-                               break;
-
-                       DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
-                       WREG32_P(mmVCE_SOFT_RESET,
-                               VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
-                               ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-                       mdelay(10);
-                       WREG32_P(mmVCE_SOFT_RESET, 0,
-                               ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
-                       mdelay(10);
-                       r = -1;
-               }
+               mdelay(100);
+
+               r = vce_v3_0_firmware_loaded(adev);
 
                /* clear BUSY flag */
-               WREG32_P(mmVCE_STATUS, 0, ~1);
+               WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
 
                /* Set Clock-Gating off */
                if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
        mutex_unlock(&adev->grbm_idx_mutex);
 
-       ring = &adev->vce.ring[0];
-       WREG32(mmVCE_RB_RPTR, ring->wptr);
-       WREG32(mmVCE_RB_WPTR, ring->wptr);
-       WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+       return 0;
+}
 
-       ring = &adev->vce.ring[1];
-       WREG32(mmVCE_RB_RPTR2, ring->wptr);
-       WREG32(mmVCE_RB_WPTR2, ring->wptr);
-       WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-       WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-       WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+static int vce_v3_0_stop(struct amdgpu_device *adev)
+{
+       int idx;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+       for (idx = 0; idx < 2; ++idx) {
+               if (adev->vce.harvest_config & (1 << idx))
+                       continue;
+
+               if (idx == 0)
+                       WREG32_P(mmGRBM_GFX_INDEX, 0,
+                               ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+               else
+                       WREG32_P(mmGRBM_GFX_INDEX,
+                               GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
+                               ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+
+               if (adev->asic_type >= CHIP_STONEY)
+                       WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
+               else
+                       WREG32_P(mmVCE_VCPU_CNTL, 0,
+                               ~VCE_VCPU_CNTL__CLK_EN_MASK);
+               /* hold the ECPU in reset */
+               WREG32_P(mmVCE_SOFT_RESET,
+                        VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+                        ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+
+               /* clear BUSY flag */
+               WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
+
+               /* Set Clock-Gating off */
+               if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
+                       vce_v3_0_set_vce_sw_clock_gating(adev, false);
+       }
+
+       WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
+       mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
 }
@@ -441,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle)
 
 static int vce_v3_0_hw_fini(void *handle)
 {
-       return 0;
+       int r;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       r = vce_v3_0_wait_for_idle(handle);
+       if (r)
+               return r;
+
+       return vce_v3_0_stop(adev);
 }
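
vce_v3_0_firmware_loaded above factors the firmware-ready wait into a bounded
two-level loop: poll the status register, and if the bit never comes up,
soft-reset the ECPU and poll again, giving up with -ETIMEDOUT after a fixed
number of rounds. A self-contained model of that retry discipline (hw_status
and hw_soft_reset are stubs, not driver calls):

    #include <stdio.h>

    #define RETRIES       10
    #define POLLS         100
    #define FW_LOADED_BIT 0x2

    static int poll_count;

    /* toy device: reports ready after 150 polls */
    static int hw_status(void) { return ++poll_count > 150 ? FW_LOADED_BIT : 0; }
    static void hw_soft_reset(void) { /* nothing to do in the toy */ }

    static int wait_fw_loaded(void)
    {
        int i, j;

        for (i = 0; i < RETRIES; ++i) {
            for (j = 0; j < POLLS; ++j) {
                if (hw_status() & FW_LOADED_BIT)
                    return 0;
                /* the driver mdelay(10)s between polls */
            }
            hw_soft_reset();    /* still not up: kick the ECPU, retry */
        }
        return -1;              /* the driver returns -ETIMEDOUT */
    }

    int main(void)
    {
        printf("wait_fw_loaded() = %d\n", wait_fw_loaded());
        return 0;
    }
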
 
 static int vce_v3_0_suspend(void *handle)
index 2c88d0b66cf396b35016b169c04091feaa2cc6e5..03a31c53aec3d18e67b42cbadb4ab30f9e82c766 100644 (file)
@@ -203,6 +203,28 @@ static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
 }
 
+static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+       WREG32(mmGC_CAC_IND_INDEX, (reg));
+       r = RREG32(mmGC_CAC_IND_DATA);
+       spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+       return r;
+}
+
+static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
+       WREG32(mmGC_CAC_IND_INDEX, (reg));
+       WREG32(mmGC_CAC_IND_DATA, (v));
+       spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
+}
+
 static const u32 tonga_mgcg_cgcg_init[] =
 {
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
@@ -421,6 +444,20 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
+static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+{
+       u32 caps = 0;
+       u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+
+       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+               caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+
+       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+               caps |= AMDGPU_VIRT_CAPS_IS_VF;
+
+       return caps;
+}
+
 static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
        {mmGB_MACROTILE_MODE7, true},
 };
@@ -519,12 +556,12 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
 
        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               gfx_v8_0_select_se_sh(adev, se_num, sh_num);
+               amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
        val = RREG32(reg_offset);
 
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
-               gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
+               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
 }
@@ -583,7 +620,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
        return -EINVAL;
 }
 
-static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 {
        u32 i;
 
@@ -598,11 +635,14 @@ static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 
        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
-               if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
-                       break;
+               if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+                       /* enable BM */
+                       pci_set_master(adev->pdev);
+                       return 0;
+               }
                udelay(1);
        }
-
+       return -EINVAL;
 }
 
 static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -628,13 +668,15 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun
  */
 static int vi_asic_reset(struct amdgpu_device *adev)
 {
+       int r;
+
        vi_set_bios_scratch_engine_hung(adev, true);
 
-       vi_gpu_pci_config_reset(adev);
+       r = vi_gpu_pci_config_reset(adev);
 
        vi_set_bios_scratch_engine_hung(adev, false);
 
-       return 0;
+       return r;
 }
 
 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -1118,9 +1160,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
        .get_xclk = &vi_get_xclk,
        .set_uvd_clocks = &vi_set_uvd_clocks,
        .set_vce_clocks = &vi_set_vce_clocks,
-       /* these should be moved to their own ip modules */
-       .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
-       .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
+       .get_virtual_caps = &vi_get_virtual_caps,
 };
 
 static int vi_common_early_init(void *handle)
@@ -1141,6 +1181,8 @@ static int vi_common_early_init(void *handle)
        adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
        adev->didt_rreg = &vi_didt_rreg;
        adev->didt_wreg = &vi_didt_wreg;
+       adev->gc_cac_rreg = &vi_gc_cac_rreg;
+       adev->gc_cac_wreg = &vi_gc_cac_wreg;
 
        adev->asic_funcs = &vi_asic_funcs;
 
@@ -1214,12 +1256,17 @@ static int vi_common_early_init(void *handle)
                adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
                        AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
+                       AMD_CG_SUPPORT_GFX_RLC_LS |
+                       AMD_CG_SUPPORT_GFX_CP_LS |
+                       AMD_CG_SUPPORT_GFX_CGTS |
+                       AMD_CG_SUPPORT_GFX_CGTS_LS |
+                       AMD_CG_SUPPORT_GFX_CGCG |
+                       AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_BIF_LS |
                        AMD_CG_SUPPORT_HDP_MGCG |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS;
-               adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x1;
                break;
        default:
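
vi_get_virtual_caps derives the SR-IOV capability bits by decoding two fields
of a single identification register. The decode pattern, with invented masks
and cap flags in place of the BIF_IOV_FUNC_IDENTIFIER fields:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical field encoding in the style of REG_GET_FIELD() */
    #define IOV_ENABLE_MASK   0x1U
    #define IOV_ENABLE_SHIFT  0
    #define FUNC_ID_MASK      0x2U
    #define FUNC_ID_SHIFT     1

    #define GET_FIELD(v, mask, sh) (((v) & (mask)) >> (sh))

    #define CAPS_SRIOV_EN (1U << 0)
    #define CAPS_IS_VF    (1U << 1)

    static uint32_t decode_virtual_caps(uint32_t reg)
    {
        uint32_t caps = 0;

        if (GET_FIELD(reg, IOV_ENABLE_MASK, IOV_ENABLE_SHIFT))
            caps |= CAPS_SRIOV_EN;      /* SR-IOV enabled on this board */
        if (GET_FIELD(reg, FUNC_ID_MASK, FUNC_ID_SHIFT))
            caps |= CAPS_IS_VF;         /* we are a virtual function */
        return caps;
    }

    int main(void)
    {
        printf("caps = 0x%x\n", decode_virtual_caps(0x3));
        return 0;
    }
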
index ec4036a09f3e78f7343a4ba87d88dc1aa0d71b21..a625b9137da226de64fb0d024834968b9911ab4f 100644 (file)
@@ -187,12 +187,12 @@ int init_pipelines(struct device_queue_manager *dqm,
 unsigned int get_first_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_num(struct device_queue_manager *dqm);
 
-extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
+static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
 {
        return (pdd->lds_base >> 16) & 0xFF;
 }
 
-extern inline unsigned int
+static inline unsigned int
 get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
 {
        return (pdd->lds_base >> 60) & 0x0E;
index d0d5f4baf72d296ed8228a138ac34be1018c098c..80113c335966889afbe6c9a8c58f7e219b18e418 100644 (file)
@@ -617,10 +617,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd);
 int kfd_init_apertures(struct kfd_process *process);
 
 /* Queue Context Management */
-inline uint32_t lower_32(uint64_t x);
-inline uint32_t upper_32(uint64_t x);
 struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
-inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);
 
 int init_queue(struct queue **q, struct queue_properties properties);
 void uninit_queue(struct queue *q);
index ac005796b71c1491fb509bc3705b6e5f5c75442a..4f3849ac8c0770d1dbde09f9d6e371fb24c01e94 100644 (file)
@@ -63,13 +63,12 @@ static struct kfd_process *create_process(const struct task_struct *thread);
 void kfd_process_create_wq(void)
 {
        if (!kfd_process_wq)
-               kfd_process_wq = create_workqueue("kfd_process_wq");
+               kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
 }
 
 void kfd_process_destroy_wq(void)
 {
        if (kfd_process_wq) {
-               flush_workqueue(kfd_process_wq);
                destroy_workqueue(kfd_process_wq);
                kfd_process_wq = NULL;
        }
@@ -242,13 +241,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
        pqm_uninit(&p->pqm);
 
        /* Iterate over all process device data structure and check
-        * if we should reset all wavefronts */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+        * if we should delete debug managers and reset all wavefronts
+        */
+       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+               if ((pdd->dev->dbgmgr) &&
+                               (pdd->dev->dbgmgr->pasid == p->pasid))
+                       kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
+
                if (pdd->reset_wavefronts) {
                        pr_warn("amdkfd: Resetting all wave fronts\n");
                        dbgdev_wave_reset_wavefronts(pdd->dev, p);
                        pdd->reset_wavefronts = false;
                }
+       }
 
        mutex_unlock(&p->mutex);
 
@@ -324,6 +329,7 @@ err_process_pqm_init:
        synchronize_rcu();
        mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
 err_mmu_notifier:
+       mutex_destroy(&process->mutex);
        kfd_pasid_free(process->pasid);
 err_alloc_pasid:
        kfree(process->queues);
@@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
 
        idx = srcu_read_lock(&kfd_processes_srcu);
 
+       /*
+        * Look for the process that matches the pasid. If there is no such
+        * process, we either released it in amdkfd's own notifier, or there
+        * is a bug. Unfortunately, there is no way to tell...
+        */
        hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
-               if (p->pasid == pasid)
-                       break;
+               if (p->pasid == pasid) {
 
-       srcu_read_unlock(&kfd_processes_srcu, idx);
+                       srcu_read_unlock(&kfd_processes_srcu, idx);
 
-       BUG_ON(p->pasid != pasid);
+                       pr_debug("Unbinding process %d from IOMMU\n", pasid);
 
-       mutex_lock(&p->mutex);
+                       mutex_lock(&p->mutex);
 
-       if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
-               kfd_dbgmgr_destroy(dev->dbgmgr);
+                       if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+                               kfd_dbgmgr_destroy(dev->dbgmgr);
 
-       pqm_uninit(&p->pqm);
+                       pqm_uninit(&p->pqm);
 
-       pdd = kfd_get_process_device_data(dev, p);
+                       pdd = kfd_get_process_device_data(dev, p);
 
-       if (!pdd) {
-               mutex_unlock(&p->mutex);
-               return;
-       }
+                       if (!pdd) {
+                               mutex_unlock(&p->mutex);
+                               return;
+                       }
 
-       if (pdd->reset_wavefronts) {
-               dbgdev_wave_reset_wavefronts(pdd->dev, p);
-               pdd->reset_wavefronts = false;
-       }
+                       if (pdd->reset_wavefronts) {
+                               dbgdev_wave_reset_wavefronts(pdd->dev, p);
+                               pdd->reset_wavefronts = false;
+                       }
 
-       /*
-        * Just mark pdd as unbound, because we still need it to call
-        * amd_iommu_unbind_pasid() in when the process exits.
-        * We don't call amd_iommu_unbind_pasid() here
-        * because the IOMMU called us.
-        */
-       pdd->bound = false;
+                       /*
+                        * Just mark pdd as unbound, because we still need it
+                        * to call amd_iommu_unbind_pasid() when the
+                        * process exits.
+                        * We don't call amd_iommu_unbind_pasid() here
+                        * because the IOMMU called us.
+                        */
+                       pdd->bound = false;
 
-       mutex_unlock(&p->mutex);
+                       mutex_unlock(&p->mutex);
+
+                       return;
+               }
+
+       srcu_read_unlock(&kfd_processes_srcu, idx);
 }
 
 struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
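
The kfd_unbind_process_from_device rework above replaces "break out of the
hash walk, then BUG_ON a possibly unmatched pointer" with doing all the work
inside the match and treating a miss as a clean no-op. The control-flow
difference in miniature, with a plain linked list standing in for the
SRCU-protected hash table:

    #include <stddef.h>
    #include <stdio.h>

    struct proc { unsigned int pasid; struct proc *next; };

    /*
     * Old shape: break, then assert p->pasid == pasid -- which inspects a
     * meaningless pointer when nothing matched. New shape: act inside the
     * match and return; a miss simply falls out of the loop.
     */
    static struct proc *find_process(struct proc *head, unsigned int pasid)
    {
        for (struct proc *p = head; p; p = p->next)
            if (p->pasid == pasid)
                return p;   /* unlock + unbind would happen here */
        return NULL;        /* no match: nothing to assert on */
    }

    int main(void)
    {
        struct proc b = { 7, NULL }, a = { 3, &b };

        printf("%s\n", find_process(&a, 9) ? "found" : "not found");
        return 0;
    }
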
index 74909e72a00929c8a87b73c9e25afb633fb50cf4..884c96f50c3d9b62ccc4c9f81e20570d41d72f46 100644 (file)
@@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                        dev->node_props.simd_count);
 
        if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
-               pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+               pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
                                dev->node_props.mem_banks_count,
                                dev->mem_bank_count);
                sysfs_show_32bit_prop(buffer, "mem_banks_count",
index 7c2a916c1e63c3e6a00bfeafbda7a92444725085..5eb895fd98bfb6a65de4fd16e595d85c12d7830d 100644 (file)
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK   0x0000FFFF
 #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT  0
 
+/* gen: chipset 1/2, asic 1/2/3 */
+#define AMDGPU_DEFAULT_PCIE_GEN_MASK (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 \
+                                     | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 \
+                                     | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 \
+                                     | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 \
+                                     | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
+
 /* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1          0x00010000
 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2          0x00020000
 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32         0x00400000
 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT       16
 
+/* 1/2/4/8/16 lanes */
+#define AMDGPU_DEFAULT_PCIE_MLW_MASK (CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 \
+                                     | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 \
+                                     | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 \
+                                     | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \
+                                     | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+
 #endif
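
The two new AMDGPU_DEFAULT_* masks simply OR together every speed and width
the surrounding CAIL flags name, giving callers an "advertise everything"
fallback when no platform-specific mask exists. The composition pattern with
toy flag values (not the CAIL encodings):

    #include <stdint.h>
    #include <stdio.h>

    #define GEN1 0x1U
    #define GEN2 0x2U
    #define GEN3 0x4U

    /* the default claims support for every generation the flags name */
    #define DEFAULT_GEN_MASK (GEN1 | GEN2 | GEN3)

    int main(void)
    {
        uint32_t supported = DEFAULT_GEN_MASK;

        if (supported & GEN3)
            printf("gen3 capable\n");
        return 0;
    }
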
index 6080951d539d0d3fd4c85390a0afa18ce2df8cd7..a74a0d2ff1cac6008be2b634184f9edc42185b10 100644 (file)
 
 #define AMD_MAX_USEC_TIMEOUT           100000  /* 100 ms */
 
-/*
-* Supported GPU families (aligned with amdgpu_drm.h)
-*/
-#define AMD_FAMILY_UNKNOWN              0
-#define AMD_FAMILY_CI                   120 /* Bonaire, Hawaii */
-#define AMD_FAMILY_KV                   125 /* Kaveri, Kabini, Mullins */
-#define AMD_FAMILY_VI                   130 /* Iceland, Tonga */
-#define AMD_FAMILY_CZ                   135 /* Carrizo */
-
 /*
  * Supported ASIC types
  */
@@ -120,6 +111,8 @@ enum amd_powergating_state {
 #define AMD_PG_SUPPORT_SDMA                    (1 << 8)
 #define AMD_PG_SUPPORT_ACP                     (1 << 9)
 #define AMD_PG_SUPPORT_SAMU                    (1 << 10)
+#define AMD_PG_SUPPORT_GFX_QUICK_MG            (1 << 11)
+#define AMD_PG_SUPPORT_GFX_PIPELINE            (1 << 12)
 
 enum amd_pm_state_type {
        /* not used for dpm */
@@ -157,6 +150,7 @@ struct amd_ip_funcs {
        int (*hw_init)(void *handle);
        /* tears down the hw state */
        int (*hw_fini)(void *handle);
+       void (*late_fini)(void *handle);
        /* handles IP specific hw/sw changes for suspend */
        int (*suspend)(void *handle);
        /* handles IP specific hw/sw changes for resume */
index ebaf67bb1589bc8f85b77c822990d9cbc83f4a23..90ff7c8a6011e84a6d1d67258cb1a941ecac8d42 100644 (file)
 #define mmDC_EDC_CSINVOC_CNT                                                    0x3192
 #define mmDC_EDC_RESTORE_CNT                                                    0x3193
 
+#define mmGC_CAC_IND_INDEX                                                      0x129a
+#define mmGC_CAC_IND_DATA                                                       0x129b
+
 #endif /* GFX_8_0_D_H */
index 7d722458d9f51a7bb05801040fa32edf68bfdb52..4070ca3a68eb9b361a45303d3ee5c44571a7f55c 100644 (file)
 #define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
 #define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000
 #define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
-#define RLC_GPM_STAT__RESERVED_MASK 0xfc0000
-#define RLC_GPM_STAT__RESERVED__SHIFT 0x12
 #define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000
 #define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
 #define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f
 #define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
 #define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000
 #define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13
-#define RLC_PG_CNTL__RESERVED1_MASK 0xf00000
-#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14
+#define RLC_PG_CNTL__QUICK_PG_ENABLE_MASK 0x100000
+#define RLC_PG_CNTL__QUICK_PG_ENABLE__SHIFT 0x14
+#define RLC_PG_CNTL__RESERVED1_MASK 0xe00000
+#define RLC_PG_CNTL__RESERVED1__SHIFT 0x15
 #define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff
 #define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
 #define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00
 #define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
 #define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff
 #define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
-#define RLC_PG_DELAY_3__RESERVED_MASK 0xffffff00
-#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
 #define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff
 #define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
 #define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff
 #define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8
 #define RLC_SRM_DEBUG__DATA_MASK 0xffffffff
 #define RLC_SRM_DEBUG__DATA__SHIFT 0x0
-#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x3ff
-#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
-#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xfffffc00
-#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xa
 #define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff
 #define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
-#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x3ff
-#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
 #define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00
 #define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa
 #define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff
 #define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
 #define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000
 #define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
-#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0xff000000
-#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
 #define VGT_TF_RING_SIZE__SIZE_MASK 0xffff
 #define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
 #define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1
 #define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
 #define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
 #define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
 #define DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff
 #define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0
 #define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000
 #define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
 #define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
 #define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x6
 #define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff
 #define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0
 #define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000
 #define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
 #define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
 #define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
 #define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff
 #define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0
 #define DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000
 #define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
 #define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
 #define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
 #define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff
 #define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
 #define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000
 #define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
 #define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
 #define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
-#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x6
 #define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff
 #define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0
 #define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000
 #define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000
 #define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18
 
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK    0x00000001
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT  0x00000000
+
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK       0x0000007e
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK       0x00001f80L
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT     0x00000001
+#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT     0x00000007
+
+#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK   0x1fffe000L
+#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK                  0xe0000000L
+#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT                0x0000001d
+
+#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK       0x00000001L
+#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT     0x00000000
+
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK       0x00007ffeL
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT     0x00000001
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK       0x1fff8000L
+#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT     0x0000000f
+
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK    0x00000001L
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT  0x00000000
+
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK       0x0000007eL
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK       0x00001f80L
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT     0x00000001
+#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT     0x00000007
+
+#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK   0x1fffe000L
+#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK     0x00000fc0L
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK     0x0003f000L
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT   0x00000006
+#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT   0x0000000c
+
+#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK       0x00000001L
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK       0x00007ffeL
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK       0x1fff8000L
+
+#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT     0x00000000
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT     0x00000001
+#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT     0x0000000f
+
+#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK                  0xe0000000L
+#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT                0x0000001d
+
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK     0x00000fc0L
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK     0x0003f000L
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT   0x00000006
+#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT   0x0000000c
+
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK   0x00000001L
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
+
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK      0x0000007eL
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK      0x00001f80L
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT    0x00000001
+#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT    0x00000007
+
+#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK  0x1fffe000L
+#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
+
+#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK                 0xe0000000L
+#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT               0x0000001d
+
+#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK      0x00000001L
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK      0x00007ffeL
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK      0x1fff8000L
+#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT    0x00000000
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT    0x00000001
+#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT    0x0000000f
+
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK    0x00000fc0L
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK    0x0003f000L
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT  0x00000006
+#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT  0x0000000c
+
 #endif /* GFX_8_0_SH_MASK_H */
index 32f3e345de0809c8c04b62d59398b971c05be07e..3493da5c8f0efcaef97f2b07145d91097665b703 100644 (file)
@@ -5538,6 +5538,78 @@ typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_5
   ULONG  ulReserved[12];
 }ATOM_ASIC_PROFILING_INFO_V3_5;
 
+/* for Polaris10/11 AVFS parameters */
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_6
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  USHORT usLkgEuseIndex;
+  UCHAR  ucLkgEfuseBitLSB;
+  UCHAR  ucLkgEfuseLength;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMax;
+  ULONG  ulLkgEncodeMin;
+  EFUSE_LINEAR_FUNC_PARAM sRoFuse;
+  ULONG  ulEvvDefaultVddc;
+  ULONG  ulEvvNoCalcVddc;
+  ULONG  ulSpeed_Model;
+  ULONG  ulSM_A0;
+  ULONG  ulSM_A1;
+  ULONG  ulSM_A2;
+  ULONG  ulSM_A3;
+  ULONG  ulSM_A4;
+  ULONG  ulSM_A5;
+  ULONG  ulSM_A6;
+  ULONG  ulSM_A7;
+  UCHAR  ucSM_A0_sign;
+  UCHAR  ucSM_A1_sign;
+  UCHAR  ucSM_A2_sign;
+  UCHAR  ucSM_A3_sign;
+  UCHAR  ucSM_A4_sign;
+  UCHAR  ucSM_A5_sign;
+  UCHAR  ucSM_A6_sign;
+  UCHAR  ucSM_A7_sign;
+  ULONG  ulMargin_RO_a;
+  ULONG  ulMargin_RO_b;
+  ULONG  ulMargin_RO_c;
+  ULONG  ulMargin_fixed;
+  ULONG  ulMargin_Fmax_mean;
+  ULONG  ulMargin_plat_mean;
+  ULONG  ulMargin_Fmax_sigma;
+  ULONG  ulMargin_plat_sigma;
+  ULONG  ulMargin_DC_sigma;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulaTDClimitPerDPM[8];
+  ULONG  ulaNoCalcVddcPerDPM[8];
+  ULONG  ulAVFS_meanNsigma_Acontant0;
+  ULONG  ulAVFS_meanNsigma_Acontant1;
+  ULONG  ulAVFS_meanNsigma_Acontant2;
+  USHORT usAVFS_meanNsigma_DC_tol_sigma;
+  USHORT usAVFS_meanNsigma_Platform_mean;
+  USHORT usAVFS_meanNsigma_Platform_sigma;
+  ULONG  ulGB_VDROOP_TABLE_CKSOFF_a0;
+  ULONG  ulGB_VDROOP_TABLE_CKSOFF_a1;
+  ULONG  ulGB_VDROOP_TABLE_CKSOFF_a2;
+  ULONG  ulGB_VDROOP_TABLE_CKSON_a0;
+  ULONG  ulGB_VDROOP_TABLE_CKSON_a1;
+  ULONG  ulGB_VDROOP_TABLE_CKSON_a2;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+  USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSON_m1;
+  USHORT usAVFSGB_FUSE_TABLE_CKSON_m2;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSON_b;
+  USHORT usMaxVoltage_0_25mv;
+  UCHAR  ucEnableGB_VDROOP_TABLE_CKSOFF;
+  UCHAR  ucEnableGB_VDROOP_TABLE_CKSON;
+  UCHAR  ucEnableGB_FUSE_TABLE_CKSOFF;
+  UCHAR  ucEnableGB_FUSE_TABLE_CKSON;
+  USHORT usPSM_Age_ComFactor;
+  UCHAR  ucEnableApplyAVFS_CKS_OFF_Voltage;
+  UCHAR  ucReserved;
+}ATOM_ASIC_PROFILING_INFO_V3_6;
+
 
 typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{
   ULONG  ulMaxSclkFreq;
index a461e155a160240f7f127c156b902b56651c523b..f32af2f2091a626a092f79b79f8c102e6f724395 100644 (file)
@@ -49,6 +49,7 @@ enum cgs_ind_reg {
        CGS_IND_REG__SMC,
        CGS_IND_REG__UVD_CTX,
        CGS_IND_REG__DIDT,
+       CGS_IND_REG_GC_CAC,
        CGS_IND_REG__AUDIO_ENDPT
 };
 
@@ -115,6 +116,7 @@ enum cgs_system_info_id {
        CGS_SYSTEM_INFO_CG_FLAGS,
        CGS_SYSTEM_INFO_PG_FLAGS,
        CGS_SYSTEM_INFO_GFX_CU_INFO,
+       CGS_SYSTEM_INFO_GFX_SE_INFO,
        CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
 
@@ -158,6 +160,10 @@ struct cgs_firmware_info {
        uint16_t                feature_version;
        uint32_t                image_size;
        uint64_t                mc_addr;
+
+       /* only for smc firmware */
+       uint32_t                ucode_start_address;
+
        void                    *kptr;
 };
 
@@ -189,7 +195,6 @@ typedef unsigned long cgs_handle_t;
 
 struct cgs_acpi_method_argument {
        uint32_t type;
-       uint32_t method_length;
        uint32_t data_length;
        union{
                uint32_t value;
@@ -581,6 +586,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
                                     enum cgs_ucode_id type,
                                     struct cgs_firmware_info *info);
 
+typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
+                                        enum cgs_ucode_id type);
+
 typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
                                  enum amd_ip_block_type block_type,
                                  enum amd_powergating_state state);
@@ -645,6 +653,7 @@ struct cgs_ops {
        cgs_set_camera_voltages_t set_camera_voltages;
        /* Firmware Info */
        cgs_get_firmware_info get_firmware_info;
+       cgs_rel_firmware rel_firmware;
        /* cg pg interface*/
        cgs_set_powergating_state set_powergating_state;
        cgs_set_clockgating_state set_clockgating_state;
@@ -738,6 +747,8 @@ struct cgs_device
        CGS_CALL(set_camera_voltages,dev,mask,voltages)
 #define cgs_get_firmware_info(dev, type, info) \
        CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_rel_firmware(dev, type)    \
+       CGS_CALL(rel_firmware, dev, type)
 #define cgs_set_powergating_state(dev, block_type, state)      \
        CGS_CALL(set_powergating_state, dev, block_type, state)
 #define cgs_set_clockgating_state(dev, block_type, state)      \
index 8e345bfddb693033d08584d1093f254ef9619a38..f9e03ad0baa29faea18d5bd0fc123c9f7b769c88 100644 (file)
@@ -73,11 +73,14 @@ static int pp_sw_init(void *handle)
 
        ret = hwmgr->hwmgr_func->backend_init(hwmgr);
        if (ret)
-               goto err;
+               goto err1;
 
        pr_info("amdgpu: powerplay initialized\n");
 
        return 0;
+err1:
+       if (hwmgr->pptable_func->pptable_fini)
+               hwmgr->pptable_func->pptable_fini(hwmgr);
 err:
        pr_err("amdgpu: powerplay initialization failed\n");
        return ret;
@@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle)
        if (hwmgr->hwmgr_func->backend_fini != NULL)
                ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
 
+       if (hwmgr->pptable_func->pptable_fini)
+               hwmgr->pptable_func->pptable_fini(hwmgr);
+
        return ret;
 }
 
@@ -530,6 +536,10 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
        case AMD_PP_EVENT_COMPLETE_INIT:
                ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
                break;
+       case AMD_PP_EVENT_READJUST_POWER_STATE:
+               pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
+               ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+               break;
        default:
                break;
        }
@@ -734,12 +744,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
 
        PP_CHECK_HW(hwmgr);
 
-       if (hwmgr->hwmgr_func->get_pp_table == NULL) {
-               printk(KERN_INFO "%s was not implemented.\n", __func__);
-               return 0;
-       }
+       if (!hwmgr->soft_pp_table)
+               return -EINVAL;
+
+       *table = (char *)hwmgr->soft_pp_table;
 
-       return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
+       return hwmgr->soft_pp_table_size;
 }
 
 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
@@ -753,12 +763,23 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
 
        PP_CHECK_HW(hwmgr);
 
-       if (hwmgr->hwmgr_func->set_pp_table == NULL) {
-               printk(KERN_INFO "%s was not implemented.\n", __func__);
-               return 0;
+       if (!hwmgr->hardcode_pp_table) {
+               hwmgr->hardcode_pp_table =
+                               kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+
+               if (!hwmgr->hardcode_pp_table)
+                       return -ENOMEM;
+
+               /* to avoid a powerplay crash when the hardcoded pptable is empty */
+               memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
+                               hwmgr->soft_pp_table_size);
        }
 
-       return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
+       memcpy(hwmgr->hardcode_pp_table, buf, size);
+
+       hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
+
+       return amd_powerplay_reset(handle);
 }
 
 static int pp_dpm_force_clock_level(void *handle,
@@ -800,6 +821,82 @@ static int pp_dpm_print_clock_levels(void *handle,
        return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
 }
 
+static int pp_dpm_get_sclk_od(void *handle)
+{
+       struct pp_hwmgr *hwmgr;
+
+       if (!handle)
+               return -EINVAL;
+
+       hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+       PP_CHECK_HW(hwmgr);
+
+       if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
+               printk(KERN_INFO "%s was not implemented.\n", __func__);
+               return 0;
+       }
+
+       return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
+}
+
+static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
+{
+       struct pp_hwmgr *hwmgr;
+
+       if (!handle)
+               return -EINVAL;
+
+       hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+       PP_CHECK_HW(hwmgr);
+
+       if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
+               printk(KERN_INFO "%s was not implemented.\n", __func__);
+               return 0;
+       }
+
+       return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
+}
+
+static int pp_dpm_get_mclk_od(void *handle)
+{
+       struct pp_hwmgr *hwmgr;
+
+       if (!handle)
+               return -EINVAL;
+
+       hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+       PP_CHECK_HW(hwmgr);
+
+       if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
+               printk(KERN_INFO "%s was not implemented.\n", __func__);
+               return 0;
+       }
+
+       return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
+}
+
+static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
+{
+       struct pp_hwmgr *hwmgr;
+
+       if (!handle)
+               return -EINVAL;
+
+       hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+       PP_CHECK_HW(hwmgr);
+
+       if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
+               printk(KERN_INFO "%s was not implemented.\n", __func__);
+               return 0;
+       }
+
+       return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
+}
+
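
The four overdrive entry points added above repeat the same handle check,
PP_CHECK_HW guard and "not implemented" fallback. A hypothetical macro (not
part of this patch) showing how the two getters could be generated; the
setters would need a variant that forwards the uint32_t value:

/* Sketch of a generator for the parameterless OD getters. */
#define PP_OD_GETTER(name)						\
static int pp_dpm_##name(void *handle)					\
{									\
	struct pp_hwmgr *hwmgr;						\
									\
	if (!handle)							\
		return -EINVAL;						\
	hwmgr = ((struct pp_instance *)handle)->hwmgr;			\
	PP_CHECK_HW(hwmgr);						\
	if (hwmgr->hwmgr_func->name == NULL) {				\
		printk(KERN_INFO "%s was not implemented.\n", __func__);\
		return 0;						\
	}								\
	return hwmgr->hwmgr_func->name(hwmgr);				\
}
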
 const struct amd_powerplay_funcs pp_dpm_funcs = {
        .get_temperature = pp_dpm_get_temperature,
        .load_firmware = pp_dpm_load_fw,
@@ -822,6 +919,10 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
        .set_pp_table = pp_dpm_set_pp_table,
        .force_clock_level = pp_dpm_force_clock_level,
        .print_clock_levels = pp_dpm_print_clock_levels,
+       .get_sclk_od = pp_dpm_get_sclk_od,
+       .set_sclk_od = pp_dpm_set_sclk_od,
+       .get_mclk_od = pp_dpm_get_mclk_od,
+       .set_mclk_od = pp_dpm_set_mclk_od,
 };
 
 static int amd_pp_instance_init(struct amd_pp_init *pp_init,
@@ -903,6 +1004,44 @@ int amd_powerplay_fini(void *handle)
        return 0;
 }
 
+int amd_powerplay_reset(void *handle)
+{
+       struct pp_instance *instance = (struct pp_instance *)handle;
+       struct pp_eventmgr *eventmgr;
+       struct pem_event_data event_data = { {0} };
+       int ret;
+
+       if (instance == NULL)
+               return -EINVAL;
+
+       eventmgr = instance->eventmgr;
+       if (!eventmgr || !eventmgr->pp_eventmgr_fini)
+               return -EINVAL;
+
+       eventmgr->pp_eventmgr_fini(eventmgr);
+
+       ret = pp_sw_fini(handle);
+       if (ret)
+               return ret;
+
+       kfree(instance->hwmgr->ps);
+
+       ret = pp_sw_init(handle);
+       if (ret)
+               return ret;
+
+       hw_init_power_state_table(instance->hwmgr);
+
+       if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
+               return -EINVAL;
+
+       ret = eventmgr->pp_eventmgr_init(eventmgr);
+       if (ret)
+               return ret;
+
+       return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
+}
+
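
amd_powerplay_reset() is the piece that makes runtime pptable updates work:
it tears down the event manager and software state, drops the cached power
states, then re-runs initialization against whatever soft_pp_table now
points at. A comment-only summary of the ordering, mirroring the function
above:

/*
 * Reset ordering (summary of amd_powerplay_reset above):
 *   pp_eventmgr_fini()          - stop event handling
 *   pp_sw_fini()                - drop hwmgr software state
 *   kfree(hwmgr->ps)            - discard cached power states
 *   pp_sw_init()                - re-parse the (possibly new) pptable
 *   hw_init_power_state_table() - rebuild hwmgr->ps
 *   pp_eventmgr_init()          - restart event handling
 *   AMD_PP_EVENT_COMPLETE_INIT  - bring DPM back up
 */
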
 /* export this function to DAL */
 
 int amd_powerplay_display_configuration_change(void *handle,
index 46410e3c73493acce741f955f60790fc9745256e..fb88e4e5d625815ab213b1011392bce0ef96eae6 100644 (file)
@@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
        pem_unregister_interrupts(eventmgr);
 
        pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-
-       if (eventmgr != NULL)
-               kfree(eventmgr);
 }
 
 int eventmgr_init(struct pp_instance *handle)
index 5cd123472db471bcf4d5b3cfe4f21fc5000d4b17..b6f45fd01fa61621d2f1e776f94e26a5dd938f82 100644 (file)
@@ -132,8 +132,7 @@ int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struc
 
 int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
 {
-       /* TODO */
-       return 0;
+       return phm_disable_dynamic_state_management(eventmgr->hwmgr);
 }
 
 int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
index 436fc16dabb660e0970068ecc4eb0be3c2ed44f6..2da548f6337e822d3eeae9783d1b37190aa07d8c 100644 (file)
@@ -206,7 +206,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                                                        AMD_IP_BLOCK_TYPE_VCE,
                                                        AMD_PG_STATE_GATE);
                                cz_enable_disable_vce_dpm(hwmgr, false);
-                       /* TODO: to figure out why vce can't be poweroff*/
+                               cz_dpm_powerdown_vce(hwmgr);
                                cz_hwmgr->vce_power_gated = true;
                        } else {
                                cz_dpm_powerup_vce(hwmgr);
@@ -225,6 +225,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                        }
                }
        } else {
+               cz_hwmgr->vce_power_gated = bgate;
                cz_dpm_update_vce_dpm(hwmgr);
                cz_enable_disable_vce_dpm(hwmgr, !bgate);
                return 0;
index 1f14c477d15d3a490c89f316ce48dd077503e98f..9bf622e123b652e3e1d9fcd6676fafea1f041779 100644 (file)
@@ -1180,6 +1180,13 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
        int result = 0;
+       struct cz_hwmgr *data;
+
+       data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       hwmgr->backend = data;
 
        result = cz_initialize_dpm_defaults(hwmgr);
        if (result != 0) {
@@ -1909,15 +1916,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
 
 int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
-       struct cz_hwmgr *cz_hwmgr;
-       int ret = 0;
-
-       cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
-       if (cz_hwmgr == NULL)
-               return -ENOMEM;
-
-       hwmgr->backend = cz_hwmgr;
        hwmgr->hwmgr_func = &cz_hwmgr_funcs;
        hwmgr->pptable_func = &pptable_funcs;
-       return ret;
+       return 0;
 }
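
Moving the allocation out of cz_hwmgr_init() and into the backend_init
callback means a reset cycle (pp_sw_fini() followed by pp_sw_init()) always
starts from a freshly zeroed backend; the matching kfree(hwmgr->backend)
lands centrally in hwmgr_fini() later in this diff. The ownership rule,
sketched with simplified names:

/* Ownership sketch: the ASIC's backend_init callback allocates, and
 * hwmgr_fini() frees - kfree(NULL) is a safe no-op if init never ran. */
static int backend_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->backend = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
	return hwmgr->backend ? 0 : -ENOMEM;
}
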
index 24a16e49b5713d07f8a53aa81fb719ab75383500..744aa886a2be789abc63c3c626dc2f07b3a39edf 100644 (file)
@@ -581,25 +581,24 @@ static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
 
 static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (data->soft_pp_table) {
-               kfree(data->soft_pp_table);
-               data->soft_pp_table = NULL;
-       }
-
        return phm_hwmgr_backend_fini(hwmgr);
 }
 
 static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_hwmgr *data;
        uint32_t i;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        bool stay_in_boot;
        int result;
 
+       data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       hwmgr->backend = data;
+
        data->dll_default_on = false;
        data->sram_end = SMC_RAM_END;
 
@@ -633,6 +632,8 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
        data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
 
+       data->force_pcie_gen = PP_PCIEGenInvalid;
+
        if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
                data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
@@ -732,7 +733,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_gen_cap = 0x30007;
+                       data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
                else
                        data->pcie_gen_cap = (uint32_t)sys_info.value;
                if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -741,7 +742,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_lane_cap = 0x2f0000;
+                       data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
                else
                        data->pcie_lane_cap = (uint32_t)sys_info.value;
        } else {
@@ -1234,6 +1235,34 @@ static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+       /* Reset voting clients before disabling DPM */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_0, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_1, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_2, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_3, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_4, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_5, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_6, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_7, 0);
+
+       return 0;
+}
+
 /**
 * Get the location of various tables inside the FW image.
 *
@@ -1360,6 +1389,17 @@ static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+/**
+* Call SMC to reset S0/S1 to S1 and reset SMIO to its initial value.
+*
+* @param    hwmgr  the address of the powerplay hardware manager.
+* @return   0 on success.
+*/
+static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
 /**
 * Initial switch from ARB F0->F1
 *
@@ -1373,6 +1413,21 @@ static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
                        MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
 }
 
+static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+       uint32_t tmp;
+
+       tmp = (cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+                       0x0000ff00) >> 8;
+
+       if (tmp == MC_CG_ARB_FREQ_F0)
+               return 0;
+
+       return fiji_copy_and_switch_arb_sets(hwmgr,
+                       tmp, MC_CG_ARB_FREQ_F0);
+}
+
 static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
                struct fiji_single_dpm_table *dpm_table, uint32_t count)
 {
@@ -1830,7 +1885,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
 
        PP_ASSERT_WITH_CODE(false,
                        "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-                       return vddci_table->entries[i].value);
+                       return vddci_table->entries[i-1].value);
 }
 
 static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
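
The entries[i-1] change fixes an off-by-one: when the search loop walks the
whole VDDCI table without finding a match, i equals the entry count, so the
old return read one element past the end. Simplified sketch of the
surrounding search:

/* Simplified: return the first entry >= vddci, else clamp to the max. */
for (i = 0; i < vddci_table->count; i++) {
	if (vddci_table->entries[i].value >= vddci)
		return vddci_table->entries[i].value;
}
/* loop exit: i == vddci_table->count, so entries[i] is out of bounds */
return vddci_table->entries[i - 1].value;
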
@@ -3175,6 +3230,17 @@ static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_ulv_parm *ulv = &(data->ulv);
+
+       if (ulv->ulv_supported)
+               return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+       return 0;
+}
+
 static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 {
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3195,6 +3261,21 @@ static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep)) {
+               if (smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to disable Master Deep Sleep switch failed!",
+                                       return -1);
+               }
+       }
+
+       return 0;
+}
+
 static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 {
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -3355,6 +3436,70 @@ static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+       /* disable SCLK dpm */
+       if (!data->sclk_dpm_key_disabled)
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_DPM_Disable) == 0),
+                               "Failed to disable SCLK DPM!",
+                               return -1);
+
+       /* disable MCLK dpm */
+       if (!data->mclk_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                               PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
+                               "Failed to force MCLK DPM0!",
+                               return -1);
+
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_MCLKDPM_Disable) == 0),
+                               "Failed to disable MCLK DPM!",
+                               return -1);
+       }
+
+       return 0;
+}
+
+static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+       /* disable general power management */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+                       GLOBAL_PWRMGT_EN, 0);
+       /* disable sclk deep sleep */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+                       DYNAMIC_PM_EN, 0);
+
+       /* disable PCIE dpm */
+       if (!data->pcie_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_PCIeDPM_Disable) == 0),
+                               "Failed to disable pcie DPM during DPM Stop Function!",
+                               return -1);
+       }
+
+       if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
+               printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
+               return -1;
+       }
+
+       PP_ASSERT_WITH_CODE(
+                       (smum_send_msg_to_smc(hwmgr->smumgr,
+                                       PPSMC_MSG_Voltage_Cntl_Disable) == 0),
+                       "Failed to disable voltage DPM during DPM Stop Function!",
+                       return -1);
+
+       return 0;
+}
+
 static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
                uint32_t sources)
 {
@@ -3413,6 +3558,23 @@ static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
        return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
 }
 
+static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+               PHM_AutoThrottleSource source)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+       if (data->active_auto_throttle_sources & (1 << source)) {
+               data->active_auto_throttle_sources &= ~(1 << source);
+               fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+       }
+       return 0;
+}
+
+static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+       return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
 static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
        int tmp_result, result = 0;
@@ -3527,6 +3689,64 @@ static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+       int tmp_result, result = 0;
+
+       tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
+       PP_ASSERT_WITH_CODE(tmp_result == 0,
+                       "DPM is not running right now, no need to disable DPM!",
+                       return 0);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ThermalController))
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+       tmp_result = fiji_disable_power_containment(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable power containment!", result = tmp_result);
+
+       tmp_result = fiji_disable_smc_cac(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable SMC CAC!", result = tmp_result);
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+       tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable thermal auto throttle!", result = tmp_result);
+
+       tmp_result = fiji_stop_dpm(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to stop DPM!", result = tmp_result);
+
+       tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable deep sleep master switch!", result = tmp_result);
+
+       tmp_result = fiji_disable_ulv(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable ULV!", result = tmp_result);
+
+       tmp_result = fiji_clear_voting_clients(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to clear voting clients!", result = tmp_result);
+
+       tmp_result = fiji_reset_to_default(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to reset to default!", result = tmp_result);
+
+       tmp_result = fiji_force_switch_to_arbf0(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to force to switch arbf0!", result = tmp_result);
+
+       return result;
+}
+
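
fiji_disable_dpm_tasks() uses the tmp_result/result accumulation idiom:
every teardown step runs even if an earlier one failed, and the returned
result holds the most recent failure (0 when everything succeeded). The
idiom, reduced to its skeleton with hypothetical step names:

static int teardown_all(void)
{
	int result = 0, tmp_result;

	tmp_result = step_a();		/* hypothetical teardown step */
	if (tmp_result)
		result = tmp_result;

	tmp_result = step_b();		/* still runs after step_a() fails */
	if (tmp_result)
		result = tmp_result;

	return result;
}
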
 static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
 {
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -5069,42 +5289,6 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
                                CG_FDO_CTRL2, FDO_PWM_MODE);
 }
 
-static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
-                                             hwmgr->soft_pp_table_size,
-                                             GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       *table = (char *)&data->soft_pp_table;
-
-       return hwmgr->soft_pp_table_size;
-}
-
-static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
-       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       memcpy(data->soft_pp_table, buf, size);
-
-       hwmgr->soft_pp_table = data->soft_pp_table;
-
-       /* TODO: re-init powerplay to implement modified pptable */
-
-       return 0;
-}
-
 static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask)
 {
@@ -5274,12 +5458,96 @@ bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *h
        return is_update_required;
 }
 
+static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+       struct fiji_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       int value;
+
+       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+                       100 /
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return value;
+}
+
+static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       struct pp_power_state  *ps;
+       struct fiji_power_state  *fiji_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
+
+       fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+                       value / 100 +
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return 0;
+}
+
+static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+       struct fiji_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       int value;
+
+       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+                       100 /
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return value;
+}
+
+static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       struct fiji_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       struct pp_power_state  *ps;
+       struct fiji_power_state  *fiji_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
+
+       fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+                       value / 100 +
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return 0;
+}
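
The overdrive percentage round-trips through the top golden (stock) DPM
level. A worked example with assumed numbers; the 1050 MHz stock top sclk
is illustrative only:

static void od_example(void)
{
	/* 10 kHz units: golden top-level sclk = 105000 (1050 MHz). */
	uint32_t golden = 105000, new_clk, od;

	new_clk = golden * 5 / 100 + golden;	/* set_sclk_od(.., 5) -> 110250 */
	od = (new_clk - golden) * 100 / golden;	/* get_sclk_od() -> 5 */
	(void)od;

	/* Requests above 20 are clamped: set_sclk_od(.., 35) acts as 20. */
}
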
 
 static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
        .backend_init = &fiji_hwmgr_backend_init,
        .backend_fini = &fiji_hwmgr_backend_fini,
        .asic_setup = &fiji_setup_asic_task,
        .dynamic_state_management_enable = &fiji_enable_dpm_tasks,
+       .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
        .force_dpm_level = &fiji_dpm_force_dpm_level,
        .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
        .get_power_state_size = &fiji_get_power_state_size,
@@ -5312,24 +5580,18 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
        .get_fan_control_mode = fiji_get_fan_control_mode,
        .check_states_equal = fiji_check_states_equal,
        .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
-       .get_pp_table = fiji_get_pp_table,
-       .set_pp_table = fiji_set_pp_table,
        .force_clock_level = fiji_force_clock_level,
        .print_clock_levels = fiji_print_clock_levels,
+       .get_sclk_od = fiji_get_sclk_od,
+       .set_sclk_od = fiji_set_sclk_od,
+       .get_mclk_od = fiji_get_mclk_od,
+       .set_mclk_od = fiji_set_mclk_od,
 };
 
 int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
-       struct fiji_hwmgr  *data;
-       int ret = 0;
-
-       data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
-       if (data == NULL)
-               return -ENOMEM;
-
-       hwmgr->backend = data;
        hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
        hwmgr->pptable_func = &tonga_pptable_funcs;
        pp_fiji_thermal_initialize(hwmgr);
-       return ret;
+       return 0;
 }
index 170edf5a772dabb04b8fb683104c141f24d53862..bf67c2a92c68366607efaa8402e1209c4b3ad0be 100644 (file)
@@ -302,9 +302,6 @@ struct fiji_hwmgr {
        bool                           pg_acp_init;
        bool                           frtc_enabled;
        bool                           frtc_status_changed;
-
-       /* soft pptable for re-uploading into smu */
-       void *soft_pp_table;
 };
 
 /* To convert to Q8.8 format for firmware */
index db23a4068baf291f948eb891fd4c2739bef5d8cb..44658451a8d2eef6c4a5c0e061cb2351bcad0ec3 100644 (file)
@@ -72,18 +72,19 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
        fiji_hwmgr->dte_tj_offset = tmp;
 
        if (!tmp) {
-               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                               PHM_PlatformCaps_PowerContainment);
-
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_CAC);
 
                fiji_hwmgr->fast_watermark_threshold = 100;
 
-               tmp = 1;
-               fiji_hwmgr->enable_dte_feature = tmp ? false : true;
-               fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
-               fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+               if (hwmgr->powercontainment_enabled) {
+                       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                   PHM_PlatformCaps_PowerContainment);
+                       tmp = 1;
+                       fiji_hwmgr->enable_dte_feature = tmp ? false : true;
+                       fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
+                       fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+               }
        }
 }
 
@@ -459,6 +460,23 @@ int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CAC) && data->cac_enabled) {
+               int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                               (uint16_t)(PPSMC_MSG_DisableCac));
+               PP_ASSERT_WITH_CODE((smc_result == 0),
+                               "Failed to disable CAC in SMC.", result = -1);
+
+               data->cac_enabled = false;
+       }
+       return result;
+}
+
 int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 {
        struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -528,6 +546,48 @@ int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+       struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment) &&
+                       data->power_containment_features) {
+               int smc_result;
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_TDCLimit) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable TDCLimit in SMC.",
+                                       result = smc_result);
+               }
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_DTE) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_DisableDTE));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable DTE in SMC.",
+                                       result = smc_result);
+               }
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable PkgPwrTracking in SMC.",
+                                       result = smc_result);
+               }
+               data->power_containment_features = 0;
+       }
+
+       return result;
+}
+
 int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
 {
        struct phm_ppt_v1_information *table_info =
index 55e58200f33a8ab2ffaa8645fbbd9e923a091439..fec7724217339c68d0c5f3c91fe02ba4d8f8bfab 100644 (file)
@@ -36,6 +36,19 @@ enum fiji_pt_config_reg_type {
 #define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit     0x00000004
 
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK             0xffffffc0
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT           0x6
+#define DIDT_TD_CTRL0__UNUSED_0_MASK             0xffffffc0
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT           0x6
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK            0xffffffc0
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT          0x6
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK                 0xe0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001d
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK                 0xe0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001d
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK                0xe0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT              0x0000001d
+
 struct fiji_pt_config_reg {
        uint32_t                           offset;
        uint32_t                           mask;
@@ -58,7 +71,9 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
 int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
 int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
 int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
 int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
+int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
 int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
 int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
 
index fa208ada689219f4eb072ebb5e655e0a03cc9ab7..789f98ad2615027c8182ebfda29173e3b30b38db 100644 (file)
@@ -154,6 +154,30 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
        return ret;
 }
 
+int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
+{
+       int ret = -1;
+       bool enabled;
+
+       PHM_FUNC_CHECK(hwmgr);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+               PHM_PlatformCaps_TablelessHardwareInterface)) {
+               if (hwmgr->hwmgr_func->dynamic_state_management_disable)
+                       ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
+       } else {
+               ret = phm_dispatch_table(hwmgr,
+                               &(hwmgr->disable_dynamic_state_management),
+                               NULL, NULL);
+       }
+
+       enabled = ret == 0 ? false : true;
+
+       cgs_notify_dpm_enabled(hwmgr->device, enabled);
+
+       return ret;
+}
+
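
The ternary above is just a negated success test: DPM is reported as still
enabled only when the disable path failed. An equivalent, more direct form
of the fragment:

bool enabled = (ret != 0);	/* disable failed => DPM is still on */

cgs_notify_dpm_enabled(hwmgr->device, enabled);
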
 int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
 {
        PHM_FUNC_CHECK(hwmgr);
@@ -306,11 +330,15 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
 {
        PHM_FUNC_CHECK(hwmgr);
 
-       if (hwmgr->hwmgr_func->store_cc6_data == NULL)
+       if (display_config == NULL)
                return -EINVAL;
 
        hwmgr->display_config = *display_config;
-       /* to do pass other display configuration in furture */
+
+       if (hwmgr->hwmgr_func->store_cc6_data == NULL)
+               return -EINVAL;
+
+       /* TODO: pass other display configuration in the future */
 
        if (hwmgr->hwmgr_func->store_cc6_data)
                hwmgr->hwmgr_func->store_cc6_data(hwmgr,
index 1c48917da3cf3712b39eb72aaf1511fc3936845c..03b6128ebc203c66ad26cf2f2cb5f15786e08820 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
 #include "cgs_common.h"
 #include "power_state.h"
 #include "hwmgr.h"
@@ -58,12 +59,13 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
        hwmgr->hw_revision = pp_init->rev_id;
        hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
        hwmgr->power_source = PP_PowerSource_AC;
+       hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
 
        switch (hwmgr->chip_family) {
-       case AMD_FAMILY_CZ:
+       case AMDGPU_FAMILY_CZ:
                cz_hwmgr_init(hwmgr);
                break;
-       case AMD_FAMILY_VI:
+       case AMDGPU_FAMILY_VI:
                switch (hwmgr->chip_id) {
                case CHIP_TONGA:
                        tonga_hwmgr_init(hwmgr);
@@ -93,6 +95,15 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
        if (hwmgr == NULL || hwmgr->ps == NULL)
                return -EINVAL;
 
+       /* hwmgr teardown: free hwmgr-owned allocations */
+       kfree(hwmgr->hardcode_pp_table);
+
+       kfree(hwmgr->backend);
+
+       kfree(hwmgr->start_thermal_controller.function_list);
+
+       kfree(hwmgr->set_temperature_range.function_list);
+
        kfree(hwmgr->ps);
        kfree(hwmgr);
        return 0;
@@ -462,7 +473,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
 
        PP_ASSERT_WITH_CODE(false,
                        "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
-                       return vddci_table->entries[i].value);
+                       return vddci_table->entries[i-1].value);
 }
 
 int phm_find_boot_level(void *table,
index 347fef127ce91b26127fdf0f64a91f50a339b90b..2930a3355948990c56961f7f52dc424e6b65d8c1 100644 (file)
@@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record {
        uint8_t phases;
        uint8_t cks_enable;
        uint8_t cks_voffset;
+       uint32_t sclk_offset;
 };
 
 typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
index 8f142a74ad08dd7a13433423fcd1b158b5258ccd..aeec25c66aa83ed5600981b3456cad076d9a7166 100644 (file)
@@ -106,11 +106,17 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
        data->uvd_power_gated = bgate;
 
        if (bgate) {
+               cgs_set_clockgating_state(hwmgr->device,
+                               AMD_IP_BLOCK_TYPE_UVD,
+                               AMD_CG_STATE_GATE);
                polaris10_update_uvd_dpm(hwmgr, true);
                polaris10_phm_powerdown_uvd(hwmgr);
        } else {
                polaris10_phm_powerup_uvd(hwmgr);
                polaris10_update_uvd_dpm(hwmgr, false);
+               cgs_set_clockgating_state(hwmgr->device,
+                               AMD_IP_BLOCK_TYPE_UVD,
+                               AMD_PG_STATE_UNGATE);
        }
 
        return 0;
index aa6be033f21b36832ad67f156d6b1954cd8827ea..d1b528b401e90350a2f979e996773505c9ecc022 100644 (file)
@@ -389,6 +389,34 @@ static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+       /* Reset voting clients before disabling DPM */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
+
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_0, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_1, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_2, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_3, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_4, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_5, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_6, 0);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                       ixCG_FREQ_TRAN_VOTING_7, 0);
+
+       return 0;
+}
+
 /**
 * Get the location of various tables inside the FW image.
 *
@@ -515,6 +543,11 @@ static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
+{
+       return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+}
+
 /**
 * Initial switch from ARB F0->F1
 *
@@ -528,6 +561,21 @@ static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
                        MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
 }
 
+static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
+{
+       uint32_t tmp;
+
+       tmp = (cgs_read_ind_register(hwmgr->device,
+                       CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
+                       0x0000ff00) >> 8;
+
+       if (tmp == MC_CG_ARB_FREQ_F0)
+               return 0;
+
+       return polaris10_copy_and_switch_arb_sets(hwmgr,
+                       tmp, MC_CG_ARB_FREQ_F0);
+}
+
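
The voting-client reset, PPSMC_MSG_ResetToDefaults message and forced ARB
F0 switch added here duplicate the fiji versions earlier in this diff line
for line. A hypothetical shared helper (names invented, not part of this
patch) showing the common ARB F0 logic:

/* Both ASICs read the active ARB set from SMC_SCRATCH9 bits [15:8]
 * and switch back to F0 only when needed. */
static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr,
		int (*copy_switch)(struct pp_hwmgr *, uint32_t, uint32_t))
{
	uint32_t cur = (cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_SCRATCH9) >> 8) & 0xff;

	if (cur == MC_CG_ARB_FREQ_F0)
		return 0;

	return copy_switch(hwmgr, cur, MC_CG_ARB_FREQ_F0);
}
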
 static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -732,7 +780,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                        table->Smio[level] |=
                                data->mvdd_voltage_table.entries[level].smio_low;
                }
-               table->SmioMask2 = data->vddci_voltage_table.mask_low;
+               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
 
                table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
        }
@@ -999,7 +1047,7 @@ static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
                                vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
                                                (dep_table->entries[i].vddc -
                                                                (uint16_t)data->vddc_vddci_delta));
-                               *voltage |= (vddci * VOLTAGE_SCALE) <<  VDDCI_SHIFT;
+                               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
                        }
 
                        if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
@@ -1296,7 +1344,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
        }
 
        mem_level->MclkFrequency = clock;
-       mem_level->StutterEnable = 0;
        mem_level->EnabledForThrottle = 1;
        mem_level->EnabledForActivity = 0;
        mem_level->UpHyst = 0;
@@ -1304,7 +1351,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
        mem_level->VoltageDownHyst = 0;
        mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
        mem_level->StutterEnable = false;
-
        mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
        data->display_timing.num_existing_displays = info.display_count;
@@ -1358,12 +1404,12 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
                        return result;
        }
 
-       /* in order to prevent MC activity from stutter mode to push DPM up.
+       /* In order to prevent MC activity in stutter mode from pushing DPM up,
         * the UVD change complements this by putting the MCLK in
-        * a higher state by default such that we are not effected by
+        * a higher state by default such that we are not affected by
         * the up threshold or MCLK DPM latency.
         */
-       levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+       levels[0].ActivityLevel = 0x1f;
        CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
 
        data->smc_state_table.MemoryDpmLevelCount =
@@ -1424,22 +1470,19 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 
        table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
 
-       if (!data->sclk_dpm_key_disabled) {
-               /* Get MinVoltage and Frequency from DPM0,
-                * already converted to SMC_UL */
-               sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
-               result = polaris10_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_sclk,
-                               table->ACPILevel.SclkFrequency,
-                               &table->ACPILevel.MinVoltage, &mvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Cannot find ACPI VDDC voltage value "
-                               "in Clock Dependency Table", );
-       } else {
-               sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
-               table->ACPILevel.MinVoltage =
-                               data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
-       }
+
+       /* Get MinVoltage and Frequency from DPM0,
+        * already converted to SMC_UL */
+       sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
+       result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_sclk,
+                       sclk_frequency,
+                       &table->ACPILevel.MinVoltage, &mvdd);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Cannot find ACPI VDDC voltage value "
+                       "in Clock Dependency Table",
+                       );
+
 
        result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency,  &(table->ACPILevel.SclkSetting));
        PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
@@ -1464,24 +1507,18 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
        CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
        CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
 
-       if (!data->mclk_dpm_key_disabled) {
-               /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
-               table->MemoryACPILevel.MclkFrequency =
-                               data->dpm_table.mclk_table.dpm_levels[0].value;
-               result = polaris10_get_dependency_volt_by_clk(hwmgr,
-                               table_info->vdd_dep_on_mclk,
-                               table->MemoryACPILevel.MclkFrequency,
-                               &table->MemoryACPILevel.MinVoltage, &mvdd);
-               PP_ASSERT_WITH_CODE((0 == result),
-                               "Cannot find ACPI VDDCI voltage value "
-                               "in Clock Dependency Table",
-                               );
-       } else {
-               table->MemoryACPILevel.MclkFrequency =
-                               data->vbios_boot_state.mclk_bootup_value;
-               table->MemoryACPILevel.MinVoltage =
-                               data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
-       }
+
+       /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+       table->MemoryACPILevel.MclkFrequency =
+                       data->dpm_table.mclk_table.dpm_levels[0].value;
+       result = polaris10_get_dependency_volt_by_clk(hwmgr,
+                       table_info->vdd_dep_on_mclk,
+                       table->MemoryACPILevel.MclkFrequency,
+                       &table->MemoryACPILevel.MinVoltage, &mvdd);
+       PP_ASSERT_WITH_CODE((0 == result),
+                       "Cannot find ACPI VDDCI voltage value "
+                       "in Clock Dependency Table",
+                       );
 
        us_mvdd = 0;
        if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
@@ -1526,6 +1563,7 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
        struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
                        table_info->mm_dep_table;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
 
        table->VceLevelCount = (uint8_t)(mm_table->count);
        table->VceBootLevel = 0;
@@ -1535,9 +1573,18 @@ static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
                table->VceLevel[count].MinVoltage = 0;
                table->VceLevel[count].MinVoltage |=
                                (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+               if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
                table->VceLevel[count].MinVoltage |=
-                               ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
-                                               VOLTAGE_SCALE) << VDDCI_SHIFT;
+                               (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
                table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
 
                /*retrieve divider value for VBIOS */
@@ -1566,6 +1613,7 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
        struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
                        table_info->mm_dep_table;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
 
        table->SamuBootLevel = 0;
        table->SamuLevelCount = (uint8_t)(mm_table->count);
@@ -1576,8 +1624,16 @@ static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
                table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
                table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
                                VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
                table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
 
                /* retrieve divider value for VBIOS */
@@ -1660,6 +1716,7 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
        struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
                        table_info->mm_dep_table;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       uint32_t vddci;
 
        table->UvdLevelCount = (uint8_t)(mm_table->count);
        table->UvdBootLevel = 0;
@@ -1670,8 +1727,16 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
                table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
                table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
                                VOLTAGE_SCALE) << VDDC_SHIFT;
-               table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
-                               data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+                       vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+                                               mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+               else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+                       vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+               else
+                       vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+               table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
                table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
 
                /* retrieve divider value for VBIOS */
@@ -1692,8 +1757,8 @@ static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
                CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
                CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
                CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
        }
+
        return result;
 }
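
The same three-way VDDCI selection (GPIO-controlled table lookup, SVID2
fixed delta, or the VBIOS bootup value) now appears in the VCE, SAMU and
UVD level builders; note that the bootup-value branch applies the
VOLTAGE_SCALE/VDDCI_SHIFT scaling inline while the other two branches leave
it to the caller. A hypothetical helper (not in this patch) returning the
raw value in all three cases:

static uint32_t polaris10_get_mm_vddci(struct polaris10_hwmgr *data,
		uint16_t vddc)
{
	if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
		return phm_find_closest_vddci(&data->vddci_voltage_table,
				vddc - VDDC_VDDCI_DELTA);

	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
		return vddc - VDDC_VDDCI_DELTA;

	/* fall back to the VBIOS bootup value */
	return data->vbios_boot_state.vddci_bootup_value;
}
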
 
@@ -1761,12 +1826,9 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
 
 static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 {
-       uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
-                       volt_with_cks, value;
-       uint16_t clock_freq_u16;
+       uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
-                       volt_offset = 0;
+       uint8_t i, stretch_amount, volt_offset = 0;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1778,56 +1840,47 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
         * if the part is SS or FF. If RO >= 1660 MHz, the part is FF.
         */
        efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (146 * 4));
-       efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (148 * 4));
+                       ixSMU_EFUSE_0 + (67 * 4));
        efuse &= 0xFF000000;
        efuse = efuse >> 24;
-       efuse2 &= 0xF;
 
-       if (efuse2 == 1)
-               ro = (2300 - 1350) * efuse / 255 + 1350;
-       else
-               ro = (2500 - 1000) * efuse / 255 + 1000;
-
-       if (ro >= 1660)
-               type = 0;
-       else
-               type = 1;
+       if (hwmgr->chip_id == CHIP_POLARIS10) {
+               min = 1000;
+               max = 2300;
+       } else {
+               min = 1100;
+               max = 2100;
+       }
 
-       /* Populate Stretch amount */
-       data->smc_state_table.ClockStretcherAmount = stretch_amount;
+       ro = efuse * (max - min) / 255 + min;
 
        /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
        for (i = 0; i < sclk_table->count; i++) {
                data->smc_state_table.Sclk_CKS_masterEn0_7 |=
                                sclk_table->entries[i].cks_enable << i;
-               volt_without_cks = (uint32_t)((14041 *
-                       (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
-                       (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
-               volt_with_cks = (uint32_t)((13946 *
-                       (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
-                       (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+               if (hwmgr->chip_id == CHIP_POLARIS10) {
+                       volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
+                                               (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+                       volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+                                       (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+               } else {
+                       volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
+                                               (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+                       volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+                                       (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+               }
+
                if (volt_without_cks >= volt_with_cks)
                        volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
-                                       sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+                                       sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
                data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
        }
 
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       STRETCH_ENABLE, 0x0);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x1);
-       /* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x0);
-
+       data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
        /* Populate CKS Lookup Table */
-       if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-               stretch_amount2 = 0;
-       else if (stretch_amount == 3 || stretch_amount == 4)
-               stretch_amount2 = 1;
-       else {
+       if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
+                       stretch_amount != 4 && stretch_amount != 5) {
                phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_ClockStretcher);
                PP_ASSERT_WITH_CODE(false,
@@ -1835,69 +1888,6 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
                                return -EINVAL);
        }
 
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL);
-       value &= 0xFFC2FF87;
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
-                       polaris10_clock_stretcher_lookup_table[stretch_amount2][0];
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
-                       polaris10_clock_stretcher_lookup_table[stretch_amount2][1];
-       clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
-                       GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100);
-       if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16
-       && polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) {
-               /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
-               value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
-               /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
-               value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
-               /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
-               value |= (polaris10_clock_stretch_amount_conversion
-                               [polaris10_clock_stretcher_lookup_table[stretch_amount2][3]]
-                                [stretch_amount]) << 3;
-       }
-       CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq);
-       CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq);
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
-                       polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
-                       (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL, value);
-
-       /* Populate DDT Lookup Table */
-       for (i = 0; i < 4; i++) {
-               /* Assign the minimum and maximum VID stored
-                * in the last row of Clock Stretcher Voltage Table.
-                */
-               data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID =
-                               (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2];
-               data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID =
-                               (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3];
-               /* Loop through each SCLK and check the frequency
-                * to see if it lies within the frequency for clock stretcher.
-                */
-               for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
-                       cks_setting = 0;
-                       clock_freq = PP_SMC_TO_HOST_UL(
-                                       data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency);
-                       /* Check the allowed frequency against the sclk level[j].
-                        *  Sclk's endianness has already been converted,
-                        *  and it's in 10Khz unit,
-                        *  as opposed to Data table, which is in Mhz unit.
-                        */
-                       if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) {
-                               cks_setting |= 0x2;
-                               if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100)
-                                       cks_setting |= 0x1;
-                       }
-                       data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting
-                                                       |= cks_setting << (j * 2);
-               }
-               CONVERT_FROM_HOST_TO_SMC_US(
-                       data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting);
-       }
-
        value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
        value &= 0xFFFFFFFE;
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
@@ -1945,9 +1935,8 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
        if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
                config = VR_SVI2_PLANE_2;
                table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-       } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-               config = VR_SMIO_PATTERN_2;
-               table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
+                       offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
        } else {
                config = VR_STATIC_VOLTAGE;
                table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
@@ -1956,6 +1945,90 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+
+int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
+       int result = 0;
+       struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+       AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+       AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+       uint32_t tmp, i;
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)hwmgr->pptable;
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+                       table_info->vdd_dep_on_sclk;
+
+
+       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+               return result;
+
+       result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+       if (0 == result) {
+               table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+               table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+               table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+               table->BTCGB_VDROOP_TABLE[1].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+               table->BTCGB_VDROOP_TABLE[1].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+               table->BTCGB_VDROOP_TABLE[1].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+               table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+               table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+               table->AVFSGB_VDROOP_TABLE[0].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+               table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+               table->AVFSGB_VDROOP_TABLE[0].m2_shift  = 12;
+               table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+               table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+               table->AVFSGB_VDROOP_TABLE[1].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+               table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+               table->AVFSGB_VDROOP_TABLE[1].m2_shift  = 12;
+               table->MaxVoltage                = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+               AVFS_meanNsigma.Aconstant[0]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+               AVFS_meanNsigma.Aconstant[1]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+               AVFS_meanNsigma.Aconstant[2]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+               AVFS_meanNsigma.DC_tol_sigma      = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+               AVFS_meanNsigma.Platform_mean     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+               AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+               AVFS_meanNsigma.Platform_sigma     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+               for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+                       AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+                       AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset / 100));
+               }
+
+               result = polaris10_read_smc_sram_dword(smumgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+                               &tmp, data->sram_end);
+
+               polaris10_copy_bytes_to_smc(smumgr,
+                                       tmp,
+                                       (uint8_t *)&AVFS_meanNsigma,
+                                       sizeof(AVFS_meanNsigma_t),
+                                       data->sram_end);
+
+               result = polaris10_read_smc_sram_dword(smumgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+                               &tmp, data->sram_end);
+               polaris10_copy_bytes_to_smc(smumgr,
+                                       tmp,
+                                       (uint8_t *)&AVFS_SclkOffset,
+                                       sizeof(AVFS_Sclk_Offset_t),
+                                       data->sram_end);
+
+               data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+               data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+       }
+       return result;
+}
+
+
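polaris10_populate_avfs_parameters() uploads each AVFS table with the same two-step sequence: read the table's SRAM address out of the firmware header, then copy the bytes to that address. A hypothetical helper with the dropped return codes propagated (the helper name and factoring are illustrative, not part of the driver):

static int polaris10_upload_avfs_table(struct pp_smumgr *smumgr,
                uint32_t header_member_offset, void *table,
                uint32_t size, uint32_t sram_end)
{
        uint32_t addr;
        int ret;

        /* The firmware header holds the SRAM address of the table. */
        ret = polaris10_read_smc_sram_dword(smumgr,
                        SMU7_FIRMWARE_HEADER_LOCATION + header_member_offset,
                        &addr, sram_end);
        if (ret)
                return ret;

        /* Copy the host-side table into SMC SRAM at that address. */
        return polaris10_copy_bytes_to_smc(smumgr, addr, (uint8_t *)table,
                        size, sram_end);
}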
 /**
 * Initializes the SMC table and uploads it
 *
@@ -2056,6 +2129,10 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
                                "Failed to populate Clock Stretcher Data Table!",
                                return result);
        }
+
+       result = polaris10_populate_avfs_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result);
+
        table->CurrSclkPllRange = 0xff;
        table->GraphicsVoltageChangeEnable  = 1;
        table->GraphicsThermThrottleEnable  = 1;
@@ -2229,6 +2306,17 @@ static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_ulv_parm *ulv = &(data->ulv);
+
+       if (ulv->ulv_supported)
+               return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+
+       return 0;
+}
+
 static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 {
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -2249,9 +2337,27 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_SclkDeepSleep)) {
+               if (smum_send_msg_to_smc(hwmgr->smumgr,
+                               PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+                       PP_ASSERT_WITH_CODE(false,
+                                       "Attempt to disable Master Deep Sleep switch failed!",
+                                       return -1);
+               }
+       }
+
+       return 0;
+}
+
 static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       uint32_t soft_register_value = 0;
+       uint32_t handshake_disables_offset = data->soft_regs_start
+                               + offsetof(SMU74_SoftRegisters, HandshakeDisables);
 
        /* enable SCLK dpm */
        if (!data->sclk_dpm_key_disabled)
@@ -2262,6 +2368,12 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 
        /* enable MCLK dpm */
         if (0 == data->mclk_dpm_key_disabled) {
+               /* Disable the UVD-SMU handshake for MCLK. */
+               soft_register_value = cgs_read_ind_register(hwmgr->device,
+                                       CGS_IND_REG__SMC, handshake_disables_offset);
+               soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                               handshake_disables_offset, soft_register_value);
 
                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -2269,7 +2381,6 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Failed to enable MCLK DPM during DPM Start Function!",
                                return -1);
 
-
                PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
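The UVD-MCLK handshake disable added above is a read-modify-write on an SMU soft register. A hypothetical helper capturing that pattern (the driver open-codes it; this factoring is illustrative only):

static void polaris10_soft_reg_set_bits(struct pp_hwmgr *hwmgr,
                uint32_t offset, uint32_t bits)
{
        uint32_t val;

        val = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset,
                        val | bits);
}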
@@ -2338,6 +2449,58 @@ static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+       /* disable SCLK dpm */
+       if (!data->sclk_dpm_key_disabled)
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_DPM_Disable) == 0),
+                               "Failed to disable SCLK DPM!",
+                               return -1);
+
+       /* disable MCLK dpm */
+       if (!data->mclk_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_MCLKDPM_Disable) == 0),
+                               "Failed to disable MCLK DPM!",
+                               return -1);
+       }
+
+       return 0;
+}
+
+static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+       /* disable general power management */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+                       GLOBAL_PWRMGT_EN, 0);
+       /* disable sclk deep sleep */
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+                       DYNAMIC_PM_EN, 0);
+
+       /* disable PCIE dpm */
+       if (!data->pcie_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(
+                               (smum_send_msg_to_smc(hwmgr->smumgr,
+                                               PPSMC_MSG_PCIeDPM_Disable) == 0),
+                               "Failed to disable pcie DPM during DPM Stop Function!",
+                               return -1);
+       }
+
+       if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
+               printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!\n");
+               return -1;
+       }
+
+       return 0;
+}
+
 static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
 {
        bool protection;
@@ -2395,6 +2558,23 @@ static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
        return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
 }
 
+static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+               PHM_AutoThrottleSource source)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+       if (data->active_auto_throttle_sources & (1 << source)) {
+               data->active_auto_throttle_sources &= ~(1 << source);
+               polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+       }
+       return 0;
+}
+
+static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+       return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
 int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -2471,6 +2651,8 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
 
+       smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+
        tmp_result = polaris10_enable_sclk_control(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to enable SCLK control!", result = tmp_result);
@@ -2516,8 +2698,60 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 
 int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
+       int tmp_result, result = 0;
 
-       return 0;
+       tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
+       PP_ASSERT_WITH_CODE(tmp_result == 0,
+                       "DPM is not running right now, no need to disable DPM!",
+                       return 0);
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_ThermalController))
+               PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                               GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
+
+       tmp_result = polaris10_disable_power_containment(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable power containment!", result = tmp_result);
+
+       tmp_result = polaris10_disable_smc_cac(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable SMC CAC!", result = tmp_result);
+
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
+       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+                       GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
+
+       tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable thermal auto throttle!", result = tmp_result);
+
+       tmp_result = polaris10_stop_dpm(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to stop DPM!", result = tmp_result);
+
+       tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable deep sleep master switch!", result = tmp_result);
+
+       tmp_result = polaris10_disable_ulv(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to disable ULV!", result = tmp_result);
+
+       tmp_result = polaris10_clear_voting_clients(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to clear voting clients!", result = tmp_result);
+
+       tmp_result = polaris10_reset_to_default(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to reset to default!", result = tmp_result);
+
+       tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
+       PP_ASSERT_WITH_CODE((tmp_result == 0),
+                       "Failed to force to switch arbf0!", result = tmp_result);
+
+       return result;
 }
 
 int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
@@ -2528,13 +2762,6 @@ int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
 
 int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (data->soft_pp_table) {
-               kfree(data->soft_pp_table);
-               data->soft_pp_table = NULL;
-       }
-
        return phm_hwmgr_backend_fini(hwmgr);
 }
 
@@ -2590,8 +2817,13 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
        phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
                                                PHM_PlatformCaps_TCPRamping);
 
-       phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-                                       PHM_PlatformCaps_PowerContainment);
+       if (hwmgr->powercontainment_enabled)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                           PHM_PlatformCaps_PowerContainment);
+       else
+               phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+                           PHM_PlatformCaps_PowerContainment);
+
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                                        PHM_PlatformCaps_CAC);
 
@@ -2606,6 +2838,7 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                                PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
        if (hwmgr->chip_id == CHIP_POLARIS11)
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_SPLLShutdownSupport);
@@ -2638,7 +2871,7 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        uint16_t vv_id;
-       uint16_t vddc = 0;
+       uint32_t vddc = 0;
        uint16_t i, j;
        uint32_t sclk = 0;
        struct phm_ppt_v1_information *table_info =
@@ -2662,15 +2895,16 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
                                }
                        }
 
+                       if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
+                                               VOLTAGE_TYPE_VDDC,
+                                               sclk, vv_id, &vddc) != 0) {
+                               printk(KERN_WARNING "failed to retrieve EVV voltage!\n");
+                               continue;
+                       }
 
-                       PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
-                                                       VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
-                                               "Error retrieving EVV voltage value!",
-                                               continue);
-
-
-                       /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
-                       PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
+                       /* Make sure vddc is less than 2 V, or else it could burn the ASIC;
+                        * vddc here is the real voltage level in units of 0.01 mV, so the
+                        * 2 V limit is 200000 * 0.01 mV. */
+                       PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
                                        "Invalid VDDC value", result = -EINVAL;);
 
                        /* the voltage should not be zero nor equal to leakage ID */
@@ -2896,15 +3130,46 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+{
+       struct phm_ppt_v1_information *table_info =
+                      (struct phm_ppt_v1_information *)(hwmgr->pptable);
+       struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+                       table_info->vdd_dep_on_mclk;
+       struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+                       table_info->vddc_lookup_table;
+       uint32_t i;
+
+       if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
+               if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
+                       return 0;
+
+               for (i = 0; i < lookup_table->count; i++) {
+                       if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
+                               dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
+                               return 0;
+                       }
+               }
+       }
+       return 0;
+}
+
+
 int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_hwmgr *data;
        struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
        uint32_t temp_reg;
        int result;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
+       data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       hwmgr->backend = data;
+
        data->dll_default_on = false;
        data->sram_end = SMC_RAM_END;
        data->mclk_dpm0_activity_target = 0xa;
@@ -2938,6 +3203,11 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
        data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
 
+       data->enable_tdc_limit_feature = true;
+       data->enable_pkg_pwr_tracking_feature = true;
+       data->force_pcie_gen = PP_PCIEGenInvalid;
+       data->mclk_stutter_mode_threshold = 40000;
+
        if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
                data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
@@ -2962,8 +3232,13 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                        data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
        }
 
+       if (table_info->cac_dtp_table->usClockStretchAmount != 0)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_ClockStretcher);
+
        polaris10_set_features_platform_caps(hwmgr);
 
+       polaris10_patch_voltage_workaround(hwmgr);
        polaris10_init_dpm_defaults(hwmgr);
 
        /* Get leakage voltage based on leakage ID. */
@@ -3068,7 +3343,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_gen_cap = 0x30007;
+                       data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
                else
                        data->pcie_gen_cap = (uint32_t)sys_info.value;
                if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -3077,7 +3352,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_lane_cap = 0x2f0000;
+                       data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
                else
                        data->pcie_lane_cap = (uint32_t)sys_info.value;
 
@@ -3520,10 +3795,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
        ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
        ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
                        (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-       ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-                       (ATOM_Tonga_SCLK_Dependency_Table *)
+       PPTable_Generic_SubTable_Header *sclk_dep_table =
+                       (PPTable_Generic_SubTable_Header *)
                        (((unsigned long)powerplay_table) +
                                le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
        ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
                        (ATOM_Tonga_MCLK_Dependency_Table *)
                        (((unsigned long)powerplay_table) +
@@ -3575,7 +3851,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
        /* Performance levels are arranged from low to high. */
        performance_level->memory_clock = mclk_dep_table->entries
                        [state_entry->ucMemoryClockIndexLow].ulMclk;
-       performance_level->engine_clock = sclk_dep_table->entries
+       if (sclk_dep_table->ucRevId == 0)
+               performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+                       [state_entry->ucEngineClockIndexLow].ulSclk;
+       else if (sclk_dep_table->ucRevId == 1)
+               performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
                        [state_entry->ucEngineClockIndexLow].ulSclk;
        performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
                        state_entry->ucPCIEGenLow);
@@ -3586,8 +3866,14 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
                        [polaris10_power_state->performance_level_count++]);
        performance_level->memory_clock = mclk_dep_table->entries
                        [state_entry->ucMemoryClockIndexHigh].ulMclk;
-       performance_level->engine_clock = sclk_dep_table->entries
+
+       if (sclk_dep_table->ucRevId == 0)
+               performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+                       [state_entry->ucEngineClockIndexHigh].ulSclk;
+       else if (sclk_dep_table->ucRevId == 1)
+               performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
                        [state_entry->ucEngineClockIndexHigh].ulSclk;
+
        performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
                        state_entry->ucPCIEGenHigh);
        performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
@@ -3645,7 +3931,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
                switch (state->classification.ui_label) {
                case PP_StateUILabel_Performance:
                        data->use_pcie_performance_levels = true;
-
                        for (i = 0; i < ps->performance_level_count; i++) {
                                if (data->pcie_gen_performance.max <
                                                ps->performance_levels[i].pcie_gen)
@@ -3661,7 +3946,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
                                                ps->performance_levels[i].pcie_lane)
                                        data->pcie_lane_performance.max =
                                                        ps->performance_levels[i].pcie_lane;
-
                                if (data->pcie_lane_performance.min >
                                                ps->performance_levels[i].pcie_lane)
                                        data->pcie_lane_performance.min =
@@ -4187,12 +4471,9 @@ int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
        if (!bgate) {
-               data->smc_state_table.SamuBootLevel =
-                               (uint8_t) (table_info->mm_dep_table->count - 1);
+               data->smc_state_table.SamuBootLevel = 0;
                mm_boot_level_offset = data->dpm_table_start +
                                offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
                mm_boot_level_offset /= 4;
@@ -4327,6 +4608,15 @@ static int polaris10_notify_link_speed_change_after_state_change(
        return 0;
 }
 
+static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+               (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+       return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+}
+
 static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
 {
        int tmp_result, result = 0;
@@ -4375,6 +4665,11 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i
                        "Failed to program memory timing parameters!",
                        result = tmp_result);
 
+       tmp_result = polaris10_notify_smc_display(hwmgr);
+       PP_ASSERT_WITH_CODE((0 == tmp_result),
+                       "Failed to notify smc display settings!",
+                       result = tmp_result);
+
        tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
        PP_ASSERT_WITH_CODE((0 == tmp_result),
                        "Failed to unfreeze SCLK MCLK DPM!",
@@ -4409,6 +4704,7 @@ static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_
                        PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
 }
 
+
 int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
 {
        PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
@@ -4428,8 +4724,6 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm
 
        if (num_active_displays > 1)  /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
                polaris10_notify_smc_display_change(hwmgr, false);
-       else
-               polaris10_notify_smc_display_change(hwmgr, true);
 
        return 0;
 }
@@ -4470,6 +4764,8 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
        frame_time_in_us = 1000000 / refresh_rate;
 
        pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+       data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
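Worked example for the scaling above (refresh rate illustrative):

/* At 60 Hz: frame_time_in_us = 1000000 / 60 = 16666
 * frame_time_x2 = 16666 * 2 / 100 = 333
 * i.e. twice the frame time in 100 us units, later handed to
 * PPSMC_MSG_SetVBITimeout by polaris10_notify_smc_display(). */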
        display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
 
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
@@ -4478,8 +4774,6 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
 
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
 
-       polaris10_notify_smc_display_change(hwmgr, num_active_displays != 0);
-
        return 0;
 }
 
@@ -4591,7 +4885,7 @@ int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
                return 0;
        }
 
-       data->need_long_memory_training = true;
+       data->need_long_memory_training = false;
 
 /*
  *     PPMCME_FirmwareDescriptorEntry *pfd = NULL;
@@ -4721,42 +5015,6 @@ int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
        return result;
 }
 
-static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
-                                             hwmgr->soft_pp_table_size,
-                                             GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       *table = (char *)&data->soft_pp_table;
-
-       return hwmgr->soft_pp_table_size;
-}
-
-static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
-       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       memcpy(data->soft_pp_table, buf, size);
-
-       hwmgr->soft_pp_table = data->soft_pp_table;
-
-       /* TODO: re-init powerplay to implement modified pptable */
-
-       return 0;
-}
-
 static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask)
 {
@@ -4899,6 +5157,89 @@ static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
                                CG_FDO_CTRL2, FDO_PWM_MODE);
 }
 
+static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+       struct polaris10_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       int value;
+
+       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+                       100 /
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return value;
+}
+
+static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       struct pp_power_state  *ps;
+       struct polaris10_power_state  *polaris10_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+       polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+                       value / 100 +
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return 0;
+}
+
+static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+       struct polaris10_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       int value;
+
+       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+                       100 /
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return value;
+}
+
+static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       struct polaris10_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       struct pp_power_state  *ps;
+       struct polaris10_power_state  *polaris10_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+       polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+                       value / 100 +
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return 0;
+}
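A worked example of the overdrive math in the four helpers above (clock values illustrative):

/* With a golden top SCLK of 1266 MHz:
 * set_sclk_od(5):  1266 * 5 / 100 + 1266 = 1329 MHz
 * get_sclk_od():   (1329 - 1266) * 100 / 1266 = 4
 * Integer truncation means the readback can be one below the value set;
 * values above 20 are clamped to 20 (percent). */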
 static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
        .backend_init = &polaris10_hwmgr_backend_init,
        .backend_fini = &polaris10_hwmgr_backend_fini,
@@ -4937,22 +5278,17 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
        .check_states_equal = polaris10_check_states_equal,
        .set_fan_control_mode = polaris10_set_fan_control_mode,
        .get_fan_control_mode = polaris10_get_fan_control_mode,
-       .get_pp_table = polaris10_get_pp_table,
-       .set_pp_table = polaris10_set_pp_table,
        .force_clock_level = polaris10_force_clock_level,
        .print_clock_levels = polaris10_print_clock_levels,
        .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
+       .get_sclk_od = polaris10_get_sclk_od,
+       .set_sclk_od = polaris10_set_sclk_od,
+       .get_mclk_od = polaris10_get_mclk_od,
+       .set_mclk_od = polaris10_set_mclk_od,
 };
 
 int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
-       struct polaris10_hwmgr  *data;
-
-       data = kzalloc (sizeof(struct polaris10_hwmgr), GFP_KERNEL);
-       if (data == NULL)
-               return -ENOMEM;
-
-       hwmgr->backend = data;
        hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
        hwmgr->pptable_func = &tonga_pptable_funcs;
        pp_polaris10_thermal_initialize(hwmgr);
index beedf35cbfa67974eba8758721b9a4fb227c9a69..dbc6d9bfd5af58c26f581e3a507f9b57d819c6dd 100644 (file)
@@ -309,9 +309,9 @@ struct polaris10_hwmgr {
        uint32_t                           up_hyst;
        uint32_t disable_dpm_mask;
        bool apply_optimized_settings;
-
-       /* soft pptable for re-uploading into smu */
-       void *soft_pp_table;
+       uint32_t                              avfs_vdroop_override_setting;
+       bool                                  apply_avfs_cks_off_voltage;
+       uint32_t                              frame_time_x2;
 };
 
 /* To convert to Q8.8 format for firmware */
index 0b99ab3ba0c5063ca8d3cd70c91e3bab0c3f5ab1..5620e268b55307ecabd1e0bd9287ea37148a3f1b 100644 (file)
@@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
 
                if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
                                (uint8_t *)&data->power_tune_table,
-                               sizeof(struct SMU74_Discrete_PmFuses), data->sram_end))
+                               (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to download PmFuseTable Failed!",
                                        return -EINVAL);
@@ -312,6 +312,23 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_CAC) && data->cac_enabled) {
+               int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                               (uint16_t)(PPSMC_MSG_DisableCac));
+               PP_ASSERT_WITH_CODE((smc_result == 0),
+                               "Failed to disable CAC in SMC.", result = -1);
+
+               data->cac_enabled = false;
+       }
+       return result;
+}
+
 int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -373,6 +390,48 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
        return result;
 }
 
+int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       int result = 0;
+
+       if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+                       PHM_PlatformCaps_PowerContainment) &&
+                       data->power_containment_features) {
+               int smc_result;
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_TDCLimit) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable TDCLimit in SMC.",
+                                       result = smc_result);
+               }
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_DTE) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_DisableDTE));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable DTE in SMC.",
+                                       result = smc_result);
+               }
+
+               if (data->power_containment_features &
+                               POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+                       smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+                                       (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+                       PP_ASSERT_WITH_CODE((smc_result == 0),
+                                       "Failed to disable PkgPwrTracking in SMC.",
+                                       result = smc_result);
+               }
+               data->power_containment_features = 0;
+       }
+
+       return result;
+}
+
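The three teardown branches above share one shape: test a feature bit, send the matching disable message. A table-driven restatement (purely illustrative refactoring; the feature and message IDs are the ones used above):

static const struct {
        uint32_t feature;
        uint16_t msg;
} polaris10_pc_disable_msgs[] = {
        { POWERCONTAINMENT_FEATURE_TDCLimit,    PPSMC_MSG_TDCLimitDisable },
        { POWERCONTAINMENT_FEATURE_DTE,         PPSMC_MSG_DisableDTE },
        { POWERCONTAINMENT_FEATURE_PkgPwrLimit, PPSMC_MSG_PkgPwrLimitDisable },
};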
 int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
 {
        struct phm_ppt_v1_information *table_info =
index 68bc1cb6d40ca0446340d74e28067a073a8a3c6d..d492d6d288679e6545408864b390baadd71329cc 100644 (file)
@@ -31,6 +31,19 @@ enum polaris10_pt_config_reg_type {
        POLARIS10_CONFIGREG_MAX
 };
 
+#define DIDT_SQ_CTRL0__UNUSED_0_MASK    0xfffc0000
+#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT  0x12
+#define DIDT_TD_CTRL0__UNUSED_0_MASK    0xfffc0000
+#define DIDT_TD_CTRL0__UNUSED_0__SHIFT  0x12
+#define DIDT_TCP_CTRL0__UNUSED_0_MASK   0xfffc0000
+#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
+#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
+#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK                 0xc0000000
+#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT               0x0000001e
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK                0xc0000000
+#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT              0x0000001e
+
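Standard mask/shift usage for the register fields defined above (sketch only; shown for the SQ field, the TD/TCP variants are identical in shape):

static inline uint32_t didt_sq_ctrl0_unused(uint32_t reg)
{
        return (reg & DIDT_SQ_CTRL0__UNUSED_0_MASK) >>
                        DIDT_SQ_CTRL0__UNUSED_0__SHIFT;
}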
 /* PowerContainment Features */
 #define POWERCONTAINMENT_FEATURE_DTE             0x00000001
 #define POWERCONTAINMENT_FEATURE_TDCLimit        0x00000002
@@ -62,7 +75,9 @@ void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
 int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
 int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
 int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
 int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
+int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
 int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
 int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
 
index aba167f7d16797a6c47cb23e79215bcc4fce258b..b206632d4650f7ca69d72fad9d1eceaadf5043d3 100644 (file)
@@ -625,10 +625,14 @@ static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
        int ret;
        struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
        struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 
-       if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
                return 0;
 
+       ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+       if (ret)
+               return -1;
+
        ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
                        0 : -1;
 
index 58742e0d14924bb3b0e994c03f8b98f898769081..a3c38bbd1e94d5596ba9190772d40b1fcb5c28db 100644 (file)
@@ -44,6 +44,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index)
        return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
 }
 
+bool acpi_atcs_notify_pcie_device_ready(void *device)
+{
+       int32_t temp_buffer = 1;
+
+       return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
+                               ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
+                                               &temp_buffer,
+                                               NULL,
+                                               0,
+                                               sizeof(temp_buffer),
+                                               0);
+}
+
 int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
 {
        struct atcs_pref_req_input atcs_input;
@@ -52,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
        int result;
        struct cgs_system_info info = {0};
 
-       if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST))
+       if (acpi_atcs_notify_pcie_device_ready(device) != 0)
                return -EINVAL;
 
        info.size = sizeof(struct cgs_system_info);
@@ -77,7 +91,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
                                                ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
                                                &atcs_input,
                                                &atcs_output,
-                                               0,
+                                               1,
                                                sizeof(atcs_input),
                                                sizeof(atcs_output));
                if (result != 0)
index da9f5f1b6dc2ccd00cd7f2508c316b189ccec657..e2aece361eb0f68c14300f2807f18cc909358dc4 100644 (file)
@@ -179,13 +179,12 @@ int atomctrl_set_engine_dram_timings_rv770(
 
        /* They are both in 10KHz Units. */
        engine_clock_parameters.ulTargetEngineClock =
-               (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK;
-       engine_clock_parameters.ulTargetEngineClock |=
-               (COMPUTE_ENGINE_PLL_PARAM << 24);
+               cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) |
+                           ((COMPUTE_ENGINE_PLL_PARAM << 24)));
 
        /* in 10 khz units.*/
        engine_clock_parameters.sReserved.ulClock =
-               (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK;
+               cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
        return cgs_atom_exec_cmd_table(hwmgr->device,
                        GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
                        &engine_clock_parameters);
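The atomctrl changes in this file all enforce one rule: ATOM BIOS table fields are little-endian, so stores go through cpu_to_le*() and loads through le*_to_cpu() -- no-ops on little-endian hosts, byte swaps on big-endian ones. A minimal round-trip sketch:

        uint32_t clock = 60000;                 /* 600 MHz in 10 kHz units */
        __le32 packed = cpu_to_le32(clock);     /* layout as in the ATOM table */
        uint32_t back = le32_to_cpu(packed);    /* back == clock on any host */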
@@ -252,7 +251,7 @@ int atomctrl_get_memory_pll_dividers_si(
        COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
        int result;
 
-       mpll_parameters.ulClock = (uint32_t) clock_value;
+       mpll_parameters.ulClock = cpu_to_le32(clock_value);
        mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
 
        result = cgs_atom_exec_cmd_table
@@ -262,9 +261,9 @@ int atomctrl_get_memory_pll_dividers_si(
 
        if (0 == result) {
                mpll_param->mpll_fb_divider.clk_frac =
-                       mpll_parameters.ulFbDiv.usFbDivFrac;
+                       le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
                mpll_param->mpll_fb_divider.cl_kf =
-                       mpll_parameters.ulFbDiv.usFbDiv;
+                       le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
                mpll_param->mpll_post_divider =
                        (uint32_t)mpll_parameters.ucPostDiv;
                mpll_param->vco_mode =
@@ -300,7 +299,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
        COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
        int result;
 
-       mpll_parameters.ulClock.ulClock = (uint32_t)clock_value;
+       mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
 
        result = cgs_atom_exec_cmd_table(hwmgr->device,
                        GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
@@ -320,7 +319,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
        COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
        int result;
 
-       pll_parameters.ulClock = clock_value;
+       pll_parameters.ulClock = cpu_to_le32(clock_value);
 
        result = cgs_atom_exec_cmd_table
                (hwmgr->device,
@@ -329,7 +328,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
 
        if (0 == result) {
                dividers->pll_post_divider = pll_parameters.ucPostDiv;
-               dividers->real_clock = pll_parameters.ulClock;
+               dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);
        }
 
        return result;
@@ -343,7 +342,7 @@ int atomctrl_get_engine_pll_dividers_vi(
        COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
        int result;
 
-       pll_patameters.ulClock.ulClock = clock_value;
+       pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
        pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
 
        result = cgs_atom_exec_cmd_table
@@ -355,12 +354,12 @@ int atomctrl_get_engine_pll_dividers_vi(
                dividers->pll_post_divider =
                        pll_patameters.ulClock.ucPostDiv;
                dividers->real_clock =
-                       pll_patameters.ulClock.ulClock;
+                       le32_to_cpu(pll_patameters.ulClock.ulClock);
 
                dividers->ul_fb_div.ul_fb_div_frac =
-                       pll_patameters.ulFbDiv.usFbDivFrac;
+                       le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
                dividers->ul_fb_div.ul_fb_div =
-                       pll_patameters.ulFbDiv.usFbDiv;
+                       le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
 
                dividers->uc_pll_ref_div =
                        pll_patameters.ucPllRefDiv;
@@ -380,7 +379,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
        COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
        int result;
 
-       pll_patameters.ulClock.ulClock = clock_value;
+       pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
        pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
 
        result = cgs_atom_exec_cmd_table
@@ -412,7 +411,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
        COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
        int result;
 
-       pll_patameters.ulClock.ulClock = clock_value;
+       pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
        pll_patameters.ulClock.ucPostDiv =
                COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
 
@@ -425,12 +424,12 @@ int atomctrl_get_dfs_pll_dividers_vi(
                dividers->pll_post_divider =
                        pll_patameters.ulClock.ucPostDiv;
                dividers->real_clock =
-                       pll_patameters.ulClock.ulClock;
+                       le32_to_cpu(pll_patameters.ulClock.ulClock);
 
                dividers->ul_fb_div.ul_fb_div_frac =
-                       pll_patameters.ulFbDiv.usFbDivFrac;
+                       le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
                dividers->ul_fb_div.ul_fb_div =
-                       pll_patameters.ulFbDiv.usFbDiv;
+                       le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
 
                dividers->uc_pll_ref_div =
                        pll_patameters.ucPllRefDiv;
@@ -519,13 +518,13 @@ int atomctrl_get_voltage_table_v3(
 
        for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
                voltage_table->entries[i].value =
-                       voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue;
+                       le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
                voltage_table->entries[i].smio_low =
-                       voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId;
+                       le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
        }
 
        voltage_table->mask_low    =
-               voltage_object->asGpioVoltageObj.ulGpioMaskVal;
+               le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
        voltage_table->count      =
                voltage_object->asGpioVoltageObj.ucGpioEntryNum;
        voltage_table->phase_delay =
@@ -552,13 +551,13 @@ static bool atomctrl_lookup_gpio_pin(
                                pin_assignment->ucGpioPinBitShift;
                        gpio_pin_assignment->us_gpio_pin_aindex =
                                le16_to_cpu(pin_assignment->usGpioPin_AIndex);
-                       return false;
+                       return true;
                }
 
                offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
        }
 
-       return true;
+       return false;
 }
 
 /**
@@ -592,12 +591,12 @@ bool atomctrl_get_pp_assign_pin(
                const uint32_t pinId,
                pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
 {
-       bool bRet = 0;
+       bool bRet = false;
        ATOM_GPIO_PIN_LUT *gpio_lookup_table =
                get_gpio_lookup_table(hwmgr->device);
 
        PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
-                       "Could not find GPIO lookup Table in BIOS.", return -1);
+                       "Could not find GPIO lookup Table in BIOS.", return false);
 
        bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
                gpio_pin_assignment);
@@ -650,8 +649,8 @@ int atomctrl_calculate_voltage_evv_on_sclk(
                return -1;
 
        if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
-                       (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
-                       getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
+           (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
+            getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
                return -1;
 
        /*-----------------------------------------------------------
@@ -662,37 +661,37 @@ int atomctrl_calculate_voltage_evv_on_sclk(
 
        switch (dpm_level) {
        case 1:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
                break;
        case 2:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
                break;
        case 3:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
                break;
        case 4:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
                break;
        case 5:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
                break;
        case 6:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
                break;
        case 7:
-               fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000);
+               fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7));
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
                break;
        default:
                printk(KERN_ERR "DPM Level not supported\n");
                fPowerDPMx = Convert_ULONG_ToFraction(1);
-               fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000);
+               fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
        }
 
        /*-------------------------
@@ -716,9 +715,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
                return result;
 
        /* Finally, the actual fuse value */
-       ul_RO_fused = sOutput_FuseValues.ulEfuseValue;
-       fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1);
-       fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1);
+       ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
+       fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
        fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
 
        sCACm_fuse = getASICProfilingInfo->sCACm;
@@ -736,9 +735,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_CACm_fused = sOutput_FuseValues.ulEfuseValue;
-       fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000);
-       fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000);
+       ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
+       fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
 
        fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
 
@@ -756,9 +755,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_CACb_fused = sOutput_FuseValues.ulEfuseValue;
-       fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000);
-       fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000);
+       ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
+       fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
 
        fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
 
@@ -777,9 +776,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue;
-       fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000);
-       fRange = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000);
+       ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
+       fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
 
        fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
                        fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
@@ -798,9 +797,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue;
-       fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000);
-       fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000);
+       ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
+       fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
        fRange = fMultiply(fRange, ConvertToFraction(-1));
 
        fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
@@ -820,9 +819,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue;
-       fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000);
-       fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000);
+       ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
+       fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
 
        fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
                        fAverage, fRange, sKv_b_fuse.ucEfuseLength);
@@ -851,9 +850,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        if (result)
                return result;
 
-       ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue;
-       fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000);
-       fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000);
+       ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
+       fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
+       fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
 
        fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
                        fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
@@ -863,40 +862,40 @@ int atomctrl_calculate_voltage_evv_on_sclk(
         * PART 2 - Grabbing all required values
         *-------------------------------------------
         */
-       fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000),
+       fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
-       fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000),
+       fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
-       fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000),
+       fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
-       fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000),
+       fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
-       fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 1000000),
+       fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
-       fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000),
+       fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
-       fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000),
+       fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
-       fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000),
+       fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
                        ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
 
-       fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a);
-       fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b);
-       fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c);
+       fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
+       fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
+       fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
 
-       fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed);
+       fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
 
        fMargin_FMAX_mean = GetScaledFraction(
-                       getASICProfilingInfo->ulMargin_Fmax_mean, 10000);
+               le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
        fMargin_Plat_mean = GetScaledFraction(
-                       getASICProfilingInfo->ulMargin_plat_mean, 10000);
+               le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
        fMargin_FMAX_sigma = GetScaledFraction(
-                       getASICProfilingInfo->ulMargin_Fmax_sigma, 10000);
+               le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
        fMargin_Plat_sigma = GetScaledFraction(
-                       getASICProfilingInfo->ulMargin_plat_sigma, 10000);
+               le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
 
        fMargin_DC_sigma = GetScaledFraction(
-                       getASICProfilingInfo->ulMargin_DC_sigma, 100);
+               le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
        fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
 
        fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
@@ -908,14 +907,14 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        fSclk = GetScaledFraction(sclk, 100);
 
        fV_max = fDivide(GetScaledFraction(
-                       getASICProfilingInfo->ulMaxVddc, 1000), ConvertToFraction(4));
-       fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10);
-       fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100);
-       fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10);
+                                le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
+       fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
+       fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
+       fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
        fV_FT = fDivide(GetScaledFraction(
-                       getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4));
+                               le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
        fV_min = fDivide(GetScaledFraction(
-                       getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4));
+                                le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
 
        /*-----------------------
         * PART 3
@@ -925,7 +924,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
        fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
        fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
        fC_Term = fAdd(fMargin_RO_c,
-                       fAdd(fMultiply(fSM_A0,fLkg_FT),
+                       fAdd(fMultiply(fSM_A0, fLkg_FT),
                        fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
                        fAdd(fMultiply(fSM_A3, fSclk),
                        fSubtract(fSM_A7, fRO_fused)))));
@@ -1063,9 +1062,9 @@ int atomctrl_get_voltage_evv_on_sclk(
        get_voltage_info_param_space.ucVoltageMode   =
                ATOM_GET_VOLTAGE_EVV_VOLTAGE;
        get_voltage_info_param_space.usVoltageLevel  =
-               virtual_voltage_Id;
+               cpu_to_le16(virtual_voltage_Id);
        get_voltage_info_param_space.ulSCLKFreq      =
-               sclk;
+               cpu_to_le32(sclk);
 
        result = cgs_atom_exec_cmd_table(hwmgr->device,
                        GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1074,8 +1073,54 @@ int atomctrl_get_voltage_evv_on_sclk(
        if (0 != result)
                return result;
 
-       *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
-                       (&get_voltage_info_param_space))->usVoltageLevel;
+       *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
+                               (&get_voltage_info_param_space))->usVoltageLevel);
+
+       return result;
+}
+
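+/*
+ * A minimal caller sketch for the helper below (hypothetical names;
+ * assumes the vddc_dependency_on_sclk table has been populated):
+ *
+ *   uint16_t mv;
+ *   if (!atomctrl_get_voltage_evv(hwmgr, 0xff01, &mv))
+ *           pr_debug("leakage voltage: %u mV\n", mv);
+ */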
+/**
+ * atomctrl_get_voltage_evv() gets a real voltage value via a call to the ATOM COMMAND table.
+ * @param hwmgr              input: pointer to hwManager
+ * @param virtual_voltage_id input: voltage id which matches a per-voltage DPM state: 0xff01, 0xff02 .. 0xff08
+ * @param voltage            output: real voltage level in units of mV
+ */
+int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
+                            uint16_t virtual_voltage_id,
+                            uint16_t *voltage)
+{
+       int result;
+       int entry_id;
+       GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
+
+       /* search for leakage voltage ID 0xff01 ~ 0xff08 and its sclk */
+       for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
+               if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
+                       /* found */
+                       break;
+               }
+       }
+
+       PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+               "Can't find requested voltage id in vddc_dependency_on_sclk table!",
+               return -EINVAL;
+       );
+
+       get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
+       get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+       get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
+       get_voltage_info_param_space.ulSCLKFreq =
+               cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
+
+       result = cgs_atom_exec_cmd_table(hwmgr->device,
+                       GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
+                       &get_voltage_info_param_space);
+
+       if (0 != result)
+               return result;
+
+       *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
+                               (&get_voltage_info_param_space))->usVoltageLevel);
 
        return result;
 }
@@ -1165,8 +1210,8 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
 
        if (entry_found) {
                ssEntry->speed_spectrum_percentage =
-                       ssInfo->usSpreadSpectrumPercentage;
-               ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz;
+                       le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
+               ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);
 
                if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
                        (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
@@ -1222,7 +1267,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
        int result;
        READ_EFUSE_VALUE_PARAMETER efuse_param;
 
-       efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4;
+       efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
        efuse_param.sEfuse.ucBitShift = (uint8_t)
                        (start_index - ((start_index / 32) * 32));
        efuse_param.sEfuse.ucBitLength  = (uint8_t)
@@ -1232,19 +1277,21 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
                        GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
                        &efuse_param);
        if (!result)
-               *efuse = efuse_param.ulEfuseValue & mask;
+               *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
 
        return result;
 }
 
 int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
-                                                               uint8_t level)
+                             uint8_t level)
 {
        DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
        int result;
 
-       memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK;
-       memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM;
+       memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq =
+               cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
+       memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag =
+               cpu_to_le32(ADJUST_MC_SETTING_PARAM);
        memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
 
        result = cgs_atom_exec_cmd_table
@@ -1256,7 +1303,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 }
 
 int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-                               uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
+                               uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
 {
 
        int result;
@@ -1264,8 +1311,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
 
        get_voltage_info_param_space.ucVoltageType = voltage_type;
        get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
-       get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id;
-       get_voltage_info_param_space.ulSCLKFreq = sclk;
+       get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
+       get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
 
        result = cgs_atom_exec_cmd_table(hwmgr->device,
                        GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1274,7 +1321,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
        if (0 != result)
                return result;
 
-       *voltage = get_voltage_info_param_space.usVoltageLevel;
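+       /* The V1_3 output carries a 32-bit voltage level, hence the wider out-pointer. */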
+       *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)
+                               (&get_voltage_info_param_space))->ulVoltageLevel);
 
        return result;
 }
@@ -1295,10 +1342,57 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
        for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
                table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
                table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
-               table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc;
-               table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper;
-               table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower;
+               table->entry[i].usFcw_pcc =
+                       le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
+               table->entry[i].usFcw_trans_upper =
+                       le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
+               table->entry[i].usRcw_trans_lower =
+                       le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
        }
 
        return 0;
 }
+
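+/*
+ * atomctrl_get_avfs_information copies the AVFS characterization
+ * parameters out of the ATOM_ASIC_PROFILING_INFO_V3_6 data table,
+ * converting each multi-byte field from BIOS little-endian to host
+ * byte order.
+ */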
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
+                                 struct pp_atom_ctrl__avfs_parameters *param)
+{
+       ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
+
+       if (param == NULL)
+               return -EINVAL;
+
+       profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
+                       cgs_atom_get_data_table(hwmgr->device,
+                                       GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
+                                       NULL, NULL, NULL);
+       if (!profile)
+               return -1;
+
+       param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
+       param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
+       param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
+       param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
+       param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
+       param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
+       param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
+       param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
+       param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
+       param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
+       param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
+       param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
+       param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+       param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+       param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+       param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
+       param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
+       param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
+       param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
+       param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
+       param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
+       param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
+       param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
+       param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
+       param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
+
+       return 0;
+}
index d24ebb566905295587b16211cbef4dc1eca0c5fa..fc898afce002fe1c681effcb5ef603de854c2745 100644 (file)
@@ -250,8 +250,38 @@ struct pp_atomctrl_gpio_pin_assignment {
 };
 typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment;
 
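+/* Host-byte-order copy of the AVFS parameters read from ATOM_ASIC_PROFILING_INFO_V3_6. */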
+struct pp_atom_ctrl__avfs_parameters {
+       uint32_t ulAVFS_meanNsigma_Acontant0;
+       uint32_t ulAVFS_meanNsigma_Acontant1;
+       uint32_t ulAVFS_meanNsigma_Acontant2;
+       uint16_t usAVFS_meanNsigma_DC_tol_sigma;
+       uint16_t usAVFS_meanNsigma_Platform_mean;
+       uint16_t usAVFS_meanNsigma_Platform_sigma;
+       uint32_t ulGB_VDROOP_TABLE_CKSOFF_a0;
+       uint32_t ulGB_VDROOP_TABLE_CKSOFF_a1;
+       uint32_t ulGB_VDROOP_TABLE_CKSOFF_a2;
+       uint32_t ulGB_VDROOP_TABLE_CKSON_a0;
+       uint32_t ulGB_VDROOP_TABLE_CKSON_a1;
+       uint32_t ulGB_VDROOP_TABLE_CKSON_a2;
+       uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+       uint16_t usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+       uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+       uint32_t ulAVFSGB_FUSE_TABLE_CKSON_m1;
+       uint16_t usAVFSGB_FUSE_TABLE_CKSON_m2;
+       uint32_t ulAVFSGB_FUSE_TABLE_CKSON_b;
+       uint16_t usMaxVoltage_0_25mv;
+       uint8_t  ucEnableGB_VDROOP_TABLE_CKSOFF;
+       uint8_t  ucEnableGB_VDROOP_TABLE_CKSON;
+       uint8_t  ucEnableGB_FUSE_TABLE_CKSOFF;
+       uint8_t  ucEnableGB_FUSE_TABLE_CKSON;
+       uint16_t usPSM_Age_ComFactor;
+       uint8_t  ucEnableApplyAVFS_CKS_OFF_Voltage;
+       uint8_t  ucReserved;
+};
+
 extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
 extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+extern int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage);
 extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
 extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo);
 extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo);
@@ -276,7 +306,10 @@ extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t
 extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
                                                                uint8_t level);
 extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-                               uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+                               uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
 extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
+
+extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
+
 #endif
 
index 2f1a14fe05b1d9f3ebe6fd950f45888f49b95ec0..6c321b0d8a1eb8483c45bf563f79adb000a87865 100644 (file)
@@ -794,19 +794,35 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2(
 static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
                                     struct pp_hwmgr *hwmgr)
 {
-       const void *table_addr = NULL;
+       const void *table_addr = hwmgr->soft_pp_table;
        uint8_t frev, crev;
        uint16_t size;
 
-       table_addr = cgs_atom_get_data_table(hwmgr->device,
-                       GetIndexIntoMasterTable(DATA, PowerPlayInfo),
-                       &size, &frev, &crev);
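+       /* Fetch and cache the BIOS PowerPlay table only on first use. */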
+       if (!table_addr) {
+               table_addr = cgs_atom_get_data_table(hwmgr->device,
+                               GetIndexIntoMasterTable(DATA, PowerPlayInfo),
+                               &size, &frev, &crev);
 
-       hwmgr->soft_pp_table = table_addr;
+               hwmgr->soft_pp_table = table_addr;
+               hwmgr->soft_pp_table_size = size;
+       }
 
        return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
 }
 
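+/*
+ * Report the voltage-transition and backbias-transition response times
+ * recorded in the PowerPlay table header.
+ */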
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+                               uint32_t *vol_rep_time, uint32_t *bb_rep_time)
+{
+       const ATOM_PPLIB_POWERPLAYTABLE *powerplay_tab = get_powerplay_table(hwmgr);
+
+       PP_ASSERT_WITH_CODE(NULL != powerplay_tab,
+                           "Missing PowerPlay Table!", return -EINVAL);
+
+       *vol_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usVoltageTime);
+       *bb_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usBackbiasTime);
+
+       return 0;
+}
 
 int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
                                     unsigned long *num_of_entries)
@@ -1499,7 +1515,7 @@ int get_number_of_vce_state_table_entries(
        const ATOM_PPLIB_VCE_State_Table *vce_table =
                                    get_vce_state_table(hwmgr, table);
 
-       if (vce_table > 0)
+       if (vce_table)
                return vce_table->numEntries;
 
        return 0;
@@ -1589,11 +1605,6 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr)
 
 static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
 {
-       if (NULL != hwmgr->soft_pp_table) {
-               kfree(hwmgr->soft_pp_table);
-               hwmgr->soft_pp_table = NULL;
-       }
-
        if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) {
                kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
                hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
index 30434802417e5254ebbe75a070103596bd33f03e..baddaa75693bce2c8948844cb9cf75ee4ae7d533 100644 (file)
@@ -32,16 +32,19 @@ struct pp_hw_power_state;
 extern const struct pp_table_func pptable_funcs;
 
 typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr,
-                                       struct pp_hw_power_state *hw_ps,
-                                                       unsigned int index,
-                                                const void *clock_info);
+                                               struct pp_hw_power_state *hw_ps,
+                                               unsigned int index,
+                                               const void *clock_info);
 
 int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
-                                    unsigned long *num_of_entries);
+                                unsigned long *num_of_entries);
 
 int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
-                                               unsigned long entry_index,
-                                               struct pp_power_state *ps,
-                               pp_tables_hw_clock_info_callback func);
+                       unsigned long entry_index,
+                       struct pp_power_state *ps,
+                       pp_tables_hw_clock_info_callback func);
+
+int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
+                                uint32_t *vol_rep_time, uint32_t *bb_rep_time);
 
 #endif
index 16fed487973b9bd28396d109ba079086b127379c..072b3b68263700699284532674382dc2b13d18e0 100644 (file)
@@ -1302,7 +1302,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                        table->Smio[count] |=
                                data->mvdd_voltage_table.entries[count].smio_low;
                }
-               table->SmioMask2 = data->vddci_voltage_table.mask_low;
+               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
 
                CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
        }
@@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
                }
        }
 
-       /* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
-       for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
-               data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
-               /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
-               /* param1 is for corresponding std voltage */
-               data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
-       }
-       data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
-
-       if (NULL != allowed_vdd_mclk_table) {
-               /* Initialize Vddci DPM table based on allow Mclk values */
-               for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
-                       data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
-                       data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
-                       data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
-                       data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
-               }
-               data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
-               data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
-       }
-
        /* setup PCIE gen speed levels*/
        tonga_setup_default_pcie_tables(hwmgr);
 
@@ -3047,8 +3026,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
 
        reg_value = 0;
        if ((0 == reg_value) &&
-               (0 == atomctrl_get_pp_assign_pin(hwmgr,
-                       VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) {
+               (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
+                                               &gpio_pin_assignment))) {
                table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_RegulatorHot);
@@ -3061,8 +3040,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
        /* ACDC Switch GPIO */
        reg_value = 0;
        if ((0 == reg_value) &&
-               (0 == atomctrl_get_pp_assign_pin(hwmgr,
-                       PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) {
+               (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+                                               &gpio_pin_assignment))) {
                table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_AutomaticDCTransition);
@@ -3084,8 +3063,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
        }
 
        reg_value = 0;
-       if ((0 == reg_value) &&
-               (0 == atomctrl_get_pp_assign_pin(hwmgr,
+       if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
                        THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_ThermalOutGPIO);
@@ -4443,13 +4421,6 @@ int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
 
 int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (data->soft_pp_table) {
-               kfree(data->soft_pp_table);
-               data->soft_pp_table = NULL;
-       }
-
        return phm_hwmgr_backend_fini(hwmgr);
 }
 
@@ -4463,7 +4434,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
        int result = 0;
        SMU72_Discrete_DpmTable  *table = NULL;
-       tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+       tonga_hwmgr *data;
        pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
        struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
        phw_tonga_ulv_parm *ulv;
@@ -4472,6 +4443,12 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        PP_ASSERT_WITH_CODE((NULL != hwmgr),
                "Invalid Parameter!", return -1;);
 
+       data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
+       if (data == NULL)
+               return -ENOMEM;
+
+       hwmgr->backend = data;
+
        data->dll_defaule_on = 0;
        data->sram_end = SMC_RAM_END;
 
@@ -4510,6 +4487,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
        data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
        data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
+       data->force_pcie_gen = PP_PCIEGenInvalid;
 
        if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
                                VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
@@ -4591,7 +4569,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable,
        * Peak Current Control feature is enabled and we should program PCC HW register
        */
-       if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+       if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
                uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
                                                                                CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
 
@@ -4659,7 +4637,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_gen_cap = 0x30007;
+                       data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
                else
                        data->pcie_gen_cap = (uint32_t)sys_info.value;
                if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -4668,7 +4646,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
                result = cgs_query_system_info(hwmgr->device, &sys_info);
                if (result)
-                       data->pcie_lane_cap = 0x2f0000;
+                       data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
                else
                        data->pcie_lane_cap = (uint32_t)sys_info.value;
        } else {
@@ -6051,42 +6029,6 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
                                CG_FDO_CTRL2, FDO_PWM_MODE);
 }
 
-static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
-                                             hwmgr->soft_pp_table_size,
-                                             GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       *table = (char *)&data->soft_pp_table;
-
-       return hwmgr->soft_pp_table_size;
-}
-
-static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
-{
-       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
-
-       if (!data->soft_pp_table) {
-               data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
-               if (!data->soft_pp_table)
-                       return -ENOMEM;
-       }
-
-       memcpy(data->soft_pp_table, buf, size);
-
-       hwmgr->soft_pp_table = data->soft_pp_table;
-
-       /* TODO: re-init powerplay to implement modified pptable */
-
-       return 0;
-}
-
 static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask)
 {
@@ -6194,11 +6136,96 @@ static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
        return size;
 }
 
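+/*
+ * Report the current sclk overdrive as the percentage by which the top
+ * DPM level exceeds the golden (default) top level.
+ */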
+static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+       struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+       struct tonga_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       int value;
+
+       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
+                       100 /
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return value;
+}
+
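+/*
+ * Apply an sclk overdrive percentage (clamped to 20) by scaling the
+ * golden top level: new_top = golden_top + golden_top * value / 100.
+ */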
+static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+       struct tonga_single_dpm_table *golden_sclk_table =
+                       &(data->golden_dpm_table.sclk_table);
+       struct pp_power_state  *ps;
+       struct tonga_power_state  *tonga_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
+
+       tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
+                       value / 100 +
+                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+
+       return 0;
+}
+
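+/* The mclk overdrive handlers below mirror the sclk pair above. */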
+static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
+{
+       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+       struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+       struct tonga_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       int value;
+
+       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
+                       100 /
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return value;
+}
+
+static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
+{
+       struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
+       struct tonga_single_dpm_table *golden_mclk_table =
+                       &(data->golden_dpm_table.mclk_table);
+       struct pp_power_state  *ps;
+       struct tonga_power_state  *tonga_ps;
+
+       if (value > 20)
+               value = 20;
+
+       ps = hwmgr->request_ps;
+
+       if (ps == NULL)
+               return -EINVAL;
+
+       tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
+
+       tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
+                       value / 100 +
+                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+
+       return 0;
+}
+
 static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
        .backend_init = &tonga_hwmgr_backend_init,
        .backend_fini = &tonga_hwmgr_backend_fini,
        .asic_setup = &tonga_setup_asic_task,
        .dynamic_state_management_enable = &tonga_enable_dpm_tasks,
+       .dynamic_state_management_disable = &tonga_disable_dpm_tasks,
        .apply_state_adjust_rules = tonga_apply_state_adjust_rules,
        .force_dpm_level = &tonga_force_dpm_level,
        .power_state_set = tonga_set_power_state_tasks,
@@ -6232,22 +6259,16 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
        .check_states_equal = tonga_check_states_equal,
        .set_fan_control_mode = tonga_set_fan_control_mode,
        .get_fan_control_mode = tonga_get_fan_control_mode,
-       .get_pp_table = tonga_get_pp_table,
-       .set_pp_table = tonga_set_pp_table,
        .force_clock_level = tonga_force_clock_level,
        .print_clock_levels = tonga_print_clock_levels,
+       .get_sclk_od = tonga_get_sclk_od,
+       .set_sclk_od = tonga_set_sclk_od,
+       .get_mclk_od = tonga_get_mclk_od,
+       .set_mclk_od = tonga_set_mclk_od,
 };
 
 int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
-       tonga_hwmgr  *data;
-
-       data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL);
-       if (data == NULL)
-               return -ENOMEM;
-       memset(data, 0x00, sizeof(tonga_hwmgr));
-
-       hwmgr->backend = data;
        hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
        hwmgr->pptable_func = &tonga_pptable_funcs;
        pp_tonga_thermal_initialize(hwmgr);
index 573cd39fe78d34dd09f0a4b34c074dd160d861bd..3961884bfa9bf688d79e95b2e7d53eba6bea92c5 100644 (file)
@@ -352,9 +352,6 @@ struct tonga_hwmgr {
        bool                           samu_power_gated; /* 1: gated, 0:not gated */
        bool                           acp_power_gated;  /* 1: gated, 0:not gated */
        bool                           pg_acp_init;
-
-       /* soft pptable for re-uploading into smu */
-       void *soft_pp_table;
 };
 
 typedef struct tonga_hwmgr tonga_hwmgr;
index 1b44f4e9b8f55c02fe5faf71ba0687465840fe9d..f127198aafc47a12512ca72bf0a19a2ead685402 100644 (file)
@@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
        ATOM_Tonga_SCLK_Dependency_Record entries[1];                            /* Dynamically allocate entries. */
 } ATOM_Tonga_SCLK_Dependency_Table;
 
+typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+       UCHAR  ucVddInd;                /* Base voltage */
+       USHORT usVddcOffset;            /* Offset relative to base voltage */
+       ULONG  ulSclk;
+       USHORT usEdcCurrent;
+       UCHAR  ucReliabilityTemperature;
+       UCHAR  ucCKSVOffsetandDisable;  /* Bits 0~6: voltage offset for CKS; bit 7: disable/enable for the SCLK level. */
+       ULONG  ulSclkOffset;
+} ATOM_Polaris_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                             /* Number of entries. */
+       ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+} ATOM_Polaris_SCLK_Dependency_Table;
+
 typedef struct _ATOM_Tonga_PCIE_Record {
        UCHAR ucPCIEGenSpeed;
        UCHAR usPCIELaneWidth;
index 10e3630ee39d8d86c4897754274d70b451203328..4662d3d0b7b1c861317e2297aef37cd14fd46263 100644 (file)
@@ -302,7 +302,7 @@ static int init_dpm_2_parameters(
                        (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
 
                if (0 != powerplay_table->usPPMTableOffset) {
-                       if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
+                       if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
                                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_EnablePlatformPowerManagement);
                        }
@@ -408,41 +408,78 @@ static int get_mclk_voltage_dependency_table(
 static int get_sclk_voltage_dependency_table(
                struct pp_hwmgr *hwmgr,
                phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
-               const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table
+               const PPTable_Generic_SubTable_Header *sclk_dep_table
                )
 {
        uint32_t table_size, i;
        phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
 
-       PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries),
-               "Invalid PowerPlay Table!", return -1);
+       if (sclk_dep_table->ucRevId < 1) {
+               const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
+                           (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
 
-       table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-               * sclk_dep_table->ucNumEntries;
+               PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
+                       "Invalid PowerPlay Table!", return -1);
 
-       sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
-               kzalloc(table_size, GFP_KERNEL);
+               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+                       * tonga_table->ucNumEntries;
 
-       if (NULL == sclk_table)
-               return -ENOMEM;
+               sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+                       kzalloc(table_size, GFP_KERNEL);
 
-       memset(sclk_table, 0x00, table_size);
-
-       sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries;
-
-       for (i = 0; i < sclk_dep_table->ucNumEntries; i++) {
-               sclk_table->entries[i].vddInd =
-                       sclk_dep_table->entries[i].ucVddInd;
-               sclk_table->entries[i].vdd_offset =
-                       sclk_dep_table->entries[i].usVddcOffset;
-               sclk_table->entries[i].clk =
-                       sclk_dep_table->entries[i].ulSclk;
-               sclk_table->entries[i].cks_enable =
-                       (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-               sclk_table->entries[i].cks_voffset =
-                       (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
-       }
+               if (NULL == sclk_table)
+                       return -ENOMEM;
 
+               memset(sclk_table, 0x00, table_size);
+
+               sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
+
+               for (i = 0; i < tonga_table->ucNumEntries; i++) {
+                       sclk_table->entries[i].vddInd =
+                               tonga_table->entries[i].ucVddInd;
+                       sclk_table->entries[i].vdd_offset =
+                               tonga_table->entries[i].usVddcOffset;
+                       sclk_table->entries[i].clk =
+                               tonga_table->entries[i].ulSclk;
+                       sclk_table->entries[i].cks_enable =
+                               (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+                       sclk_table->entries[i].cks_voffset =
+                               (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+               }
+       } else {
+               const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
+                           (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+
+               PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+                       * polaris_table->ucNumEntries;
+
+               sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+                       kzalloc(table_size, GFP_KERNEL);
+
+               if (NULL == sclk_table)
+                       return -ENOMEM;
+
+               memset(sclk_table, 0x00, table_size);
+
+               sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
+
+               for (i = 0; i < polaris_table->ucNumEntries; i++) {
+                       sclk_table->entries[i].vddInd =
+                               polaris_table->entries[i].ucVddInd;
+                       sclk_table->entries[i].vdd_offset =
+                               polaris_table->entries[i].usVddcOffset;
+                       sclk_table->entries[i].clk =
+                               polaris_table->entries[i].ulSclk;
+                       sclk_table->entries[i].cks_enable =
+                               (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+                       sclk_table->entries[i].cks_voffset =
+                               (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+                       sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+               }
+       }
        *pp_tonga_sclk_dep_table = sclk_table;
 
        return 0;
@@ -708,8 +745,8 @@ static int init_clock_voltage_dependency(
        const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
                (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
                le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-       const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-               (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
+       const PPTable_Generic_SubTable_Header *sclk_dep_table =
+               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
                le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
        const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
                (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
@@ -1040,48 +1077,41 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
        struct phm_ppt_v1_information *pp_table_information =
                (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
-       if (NULL != hwmgr->soft_pp_table) {
-               kfree(hwmgr->soft_pp_table);
-               hwmgr->soft_pp_table = NULL;
-       }
-
-       if (NULL != pp_table_information->vdd_dep_on_sclk)
-               pp_table_information->vdd_dep_on_sclk = NULL;
+       kfree(pp_table_information->vdd_dep_on_sclk);
+       pp_table_information->vdd_dep_on_sclk = NULL;
 
-       if (NULL != pp_table_information->vdd_dep_on_mclk)
-               pp_table_information->vdd_dep_on_mclk = NULL;
+       kfree(pp_table_information->vdd_dep_on_mclk);
+       pp_table_information->vdd_dep_on_mclk = NULL;
 
-       if (NULL != pp_table_information->valid_mclk_values)
-               pp_table_information->valid_mclk_values = NULL;
+       kfree(pp_table_information->valid_mclk_values);
+       pp_table_information->valid_mclk_values = NULL;
 
-       if (NULL != pp_table_information->valid_sclk_values)
-               pp_table_information->valid_sclk_values = NULL;
+       kfree(pp_table_information->valid_sclk_values);
+       pp_table_information->valid_sclk_values = NULL;
 
-       if (NULL != pp_table_information->vddc_lookup_table)
-               pp_table_information->vddc_lookup_table = NULL;
+       kfree(pp_table_information->vddc_lookup_table);
+       pp_table_information->vddc_lookup_table = NULL;
 
-       if (NULL != pp_table_information->vddgfx_lookup_table)
-               pp_table_information->vddgfx_lookup_table = NULL;
+       kfree(pp_table_information->vddgfx_lookup_table);
+       pp_table_information->vddgfx_lookup_table = NULL;
 
-       if (NULL != pp_table_information->mm_dep_table)
-               pp_table_information->mm_dep_table = NULL;
+       kfree(pp_table_information->mm_dep_table);
+       pp_table_information->mm_dep_table = NULL;
 
-       if (NULL != pp_table_information->cac_dtp_table)
-               pp_table_information->cac_dtp_table = NULL;
+       kfree(pp_table_information->cac_dtp_table);
+       pp_table_information->cac_dtp_table = NULL;
 
-       if (NULL != hwmgr->dyn_state.cac_dtp_table)
-               hwmgr->dyn_state.cac_dtp_table = NULL;
+       kfree(hwmgr->dyn_state.cac_dtp_table);
+       hwmgr->dyn_state.cac_dtp_table = NULL;
 
-       if (NULL != pp_table_information->ppm_parameter_table)
-               pp_table_information->ppm_parameter_table = NULL;
+       kfree(pp_table_information->ppm_parameter_table);
+       pp_table_information->ppm_parameter_table = NULL;
 
-       if (NULL != pp_table_information->pcie_table)
-               pp_table_information->pcie_table = NULL;
+       kfree(pp_table_information->pcie_table);
+       pp_table_information->pcie_table = NULL;
 
-       if (NULL != hwmgr->pptable) {
-               kfree(hwmgr->pptable);
-               hwmgr->pptable = NULL;
-       }
+       kfree(hwmgr->pptable);
+       hwmgr->pptable = NULL;
 
        return result;
 }
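
The conversion above works because kfree(NULL) is defined to be a no-op in the kernel, so each "check for NULL, then clear" pair collapses to an unconditional free plus reset; nulling the pointer afterwards keeps a repeated uninitialize call harmless. The general shape of the idiom, as a sketch with a hypothetical helper:

    /* Hypothetical helper; kfree() tolerates NULL, so no guard is needed. */
    static void free_and_clear(void **ptr)
    {
            kfree(*ptr);
            *ptr = NULL;
    }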
index 50b367d44307c8992b8a17ef78b05c3ede5c09f5..b764c8c05ec8127d64cc7b57ea233eb8cf581c3e 100644 (file)
@@ -132,6 +132,7 @@ struct amd_pp_init {
        uint32_t chip_family;
        uint32_t chip_id;
        uint32_t rev_id;
+       bool powercontainment_enabled;
 };
 enum amd_pp_display_config_type{
        AMD_PP_DisplayConfigType_None = 0,
@@ -342,6 +343,10 @@ struct amd_powerplay_funcs {
        int (*set_pp_table)(void *handle, const char *buf, size_t size);
        int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
        int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+       int (*get_sclk_od)(void *handle);
+       int (*set_sclk_od)(void *handle, uint32_t value);
+       int (*get_mclk_od)(void *handle);
+       int (*set_mclk_od)(void *handle, uint32_t value);
 };
 
 struct amd_powerplay {
@@ -355,6 +360,8 @@ int amd_powerplay_init(struct amd_pp_init *pp_init,
 
 int amd_powerplay_fini(void *handle);
 
+int amd_powerplay_reset(void *handle);
+
 int amd_powerplay_display_configuration_change(void *handle,
                const struct amd_pp_display_configuration *input);
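
The four new overdrive hooks added above follow the existing handle-based dispatch of this API. A hedged sketch of how one wrapper plausibly routes through the hwmgr function table (the pp_instance layout and the exact checks are assumptions, not shown in this patch):

    /* Illustrative dispatch only; names besides get_sclk_od are assumed. */
    static int pp_dpm_get_sclk_od(void *handle)
    {
            struct pp_hwmgr *hwmgr;

            if (handle == NULL)
                    return -EINVAL;

            hwmgr = ((struct pp_instance *)handle)->hwmgr;

            if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
                hwmgr->hwmgr_func->get_sclk_od == NULL)
                    return 0;

            return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
    }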
 
index 56f712c7d07aff6051b1c473ca6936b1c1f512a2..962cb538595117421c0b08d5a3fa4b94ceb1f2b1 100644 (file)
@@ -340,6 +340,7 @@ extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
 extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
 extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
 extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
+extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
 extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
 extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
 extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
index 28f571449495d6e639c6135d4394d8eedaea7587..bf0d2accf7bff5d23e8e89895c468481b03fdb46 100644 (file)
@@ -278,6 +278,8 @@ struct pp_hwmgr_func {
 
        int (*dynamic_state_management_enable)(
                                                struct pp_hwmgr *hw_mgr);
+       int (*dynamic_state_management_disable)(
+                                               struct pp_hwmgr *hw_mgr);
 
        int (*patch_boot_state)(struct pp_hwmgr *hwmgr,
                                     struct pp_hw_power_state *hw_ps);
@@ -333,11 +335,13 @@ struct pp_hwmgr_func {
        int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks);
        int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
        int (*power_off_asic)(struct pp_hwmgr *hwmgr);
-       int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table);
-       int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size);
        int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
        int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
        int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+       int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
+       int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
+       int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
+       int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
 };
 
 struct pp_table_func {
@@ -411,6 +415,8 @@ struct phm_cac_tdp_table {
        uint8_t  ucVr_I2C_Line;
        uint8_t  ucPlx_I2C_address;
        uint8_t  ucPlx_I2C_Line;
+       uint32_t usBoostPowerLimit;
+       uint8_t  ucCKS_LDO_REFSEL;
 };
 
 struct phm_ppm_table {
@@ -578,6 +584,7 @@ struct pp_hwmgr {
        struct pp_smumgr *smumgr;
        const void *soft_pp_table;
        uint32_t soft_pp_table_size;
+       void *hardcode_pp_table;
        bool need_pp_table_upload;
        enum amd_dpm_forced_level dpm_level;
        bool block_hw_access;
@@ -607,6 +614,7 @@ struct pp_hwmgr {
        uint32_t num_ps;
        struct pp_thermal_controller_info thermal_controller;
        bool fan_ctrl_is_in_default_mode;
+       bool powercontainment_enabled;
        uint32_t fan_ctrl_default_mode;
        uint32_t tmin;
        struct phm_microcode_version_info microcode_version_info;
index 0c6a413eaa5be9020333e68ec96a35d49a201e6e..b8f4b73c322e1b0a501cef8a774b0c4265f15cb6 100644 (file)
@@ -27,6 +27,7 @@
 
 #pragma pack(push, 1)
 
+#define PPSMC_MSG_SetGBDroopSettings          ((uint16_t) 0x305)
 
 #define PPSMC_SWSTATE_FLAG_DC                           0x01
 #define PPSMC_SWSTATE_FLAG_UVD                          0x02
@@ -391,6 +392,8 @@ typedef uint16_t PPSMC_Result;
 #define PPSMC_MSG_SetGpuPllDfsForSclk         ((uint16_t) 0x300)
 #define PPSMC_MSG_Didt_Block_Function            ((uint16_t) 0x301)
 
+#define PPSMC_MSG_SetVBITimeout               ((uint16_t) 0x306)
+
 #define PPSMC_MSG_SecureSRBMWrite             ((uint16_t) 0x600)
 #define PPSMC_MSG_SecureSRBMRead              ((uint16_t) 0x601)
 #define PPSMC_MSG_SetAddress                  ((uint16_t) 0x800)
index 3bd5e69b90456641c8b318fa89fc74b6d99ab89f..3df5de2cdab0254becae8ab384e4f8feb2cd2c8d 100644 (file)
@@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device,
 extern int acpi_pcie_perf_request(void *device,
                                                uint8_t perf_req,
                                                bool advertise);
+extern bool acpi_atcs_notify_pcie_device_ready(void *device);
index 1a12d85b8e97c05a186f2b99ef889b841098e5ab..fd10a9fa843d03bce96b63f4952f0d41ed84eba7 100644 (file)
 #define SMU__NUM_LCLK_DPM_LEVELS 8
 #define SMU__NUM_PCIE_DPM_LEVELS 8
 
+#define EXP_M1  35
+#define EXP_M2  92821
+#define EXP_B   66629747
+
+#define EXP_M1_1  365
+#define EXP_M2_1  658700
+#define EXP_B_1   305506134
+
+#define EXP_M1_2  189
+#define EXP_M2_2  379692
+#define EXP_B_2   194609469
+
+#define EXP_M1_3  99
+#define EXP_M2_3  217915
+#define EXP_B_3   122255994
+
+#define EXP_M1_4  51
+#define EXP_M2_4  122643
+#define EXP_B_4   74893384
+
+#define EXP_M1_5  423
+#define EXP_M2_5  1103326
+#define EXP_B_5   728122621
+
 enum SID_OPTION {
        SID_OPTION_HI,
        SID_OPTION_LO,
@@ -548,20 +572,20 @@ struct SMU74_Firmware_Header {
        uint32_t CacConfigTable;
        uint32_t CacStatusTable;
 
-
        uint32_t mcRegisterTable;
 
-
        uint32_t mcArbDramTimingTable;
 
-
-
-
        uint32_t PmFuseTable;
        uint32_t Globals;
        uint32_t ClockStretcherTable;
        uint32_t VftTable;
-       uint32_t Reserved[21];
+       uint32_t Reserved1;
+       uint32_t AvfsTable;
+       uint32_t AvfsCksOffGbvTable;
+       uint32_t AvfsMeanNSigma;
+       uint32_t AvfsSclkOffsetTable;
+       uint32_t Reserved[16];
        uint32_t Signature;
 };
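
The four AVFS table pointers are carved out of the trailing reserved block without changing the header layout: Reserved1 (1 dword) plus the four named entries plus Reserved[16] replaces the old Reserved[21] dword for dword, so Signature keeps its offset and the firmware ABI is preserved. A standalone C11 sketch of how such a carve-out can be checked at compile time (struct names here are illustrative):

    #include <stddef.h>

    struct tail_old { unsigned int Reserved[21]; unsigned int Signature; };
    struct tail_new {
            unsigned int Reserved1;
            unsigned int AvfsTable;
            unsigned int AvfsCksOffGbvTable;
            unsigned int AvfsMeanNSigma;
            unsigned int AvfsSclkOffsetTable;
            unsigned int Reserved[16];
            unsigned int Signature;
    };

    _Static_assert(sizeof(struct tail_old) == sizeof(struct tail_new),
                   "carve-out changed the header size");
    _Static_assert(offsetof(struct tail_old, Signature) ==
                   offsetof(struct tail_new, Signature),
                   "Signature moved");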
 
@@ -701,8 +725,6 @@ VR Config info is contained in dpmTable.VRConfig */
 struct SMU_ClockStretcherDataTableEntry {
        uint8_t minVID;
        uint8_t maxVID;
-
-
        uint16_t setting;
 };
 typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
@@ -769,6 +791,43 @@ struct VFT_TABLE_t {
 typedef struct VFT_TABLE_t VFT_TABLE_t;
 
 
+/* Total margin, root mean square of Fmax + DC + Platform */
+struct AVFS_Margin_t {
+       VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_Margin_t AVFS_Margin_t;
+
+#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
+#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
+
+struct GB_VDROOP_TABLE_t {
+       int32_t a0;
+       int32_t a1;
+       int32_t a2;
+       uint32_t spare;
+};
+typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
+
+struct AVFS_CksOff_Gbv_t {
+       VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
+
+struct AVFS_meanNsigma_t {
+       uint32_t Aconstant[3];
+       uint16_t DC_tol_sigma;
+       uint16_t Platform_mean;
+       uint16_t Platform_sigma;
+       uint16_t PSM_Age_CompFactor;
+       uint8_t  Static_Voltage_Offset[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
+
+struct AVFS_Sclk_Offset_t {
+       uint16_t Sclk_Offset[8];
+};
+typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
+
 #endif
 
 
index 0dfe82336dc73f439a8d2dcca5043cf6a3661727..899d6d8108c210fb6b5548f3f965e29a02b80f8b 100644 (file)
@@ -223,6 +223,16 @@ struct SMU74_Discrete_StateInfo {
 
 typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo;
 
+struct SMU_QuadraticCoeffs {
+       int32_t m1;
+       uint32_t b;
+
+       int16_t m2;
+       uint8_t m1_shift;
+       uint8_t m2_shift;
+};
+typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
+
 struct SMU74_Discrete_DpmTable {
 
        SMU74_PIDController                  GraphicsPIDController;
@@ -258,7 +268,15 @@ struct SMU74_Discrete_DpmTable {
        uint8_t                             ThermOutPolarity;
        uint8_t                             ThermOutMode;
        uint8_t                             BootPhases;
-       uint32_t                            Reserved[4];
+
+       uint8_t                             VRHotLevel;
+       uint8_t                             LdoRefSel;
+       uint8_t                             Reserved1[2];
+       uint16_t                            FanStartTemperature;
+       uint16_t                            FanStopTemperature;
+       uint16_t                            MaxVoltage;
+       uint16_t                            Reserved2;
+       uint32_t                            Reserved[1];
 
        SMU74_Discrete_GraphicsLevel        GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS];
        SMU74_Discrete_MemoryLevel          MemoryACPILevel;
@@ -347,6 +365,8 @@ struct SMU74_Discrete_DpmTable {
 
        uint32_t                            CurrSclkPllRange;
        sclkFcwRange_t                      SclkFcwRangeTable[NUM_SCLK_RANGE];
+       GB_VDROOP_TABLE_t                   BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
+       SMU_QuadraticCoeffs                 AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
 };
 
 typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable;
@@ -550,16 +570,6 @@ struct SMU7_AcpiScoreboard {
 
 typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
 
-struct SMU_QuadraticCoeffs {
-       int32_t m1;
-       uint32_t b;
-
-       int16_t m2;
-       uint8_t m1_shift;
-       uint8_t m2_shift;
-};
-typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
-
 struct SMU74_Discrete_PmFuses {
        uint8_t BapmVddCVidHiSidd[8];
        uint8_t BapmVddCVidLoSidd[8];
@@ -821,6 +831,17 @@ typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
 #define DB_PCC_SHIFT 26 
 #define DB_EDC_SHIFT 27
 
+#define BTCGB0_Vdroop_Enable_MASK  0x1
+#define BTCGB1_Vdroop_Enable_MASK  0x2
+#define AVFSGB0_Vdroop_Enable_MASK 0x4
+#define AVFSGB1_Vdroop_Enable_MASK 0x8
+
+#define BTCGB0_Vdroop_Enable_SHIFT  0
+#define BTCGB1_Vdroop_Enable_SHIFT  1
+#define AVFSGB0_Vdroop_Enable_SHIFT 2
+#define AVFSGB1_Vdroop_Enable_SHIFT 3
+
+
 #pragma pack(pop)
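
The Vdroop enable bits added above pack four per-block flags into one word; each flag is recovered with its matching mask-and-shift pair. A minimal sketch (the name of the word holding the flags is an assumption):

    /* Decode one packed enable flag; `settings' is a hypothetical word. */
    static inline bool avfsgb0_vdroop_enabled(uint32_t settings)
    {
            return (settings & AVFSGB0_Vdroop_Enable_MASK) >>
                    AVFSGB0_Vdroop_Enable_SHIFT;
    }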
 
 
index fc9e3d1dd4098fe4115c19099c6e98e52dea1c1d..3c235f0177cd9e0667b9d0deab7860efec7ac061 100644 (file)
@@ -131,6 +131,12 @@ extern int smu_free_memory(void *device, void *handle);
        smum_wait_on_indirect_register(smumgr,                          \
                                mm##port##_INDEX, index, value, mask)
 
+#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask)    \
+           SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval)                          \
+           SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+                                   SMUM_FIELD_MASK(reg, field) )
 
 #define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr,         \
                                                        index, value, mask) \
@@ -158,6 +164,10 @@ extern int smu_free_memory(void *device, void *handle);
                (SMUM_FIELD_MASK(reg, field) & ((field_val) <<                 \
                        SMUM_FIELD_SHIFT(reg, field))))
 
+#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \
+           SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
+                          reg, field)
+
 #define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr,           \
                                port, index, value, mask)               \
        smum_wait_on_indirect_register(smumgr,                          \
@@ -191,6 +201,13 @@ extern int smu_free_memory(void *device, void *handle);
                        SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
                        reg, field, fieldval))
 
+
+#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval)                  \
+               cgs_write_ind_register(device, port, ix##reg,                           \
+                       SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg),    \
+                                      reg, field, fieldval))
+
+
 #define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
        SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg,             \
                (fieldval) << SMUM_FIELD_SHIFT(reg, field),             \
@@ -200,4 +217,16 @@ extern int smu_free_memory(void *device, void *handle);
        SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg,     \
                (fieldval) << SMUM_FIELD_SHIFT(reg, field),             \
                SMUM_FIELD_MASK(reg, field))
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask)    \
+       smum_wait_for_indirect_register_unequal(smumgr,                 \
+               mm##port##_INDEX, index, value, mask)
+
+#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask)    \
+           SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+
+#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval)                          \
+           SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
+                                   SMUM_FIELD_MASK(reg, field) )
+
 #endif
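
The new SMUM_*_INDIRECT_* macros rely on token pasting: the port argument selects the mm<port>_INDEX register and the register argument selects its ix<reg> indirect offset, both of which the register headers define. A standalone toy with dummy values showing the mechanism:

    #include <stdio.h>

    #define mmSMC_IND_INDEX 0x01ac  /* dummy index-register address */
    #define ixSMC_RESP_0    0x0008  /* dummy indirect offset */

    #define WAIT_INDIRECT_REGISTER(port, reg) \
            wait_on_indirect(mm##port##_INDEX, ix##reg)

    static void wait_on_indirect(unsigned int index_reg, unsigned int offset)
    {
            printf("poll via index 0x%x at offset 0x%x\n", index_reg, offset);
    }

    int main(void)
    {
            WAIT_INDIRECT_REGISTER(SMC_IND, SMC_RESP_0);
            return 0;
    }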
index 673a75c74e18a33c2a42f0ae92bca8e40ee30603..8e52a2e82db58c4de3ffc7ff3903bbabec037f4a 100644 (file)
@@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
 
 static int fiji_smu_fini(struct pp_smumgr *smumgr)
 {
+       struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+
+       if (priv)
+               smu_free_memory(smumgr->device,
+                               (void *)priv->header_buffer.handle);
+
        if (smumgr->backend) {
                kfree(smumgr->backend);
                smumgr->backend = NULL;
        }
+
+       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
        return 0;
 }
 
index de618ead9db8df73e10ee0da9dac0cad940049b3..5dba7c50971020fe1ff3a052970fd14cb8860b09 100644 (file)
 static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
        /*  Min      pcie   DeepSleep Activity  CgSpll      CgSpll    CcPwr  CcPwr  Sclk         Enabled      Enabled                       Voltage    Power */
        /* Voltage, DpmLevel, DivId,  Level,  FuncCntl3,  FuncCntl4,  DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
-       { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } }
+       { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+       { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+       { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } },
+       { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+       { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } },
+       { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+       { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } },
+       { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
 };
 
 static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
-       {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00,
-        0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00};
+       {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
 
 /**
 * Set the address for reading/writing the SMC SRAM space.
@@ -219,6 +218,18 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
        && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
 }
 
+static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+       uint32_t efuse;
+
+       efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+       return !!(efuse & 0x00000001);
+}
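
The fuse read above addresses dword 49 of the SMU efuse block (ixSMU_EFUSE_0 + 49 * 4) and tests bit 0, which evidently reports hardware AVFS support on Polaris. A generalized sketch with a hypothetical helper name:

    /* Hypothetical helper: return bit `bit' of efuse dword `dword'. */
    static bool smu_efuse_bit(struct pp_smumgr *smumgr, unsigned int dword,
                              unsigned int bit)
    {
            uint32_t val = cgs_read_ind_register(smumgr->device,
                                                 CGS_IND_REG__SMC,
                                                 ixSMU_EFUSE_0 + dword * 4);

            return (val >> bit) & 1;
    }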
+
 /**
 * Send a message to the SMC, and wait for its response.
 *
@@ -228,21 +239,27 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
 */
 int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
 {
+       int ret;
+
        if (!polaris10_is_smc_ram_running(smumgr))
                return -1;
 
        SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
 
-       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
-               printk("Failed to send Previous Message.\n");
+       ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
 
+       if (ret != 1)
+               printk("failed to send previous message 0x%x, SMC response was %d\n", msg, ret);
 
        cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
 
        SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
 
-       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
-               printk("Failed to send Message.\n");
+       ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+       if (ret != 1)
+               printk("failed to send message 0x%x, SMC response was %d\n", msg, ret);
 
        return 0;
 }
@@ -469,6 +486,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr)
                kfree(smumgr->backend);
                smumgr->backend = NULL;
        }
+       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
        return 0;
 }
 
@@ -952,6 +970,11 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
                (cgs_handle_t)smu_data->smu_buffer.handle);
                return -1;);
 
+       if (polaris10_is_hw_avfs_present(smumgr))
+               smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
+       else
+               smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+
        return 0;
 }
 
index c483baf6b4fbc9bb8b76d293c772f727631c0726..7723473e51a073cea3365d0c9b32b4a211bd51bc 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
 #include "pp_instance.h"
 #include "smumgr.h"
 #include "cgs_common.h"
@@ -52,10 +53,10 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
        handle->smu_mgr = smumgr;
 
        switch (smumgr->chip_family) {
-       case AMD_FAMILY_CZ:
+       case AMDGPU_FAMILY_CZ:
                cz_smum_init(smumgr);
                break;
-       case AMD_FAMILY_VI:
+       case AMDGPU_FAMILY_VI:
                switch (smumgr->chip_id) {
                case CHIP_TONGA:
                        tonga_smum_init(smumgr);
@@ -81,6 +82,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 
 int smum_fini(struct pp_smumgr *smumgr)
 {
+       kfree(smumgr->device);
        kfree(smumgr);
        return 0;
 }
index 32820b680d8886104a1e8c28f7324489e9c4c375..b22722eabafcbc53477dd284a2671326443ec011 100644 (file)
@@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
 
 static int tonga_smu_fini(struct pp_smumgr *smumgr)
 {
+       struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
+
+       if (priv) {
+               smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
+               smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+       }
+
        if (smumgr->backend != NULL) {
                kfree(smumgr->backend);
                smumgr->backend = NULL;
        }
+
+       cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
        return 0;
 }
 
index c89dc777768f8cdabe3ce882483c134faa6aefa7..b961a1c6caf30ea09e8eea2dae6f320cafb81d09 100644 (file)
@@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job,
            TP_fast_assign(
                           __entry->entity = sched_job->s_entity;
                           __entry->sched_job = sched_job;
-                          __entry->fence = &sched_job->s_fence->base;
+                          __entry->fence = &sched_job->s_fence->finished;
                           __entry->name = sched_job->sched->name;
                           __entry->job_count = kfifo_len(
                                   &sched_job->s_entity->job_queue) / sizeof(sched_job);
@@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job,
                    ),
 
            TP_fast_assign(
-                   __entry->fence = &fence->base;
+                   __entry->fence = &fence->finished;
                    ),
            TP_printk("fence=%p signaled", __entry->fence)
 );
index c16248cee7799221799c15a1dc707c83fd2b295f..70ff09d10885f3d19828233f6bddcc1e227add5c 100644 (file)
@@ -32,6 +32,7 @@
 
 static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
+static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
 
 struct kmem_cache *sched_fence_slab;
 atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -140,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                return r;
 
        atomic_set(&entity->fence_seq, 0);
-       entity->fence_context = fence_context_alloc(1);
+       entity->fence_context = fence_context_alloc(2);
 
        return 0;
 }
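
Each entity now reserves two consecutive fence contexts because every job carries a fence pair: `scheduled' (signaled when the scheduler picks the job up) lives at fence_context, and `finished' (signaled when the hardware completes it) at fence_context + 1, matching the two fence_init() calls in amd_sched_fence_create() further down. A small sketch of how a caller could wait on either milestone:

    /* Sketch only: pick which milestone of the job to wait for. */
    static long wait_for_job(struct amd_sched_fence *s_fence, bool hw_done)
    {
            struct fence *f = hw_done ? &s_fence->finished
                                      : &s_fence->scheduled;

            return fence_wait(f, true);     /* interruptible wait */
    }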
@@ -251,17 +252,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
 
        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {
-               /* Fence is from the same scheduler */
-               if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
-                       /* Ignore it when it is already scheduled */
-                       fence_put(entity->dependency);
-                       return false;
-               }
 
-               /* Wait for fence to be scheduled */
-               entity->cb.func = amd_sched_entity_clear_dep;
-               list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
-               return true;
+               /*
+                * Fence is from the same scheduler, only need to wait for
+                * it to be scheduled
+                */
+               fence = fence_get(&s_fence->scheduled);
+               fence_put(entity->dependency);
+               entity->dependency = fence;
+               if (!fence_add_callback(fence, &entity->cb,
+                                       amd_sched_entity_clear_dep))
+                       return true;
+
+               /* Ignore it when it is already scheduled */
+               fence_put(fence);
+               return false;
        }
 
        if (!fence_add_callback(entity->dependency, &entity->cb,
@@ -319,46 +324,108 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
        return added;
 }
 
-static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
-       struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
-       schedule_work(&job->work_free_job);
-}
-
 /* job_finish is called after the hw fence is signaled; it removes the
  * job from the ring_mirror_list and frees it
  */
-void amd_sched_job_finish(struct amd_sched_job *s_job)
+static void amd_sched_job_finish(struct work_struct *work)
 {
-       struct amd_sched_job *next;
+       struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+                                                  finish_work);
        struct amd_gpu_scheduler *sched = s_job->sched;
 
+       /* remove job from ring_mirror_list */
+       spin_lock(&sched->job_list_lock);
+       list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-               if (cancel_delayed_work(&s_job->work_tdr))
-                       amd_sched_job_put(s_job);
+               struct amd_sched_job *next;
+
+               spin_unlock(&sched->job_list_lock);
+               cancel_delayed_work_sync(&s_job->work_tdr);
+               spin_lock(&sched->job_list_lock);
 
                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct amd_sched_job, node);
 
-               if (next) {
-                       INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
-                       amd_sched_job_get(next);
+               if (next)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
-               }
        }
+       spin_unlock(&sched->job_list_lock);
+       sched->ops->free_job(s_job);
 }
 
-void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
+{
+       struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+                                                finish_cb);
+       schedule_work(&job->finish_work);
+}
+
+static void amd_sched_job_begin(struct amd_sched_job *s_job)
 {
        struct amd_gpu_scheduler *sched = s_job->sched;
 
+       spin_lock(&sched->job_list_lock);
+       list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-               list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
-       {
-               INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
-               amd_sched_job_get(s_job);
+           list_first_entry_or_null(&sched->ring_mirror_list,
+                                    struct amd_sched_job, node) == s_job)
+               schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+       spin_unlock(&sched->job_list_lock);
+}
+
+static void amd_sched_job_timedout(struct work_struct *work)
+{
+       struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+                                                work_tdr.work);
+
+       job->sched->ops->timedout_job(job);
+}
+
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
+{
+       struct amd_sched_job *s_job;
+
+       spin_lock(&sched->job_list_lock);
+       list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+               if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
+                       fence_put(s_job->s_fence->parent);
+                       s_job->s_fence->parent = NULL;
+               }
+       }
+       spin_unlock(&sched->job_list_lock);
+}
+
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+{
+       struct amd_sched_job *s_job;
+       int r;
+
+       spin_lock(&sched->job_list_lock);
+       s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+                                        struct amd_sched_job, node);
+       if (s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+
+       list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+               struct amd_sched_fence *s_fence = s_job->s_fence;
+               struct fence *fence = sched->ops->run_job(s_job);
+               if (fence) {
+                       s_fence->parent = fence_get(fence);
+                       r = fence_add_callback(fence, &s_fence->cb,
+                                              amd_sched_process_job);
+                       if (r == -ENOENT)
+                               amd_sched_process_job(fence, &s_fence->cb);
+                       else if (r)
+                               DRM_ERROR("fence add callback failed (%d)\n",
+                                         r);
+                       fence_put(fence);
+               } else {
+                       DRM_ERROR("Failed to run job!\n");
+                       amd_sched_process_job(NULL, &s_fence->cb);
+               }
        }
+       spin_unlock(&sched->job_list_lock);
 }
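
amd_sched_hw_job_reset() and amd_sched_job_recovery() are designed as a pair around a GPU reset: the first detaches the parent hardware-fence callbacks so stale completions cannot fire, the second re-runs every job still on the mirror list. A hedged sketch of a driver-side reset path (the ring field name is an assumption):

    /* Illustrative only; amdgpu's real reset path has more steps. */
    amd_sched_hw_job_reset(&ring->sched);   /* drop HW fence callbacks */
    /* ... reset and reinitialize the hardware ... */
    amd_sched_job_recovery(&ring->sched);   /* resubmit mirrored jobs */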
 
 /**
@@ -372,36 +439,29 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
        struct amd_sched_entity *entity = sched_job->s_entity;
 
-       sched_job->use_sched = 1;
-       fence_add_callback(&sched_job->s_fence->base,
-                                       &sched_job->cb_free_job, amd_sched_free_job);
        trace_amd_sched_job(sched_job);
+       fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
+                          amd_sched_job_finish_cb);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
 }
 
 /* init a sched_job with basic field */
 int amd_sched_job_init(struct amd_sched_job *job,
-                                               struct amd_gpu_scheduler *sched,
-                                               struct amd_sched_entity *entity,
-                                               void (*timeout_cb)(struct work_struct *work),
-                                               void (*free_cb)(struct kref *refcount),
-                                               void *owner, struct fence **fence)
+                      struct amd_gpu_scheduler *sched,
+                      struct amd_sched_entity *entity,
+                      void *owner)
 {
-       INIT_LIST_HEAD(&job->node);
-       kref_init(&job->refcount);
        job->sched = sched;
        job->s_entity = entity;
        job->s_fence = amd_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;
 
-       job->s_fence->s_job = job;
-       job->timeout_callback = timeout_cb;
-       job->free_callback = free_cb;
+       INIT_WORK(&job->finish_work, amd_sched_job_finish);
+       INIT_LIST_HEAD(&job->node);
+       INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
 
-       if (fence)
-               *fence = &job->s_fence->base;
        return 0;
 }
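
With the timeout and free callbacks gone from the signature (they now come from amd_sched_backend_ops), job setup shrinks to init plus push. A hedged usage sketch with hypothetical variable names:

    /* Sketch: typical submission after this rework. */
    r = amd_sched_job_init(&job->base, &ring->sched, &ctx->entity, owner);
    if (r)
            return r;

    amd_sched_entity_push_job(&job->base);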
 
@@ -450,23 +510,25 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;
-       unsigned long flags;
 
        atomic_dec(&sched->hw_rq_count);
-
-       /* remove job from ring_mirror_list */
-       spin_lock_irqsave(&sched->job_list_lock, flags);
-       list_del_init(&s_fence->s_job->node);
-       sched->ops->finish_job(s_fence->s_job);
-       spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
-       amd_sched_fence_signal(s_fence);
+       amd_sched_fence_finished(s_fence);
 
        trace_amd_sched_process_job(s_fence);
-       fence_put(&s_fence->base);
+       fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
 }
 
+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+       if (kthread_should_park()) {
+               kthread_parkme();
+               return true;
+       }
+
+       return false;
+}
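
The main loop's new amd_sched_blocked() check lets the scheduler kthread be parked at a safe point, which the reset path above depends on. A hedged sketch of the caller side (the `thread' field is an assumption, not visible in this hunk):

    /* Sketch: quiesce the scheduler around a reset. */
    kthread_park(sched->thread);
    /* ... amd_sched_hw_job_reset(), hardware reset ... */
    kthread_unpark(sched->thread);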
+
 static int amd_sched_main(void *param)
 {
        struct sched_param sparam = {.sched_priority = 1};
@@ -476,14 +538,15 @@ static int amd_sched_main(void *param)
        sched_setscheduler(current, SCHED_FIFO, &sparam);
 
        while (!kthread_should_stop()) {
-               struct amd_sched_entity *entity;
+               struct amd_sched_entity *entity = NULL;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;
 
                wait_event_interruptible(sched->wake_up_worker,
-                       (entity = amd_sched_select_entity(sched)) ||
-                       kthread_should_stop());
+                                        (!amd_sched_blocked(sched) &&
+                                         (entity = amd_sched_select_entity(sched))) ||
+                                        kthread_should_stop());
 
                if (!entity)
                        continue;
@@ -495,16 +558,19 @@ static int amd_sched_main(void *param)
                s_fence = sched_job->s_fence;
 
                atomic_inc(&sched->hw_rq_count);
-               amd_sched_job_pre_schedule(sched, sched_job);
+               amd_sched_job_begin(sched_job);
+
                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
                if (fence) {
+                       s_fence->parent = fence_get(fence);
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
-                               DRM_ERROR("fence add callback failed (%d)\n", r);
+                               DRM_ERROR("fence add callback failed (%d)\n",
+                                         r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
index 070095a9433c3f9e2aac2a25484e21169e462329..7cbbbfb502ef1342caa2a5b36aa970368fed5055 100644 (file)
@@ -27,8 +27,6 @@
 #include <linux/kfifo.h>
 #include <linux/fence.h>
 
-#define AMD_SCHED_FENCE_SCHEDULED_BIT  FENCE_FLAG_USER_BITS
-
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
@@ -68,36 +66,34 @@ struct amd_sched_rq {
 };
 
 struct amd_sched_fence {
-       struct fence                    base;
+       struct fence                    scheduled;
+       struct fence                    finished;
        struct fence_cb                 cb;
-       struct list_head                scheduled_cb;
+       struct fence                    *parent;
        struct amd_gpu_scheduler        *sched;
        spinlock_t                      lock;
        void                            *owner;
-       struct amd_sched_job    *s_job;
 };
 
 struct amd_sched_job {
-       struct kref refcount;
        struct amd_gpu_scheduler        *sched;
        struct amd_sched_entity         *s_entity;
        struct amd_sched_fence          *s_fence;
-       bool    use_sched;      /* true if the job goes to scheduler */
-       struct fence_cb                cb_free_job;
-       struct work_struct             work_free_job;
-       struct list_head                           node;
-       struct delayed_work work_tdr;
-       void (*timeout_callback) (struct work_struct *work);
-       void (*free_callback)(struct kref *refcount);
+       struct fence_cb                 finish_cb;
+       struct work_struct              finish_work;
+       struct list_head                node;
+       struct delayed_work             work_tdr;
 };
 
-extern const struct fence_ops amd_sched_fence_ops;
+extern const struct fence_ops amd_sched_fence_ops_scheduled;
+extern const struct fence_ops amd_sched_fence_ops_finished;
 static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 {
-       struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+       if (f->ops == &amd_sched_fence_ops_scheduled)
+               return container_of(f, struct amd_sched_fence, scheduled);
 
-       if (__f->base.ops == &amd_sched_fence_ops)
-               return __f;
+       if (f->ops == &amd_sched_fence_ops_finished)
+               return container_of(f, struct amd_sched_fence, finished);
 
        return NULL;
 }
@@ -109,8 +105,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 struct amd_sched_backend_ops {
        struct fence *(*dependency)(struct amd_sched_job *sched_job);
        struct fence *(*run_job)(struct amd_sched_job *sched_job);
-       void (*begin_job)(struct amd_sched_job *sched_job);
-       void (*finish_job)(struct amd_sched_job *sched_job);
+       void (*timedout_job)(struct amd_sched_job *sched_job);
+       void (*free_job)(struct amd_sched_job *sched_job);
 };
 
 enum amd_sched_priority {
@@ -152,25 +148,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 struct amd_sched_fence *amd_sched_fence_create(
        struct amd_sched_entity *s_entity, void *owner);
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
-void amd_sched_fence_signal(struct amd_sched_fence *fence);
+void amd_sched_fence_finished(struct amd_sched_fence *fence);
 int amd_sched_job_init(struct amd_sched_job *job,
-                                       struct amd_gpu_scheduler *sched,
-                                       struct amd_sched_entity *entity,
-                                       void (*timeout_cb)(struct work_struct *work),
-                                       void (*free_cb)(struct kref* refcount),
-                                       void *owner, struct fence **fence);
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
-                                                               struct amd_sched_job *s_job);
-void amd_sched_job_finish(struct amd_sched_job *s_job);
-void amd_sched_job_begin(struct amd_sched_job *s_job);
-static inline void amd_sched_job_get(struct amd_sched_job *job) {
-       if (job)
-               kref_get(&job->refcount);
-}
-
-static inline void amd_sched_job_put(struct amd_sched_job *job) {
-       if (job)
-               kref_put(&job->refcount, job->free_callback);
-}
-
+                      struct amd_gpu_scheduler *sched,
+                      struct amd_sched_entity *entity,
+                      void *owner);
+void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
+void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
 #endif
index 2a732c4903755db7d3838f9b6c8ae00ee2804309..6b63beaf75746848720f98d4eb2b5329c299267b 100644 (file)
@@ -27,7 +27,8 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+                                              void *owner)
 {
        struct amd_sched_fence *fence = NULL;
        unsigned seq;
@@ -36,46 +37,37 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
        if (fence == NULL)
                return NULL;
 
-       INIT_LIST_HEAD(&fence->scheduled_cb);
        fence->owner = owner;
-       fence->sched = s_entity->sched;
+       fence->sched = entity->sched;
        spin_lock_init(&fence->lock);
 
-       seq = atomic_inc_return(&s_entity->fence_seq);
-       fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
-                  s_entity->fence_context, seq);
+       seq = atomic_inc_return(&entity->fence_seq);
+       fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
+                  &fence->lock, entity->fence_context, seq);
+       fence_init(&fence->finished, &amd_sched_fence_ops_finished,
+                  &fence->lock, entity->fence_context + 1, seq);
 
        return fence;
 }
 
-void amd_sched_fence_signal(struct amd_sched_fence *fence)
+void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
 {
-       int ret = fence_signal(&fence->base);
+       int ret = fence_signal(&fence->scheduled);
+
        if (!ret)
-               FENCE_TRACE(&fence->base, "signaled from irq context\n");
+               FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
        else
-               FENCE_TRACE(&fence->base, "was already signaled\n");
-}
-
-void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
-                               struct amd_sched_job *s_job)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&sched->job_list_lock, flags);
-       list_add_tail(&s_job->node, &sched->ring_mirror_list);
-       sched->ops->begin_job(s_job);
-       spin_unlock_irqrestore(&sched->job_list_lock, flags);
+               FENCE_TRACE(&fence->scheduled, "was already signaled\n");
 }
 
-void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
+void amd_sched_fence_finished(struct amd_sched_fence *fence)
 {
-       struct fence_cb *cur, *tmp;
+       int ret = fence_signal(&fence->finished);
 
-       set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
-       list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
-               list_del_init(&cur->node);
-               cur->func(&s_fence->base, cur);
-       }
+       if (!ret)
+               FENCE_TRACE(&fence->finished, "signaled from irq context\n");
+       else
+               FENCE_TRACE(&fence->finished, "was already signaled\n");
 }
 
 static const char *amd_sched_fence_get_driver_name(struct fence *fence)
@@ -105,6 +97,8 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
 {
        struct fence *f = container_of(rcu, struct fence, rcu);
        struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+       fence_put(fence->parent);
        kmem_cache_free(sched_fence_slab, fence);
 }
 
@@ -116,16 +110,41 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
  * This function is called when the reference count becomes zero.
  * It just RCU schedules freeing up the fence.
  */
-static void amd_sched_fence_release(struct fence *f)
+static void amd_sched_fence_release_scheduled(struct fence *f)
 {
-       call_rcu(&f->rcu, amd_sched_fence_free);
+       struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+       call_rcu(&fence->finished.rcu, amd_sched_fence_free);
 }
 
-const struct fence_ops amd_sched_fence_ops = {
+/**
+ * amd_sched_fence_release_finished - drop extra reference
+ *
+ * @f: fence
+ *
+ * Drop the extra reference that the finished fence holds on the scheduled fence.
+ */
+static void amd_sched_fence_release_finished(struct fence *f)
+{
+       struct amd_sched_fence *fence = to_amd_sched_fence(f);
+
+       fence_put(&fence->scheduled);
+}
+
+const struct fence_ops amd_sched_fence_ops_scheduled = {
+       .get_driver_name = amd_sched_fence_get_driver_name,
+       .get_timeline_name = amd_sched_fence_get_timeline_name,
+       .enable_signaling = amd_sched_fence_enable_signaling,
+       .signaled = NULL,
+       .wait = fence_default_wait,
+       .release = amd_sched_fence_release_scheduled,
+};
+
+const struct fence_ops amd_sched_fence_ops_finished = {
        .get_driver_name = amd_sched_fence_get_driver_name,
        .get_timeline_name = amd_sched_fence_get_timeline_name,
        .enable_signaling = amd_sched_fence_enable_signaling,
        .signaled = NULL,
        .wait = fence_default_wait,
-       .release = amd_sched_fence_release,
+       .release = amd_sched_fence_release_finished,
 };
index f9a13b658fea667e9f5592fe6f619f3d9929ecc3..f47d88ba4fa53c24979adbcaa9ba2172dd2b77c4 100644 (file)
@@ -2,7 +2,6 @@ config DRM_ARCPGU
        tristate "ARC PGU"
        depends on DRM && OF
        select DRM_KMS_CMA_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_KMS_HELPER
        help
          Choose this option if you have an ARC PGU controller.
index d48fda70f857dfecd1892745d3f9ce5fa291fc27..73de56a0139a840c96907908139d17eeeb0f8444 100644 (file)
@@ -1,2 +1,2 @@
-arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_drv.o
+arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_sim.o arcpgu_drv.o
 obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
index 86574b698a78509145452795c4e0aa56690d5f4f..e8fcf3ab1d9a9d297287ea15b80493acbe4e40a4 100644 (file)
@@ -22,7 +22,6 @@ struct arcpgu_drm_private {
        struct clk              *clk;
        struct drm_fbdev_cma    *fbdev;
        struct drm_framebuffer  *fb;
-       struct list_head        event_list;
        struct drm_crtc         crtc;
        struct drm_plane        *plane;
 };
@@ -43,6 +42,7 @@ static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
 
 int arc_pgu_setup_crtc(struct drm_device *dev);
 int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
+int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np);
 struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev,
        unsigned int preferred_bpp, unsigned int num_crtc,
        unsigned int max_conn_count);
index 92f8beff8e6076ca36e01f36b114ebf5623a200e..ee0a61c2861b5a33a56c09627db566eb7b1c6a8a 100644 (file)
@@ -145,20 +145,14 @@ static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc,
 static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
                                      struct drm_crtc_state *state)
 {
-       struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
-       unsigned long flags;
-
-       if (crtc->state->event) {
-               struct drm_pending_vblank_event *event = crtc->state->event;
+       struct drm_pending_vblank_event *event = crtc->state->event;
 
+       if (event) {
                crtc->state->event = NULL;
-               event->pipe = drm_crtc_index(crtc);
-
-               WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
-               spin_lock_irqsave(&crtc->dev->event_lock, flags);
-               list_add_tail(&event->base.link, &arcpgu->event_list);
-               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+               spin_lock_irq(&crtc->dev->event_lock);
+               drm_crtc_send_vblank_event(crtc, event);
+               spin_unlock_irq(&crtc->dev->event_lock);
        }
 }
 
index 76e187a5bde021fe523c5af5583a72f09413c4c4..6d4ff34737cb73746ce37dd7b4c00293d74c1283 100644 (file)
@@ -28,21 +28,14 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
 {
        struct arcpgu_drm_private *arcpgu = dev->dev_private;
 
-       if (arcpgu->fbdev)
-               drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
-}
-
-static int arcpgu_atomic_commit(struct drm_device *dev,
-                                   struct drm_atomic_state *state, bool async)
-{
-       return drm_atomic_helper_commit(dev, state, false);
+       drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
 }
 
 static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
        .fb_create  = drm_fb_cma_create,
        .output_poll_changed = arcpgu_fb_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
-       .atomic_commit = arcpgu_atomic_commit,
+       .atomic_commit = drm_atomic_helper_commit,
 };
 
 static void arcpgu_setup_mode_config(struct drm_device *drm)
@@ -55,7 +48,7 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
        drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
 }
 
-int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        int ret;
 
@@ -81,22 +74,6 @@ static const struct file_operations arcpgu_drm_ops = {
        .mmap = arcpgu_gem_mmap,
 };
 
-static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file)
-{
-       struct arcpgu_drm_private *arcpgu = drm->dev_private;
-       struct drm_pending_vblank_event *e, *t;
-       unsigned long flags;
-
-       spin_lock_irqsave(&drm->event_lock, flags);
-       list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) {
-               if (e->base.file_priv != file)
-                       continue;
-               list_del(&e->base.link);
-               e->base.destroy(&e->base);
-       }
-       spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
 static void arcpgu_lastclose(struct drm_device *drm)
 {
        struct arcpgu_drm_private *arcpgu = drm->dev_private;
@@ -122,16 +99,12 @@ static int arcpgu_load(struct drm_device *drm)
        if (IS_ERR(arcpgu->clk))
                return PTR_ERR(arcpgu->clk);
 
-       INIT_LIST_HEAD(&arcpgu->event_list);
-
        arcpgu_setup_mode_config(drm);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(arcpgu->regs)) {
-               dev_err(drm->dev, "Could not remap IO mem\n");
+       if (IS_ERR(arcpgu->regs))
                return PTR_ERR(arcpgu->regs);
-       }
 
        dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
                 arc_pgu_read(arcpgu, ARCPGU_REG_ID));
@@ -149,15 +122,17 @@ static int arcpgu_load(struct drm_device *drm)
 
        /* find the encoder node and initialize it */
        encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
-       if (!encoder_node) {
-               dev_err(drm->dev, "failed to get an encoder slave node\n");
-               return -ENODEV;
+       if (encoder_node) {
+               ret = arcpgu_drm_hdmi_init(drm, encoder_node);
+               of_node_put(encoder_node);
+               if (ret < 0)
+                       return ret;
+       } else {
+               ret = arcpgu_drm_sim_init(drm, NULL);
+               if (ret < 0)
+                       return ret;
        }
 
-       ret = arcpgu_drm_hdmi_init(drm, encoder_node);
-       if (ret < 0)
-               return ret;
-
        drm_mode_config_reset(drm);
        drm_kms_helper_poll_init(drm);
 
@@ -174,7 +149,7 @@ static int arcpgu_load(struct drm_device *drm)
        return 0;
 }
 
-int arcpgu_unload(struct drm_device *drm)
+static int arcpgu_unload(struct drm_device *drm)
 {
        struct arcpgu_drm_private *arcpgu = drm->dev_private;
 
@@ -192,7 +167,6 @@ int arcpgu_unload(struct drm_device *drm)
 static struct drm_driver arcpgu_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
                           DRIVER_ATOMIC,
-       .preclose = arcpgu_preclose,
        .lastclose = arcpgu_lastclose,
        .name = "drm-arcpgu",
        .desc = "ARC PGU Controller",
@@ -207,7 +181,7 @@ static struct drm_driver arcpgu_drm_driver = {
        .get_vblank_counter = drm_vblank_no_hw_counter,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_free_object = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops = &drm_gem_cma_vm_ops,
        .gem_prime_export = drm_gem_prime_export,
        .gem_prime_import = drm_gem_prime_import,
@@ -235,15 +209,8 @@ static int arcpgu_probe(struct platform_device *pdev)
        if (ret)
                goto err_unload;
 
-       ret = drm_connector_register_all(drm);
-       if (ret)
-               goto err_unregister;
-
        return 0;
 
-err_unregister:
-       drm_dev_unregister(drm);
-
 err_unload:
        arcpgu_unload(drm);
 
@@ -257,7 +224,6 @@ static int arcpgu_remove(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
 
-       drm_connector_unregister_all(drm);
        drm_dev_unregister(drm);
        arcpgu_unload(drm);
        drm_dev_unref(drm);
index 08b6baeb320d75977d5e6a0fc510212a25404099..b7a8b2ac4055b6594daa4e15d0c68009dd1e1cdc 100644 (file)
@@ -46,23 +46,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
        return sfuncs->get_modes(&slave->base, connector);
 }
 
-struct drm_encoder *
-arcpgu_drm_connector_best_encoder(struct drm_connector *connector)
-{
-       struct drm_encoder_slave *slave;
-       struct arcpgu_drm_connector *con =
-               container_of(connector, struct arcpgu_drm_connector, connector);
-
-       slave = con->encoder_slave;
-       if (slave == NULL) {
-               dev_err(connector->dev->dev,
-                       "connector_best_encoder: cannot find slave encoder for connector\n");
-               return NULL;
-       }
-
-       return &slave->base;
-}
-
 static enum drm_connector_status
 arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -97,7 +80,6 @@ static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
 static const struct drm_connector_helper_funcs
 arcpgu_drm_connector_helper_funcs = {
        .get_modes = arcpgu_drm_connector_get_modes,
-       .best_encoder = arcpgu_drm_connector_best_encoder,
 };
 
 static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
new file mode 100644 (file)
index 0000000..2bf06d7
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "arcpgu.h"
+
+#define XRES_DEF       640
+#define YRES_DEF       480
+
+#define XRES_MAX       8192
+#define YRES_MAX       8192
+
+struct arcpgu_drm_connector {
+       struct drm_connector connector;
+       struct drm_encoder_slave *encoder_slave;
+};
+
+static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
+{
+       int count;
+
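+       /*
+        * there is no EDID to probe in simulation, so expose the standard
+        * modes up to XRES_MAX x YRES_MAX and mark XRES_DEF x YRES_DEF as
+        * the preferred one
+        */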
+       count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+       drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+       return count;
+}
+
+static enum drm_connector_status
+arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+       return connector_status_connected;
+}
+
+static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
+{
+       drm_connector_unregister(connector);
+       drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_helper_funcs
+arcpgu_drm_connector_helper_funcs = {
+       .get_modes = arcpgu_drm_connector_get_modes,
+};
+
+static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
+       .dpms = drm_helper_connector_dpms,
+       .reset = drm_atomic_helper_connector_reset,
+       .detect = arcpgu_drm_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = arcpgu_drm_connector_destroy,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
+{
+       struct arcpgu_drm_connector *arcpgu_connector;
+       struct drm_encoder_slave *encoder;
+       struct drm_connector *connector;
+       int ret;
+
+       encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
+       if (encoder == NULL)
+               return -ENOMEM;
+
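+       /* the only CRTC is at index 0, so the possible_crtcs mask is bit 0 */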
+       encoder->base.possible_crtcs = 1;
+       encoder->base.possible_clones = 0;
+
+       ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
+                              DRM_MODE_ENCODER_VIRTUAL, NULL);
+       if (ret)
+               return ret;
+
+       arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
+                                       GFP_KERNEL);
+       if (!arcpgu_connector) {
+               ret = -ENOMEM;
+               goto error_encoder_cleanup;
+       }
+
+       connector = &arcpgu_connector->connector;
+       drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
+
+       ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
+                       DRM_MODE_CONNECTOR_VIRTUAL);
+       if (ret < 0) {
+               dev_err(drm->dev, "failed to initialize drm connector\n");
+               goto error_encoder_cleanup;
+       }
+
+       ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
+       if (ret < 0) {
+               dev_err(drm->dev, "could not attach connector to encoder\n");
+               drm_connector_unregister(connector);
+               goto error_connector_cleanup;
+       }
+
+       arcpgu_connector->encoder_slave = encoder;
+
+       return 0;
+
+error_connector_cleanup:
+       drm_connector_cleanup(connector);
+
+error_encoder_cleanup:
+       drm_encoder_cleanup(&encoder->base);
+       return ret;
+}
index eaed454e043c73d2008403ec20936c8fcd240ed1..9a18e1bd57b427f7362366ffe9af64d02b21d63c 100644 (file)
@@ -9,7 +9,6 @@ config DRM_HDLCD
        depends on COMMON_CLK
        select DRM_ARM
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
        help
          Choose this option if you have an ARM High Definition Colour LCD
@@ -25,3 +24,19 @@ config DRM_HDLCD_SHOW_UNDERRUN
          Enable this option to show in red colour the pixels that the
          HDLCD device did not fetch from framebuffer due to underrun
          conditions.
+
+config DRM_MALI_DISPLAY
+       tristate "ARM Mali Display Processor"
+       depends on DRM && OF && (ARM || ARM64)
+       depends on COMMON_CLK
+       select DRM_ARM
+       select DRM_KMS_HELPER
+       select DRM_KMS_CMA_HELPER
+       select DRM_GEM_CMA_HELPER
+       select VIDEOMODE_HELPERS
+       help
+         Choose this option if you want to compile the ARM Mali Display
+         Processor driver. It supports the DP500, DP550 and DP650 variants
+         of the hardware.
+
+         If compiled as a module, it will be called mali-dp.
index 89dcb7bab93a065646af357c13deff8b8572bde8..bb8b158ff90d03dcc1d295f07b2d76ac3899304a 100644 (file)
@@ -1,2 +1,4 @@
 hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
 obj-$(CONFIG_DRM_HDLCD)        += hdlcd.o
+mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
+obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
index fef1b04c2aab4fef4411ac0cc10249c98caab0f5..48019ae22ddba5fcbb3283e94a016543fba3d8b3 100644 (file)
  *
  */
 
+static void hdlcd_crtc_cleanup(struct drm_crtc *crtc)
+{
+       struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+       /* stop the controller on cleanup */
+       hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+       drm_crtc_cleanup(crtc);
+}
+
 static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
-       .destroy = drm_crtc_cleanup,
+       .destroy = hdlcd_crtc_cleanup,
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .reset = drm_atomic_helper_crtc_reset,
@@ -97,7 +106,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
        struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
        struct drm_display_mode *m = &crtc->state->adjusted_mode;
        struct videomode vm;
-       unsigned int polarities, line_length, err;
+       unsigned int polarities, err;
 
        vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
        vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
@@ -113,23 +122,18 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
        if (m->flags & DRM_MODE_FLAG_PVSYNC)
                polarities |= HDLCD_POLARITY_VSYNC;
 
-       line_length = crtc->primary->state->fb->pitches[0];
-
        /* Allow max number of outstanding requests and largest burst size */
        hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
                    HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
 
-       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length);
-       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length);
-       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1);
        hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
        hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
        hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
        hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
        hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
        hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
        hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
-       hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
        hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
 
        err = hdlcd_set_pxl_fmt(crtc);
@@ -144,20 +148,19 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
        struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
        clk_prepare_enable(hdlcd->clk);
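+       /* program the mode timings before enabling scanout */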
+       hdlcd_crtc_mode_set_nofb(crtc);
        hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
-       drm_crtc_vblank_on(crtc);
 }
 
 static void hdlcd_crtc_disable(struct drm_crtc *crtc)
 {
        struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
 
-       if (!crtc->primary->fb)
+       if (!crtc->state->active)
                return;
 
-       clk_disable_unprepare(hdlcd->clk);
        hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
-       drm_crtc_vblank_off(crtc);
+       clk_disable_unprepare(hdlcd->clk);
 }
 
 static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
@@ -179,52 +182,39 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
 static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
                                    struct drm_crtc_state *state)
 {
-       struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
-       unsigned long flags;
-
-       if (crtc->state->event) {
-               struct drm_pending_vblank_event *event = crtc->state->event;
+       struct drm_pending_vblank_event *event = crtc->state->event;
 
+       if (event) {
                crtc->state->event = NULL;
-               event->pipe = drm_crtc_index(crtc);
-
-               WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
-               spin_lock_irqsave(&crtc->dev->event_lock, flags);
-               list_add_tail(&event->base.link, &hdlcd->event_list);
-               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
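+               /*
+                * if a vblank reference can be taken, deliver the event on
+                * the next vblank, otherwise send it out immediately
+                */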
+               spin_lock_irq(&crtc->dev->event_lock);
+               if (drm_crtc_vblank_get(crtc) == 0)
+                       drm_crtc_arm_vblank_event(crtc, event);
+               else
+                       drm_crtc_send_vblank_event(crtc, event);
+               spin_unlock_irq(&crtc->dev->event_lock);
        }
 }
 
-static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc,
-                                   struct drm_crtc_state *state)
-{
-}
-
-static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc,
-                       const struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode)
-{
-       return true;
-}
-
 static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
-       .mode_fixup     = hdlcd_crtc_mode_fixup,
-       .mode_set       = drm_helper_crtc_mode_set,
-       .mode_set_base  = drm_helper_crtc_mode_set_base,
-       .mode_set_nofb  = hdlcd_crtc_mode_set_nofb,
        .enable         = hdlcd_crtc_enable,
        .disable        = hdlcd_crtc_disable,
-       .prepare        = hdlcd_crtc_disable,
-       .commit         = hdlcd_crtc_enable,
        .atomic_check   = hdlcd_crtc_atomic_check,
        .atomic_begin   = hdlcd_crtc_atomic_begin,
-       .atomic_flush   = hdlcd_crtc_atomic_flush,
 };
 
 static int hdlcd_plane_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *state)
 {
+       u32 src_w, src_h;
+
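+       /* plane source coordinates are in 16.16 fixed point format */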
+       src_w = state->src_w >> 16;
+       src_h = state->src_h >> 16;
+
+       /* we can't do any scaling of the plane source */
+       if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
+               return -EINVAL;
+
        return 0;
 }
 
@@ -233,20 +223,31 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
 {
        struct hdlcd_drm_private *hdlcd;
        struct drm_gem_cma_object *gem;
+       unsigned int depth, bpp;
+       u32 src_w, src_h, dest_w, dest_h;
        dma_addr_t scanout_start;
 
-       if (!plane->state->crtc || !plane->state->fb)
+       if (!plane->state->fb)
                return;
 
-       hdlcd = crtc_to_hdlcd_priv(plane->state->crtc);
+       drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
+       src_w = plane->state->src_w >> 16;
+       src_h = plane->state->src_h >> 16;
+       dest_w = plane->state->crtc_w;
+       dest_h = plane->state->crtc_h;
        gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
-       scanout_start = gem->paddr;
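+       /* start scanning out from the plane's x/y position inside the fb */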
+       scanout_start = gem->paddr + plane->state->fb->offsets[0] +
+               plane->state->crtc_y * plane->state->fb->pitches[0] +
+               plane->state->crtc_x * bpp / 8;
+
+       hdlcd = plane->dev->dev_private;
+       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
+       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]);
+       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
        hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
 }
 
 static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
-       .prepare_fb = NULL,
-       .cleanup_fb = NULL,
        .atomic_check = hdlcd_plane_atomic_check,
        .atomic_update = hdlcd_plane_atomic_update,
 };
@@ -294,16 +295,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
        return plane;
 }
 
-void hdlcd_crtc_suspend(struct drm_crtc *crtc)
-{
-       hdlcd_crtc_disable(crtc);
-}
-
-void hdlcd_crtc_resume(struct drm_crtc *crtc)
-{
-       hdlcd_crtc_enable(crtc);
-}
-
 int hdlcd_setup_crtc(struct drm_device *drm)
 {
        struct hdlcd_drm_private *hdlcd = drm->dev_private;
index b987c63ba8d6a63612f7d14ab56ad5e82c15b05a..d83b46a3032765c85fc8b00534ac550720002c8f 100644 (file)
@@ -49,8 +49,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
        atomic_set(&hdlcd->dma_end_count, 0);
 #endif
 
-       INIT_LIST_HEAD(&hdlcd->event_list);
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
        if (IS_ERR(hdlcd->mmio)) {
@@ -84,11 +82,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
                goto setup_fail;
        }
 
-       pm_runtime_enable(drm->dev);
-
-       pm_runtime_get_sync(drm->dev);
        ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
-       pm_runtime_put_sync(drm->dev);
        if (ret < 0) {
                DRM_ERROR("failed to install IRQ handler\n");
                goto irq_fail;
@@ -108,21 +102,14 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
 {
        struct hdlcd_drm_private *hdlcd = drm->dev_private;
 
-       if (hdlcd->fbdev)
-               drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
-}
-
-static int hdlcd_atomic_commit(struct drm_device *dev,
-                              struct drm_atomic_state *state, bool nonblock)
-{
-       return drm_atomic_helper_commit(dev, state, false);
+       drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
 }
 
 static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
        .fb_create = drm_fb_cma_create,
        .output_poll_changed = hdlcd_fb_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
-       .atomic_commit = hdlcd_atomic_commit,
+       .atomic_commit = drm_atomic_helper_commit,
 };
 
 static void hdlcd_setup_mode_config(struct drm_device *drm)
@@ -164,24 +151,9 @@ static irqreturn_t hdlcd_irq(int irq, void *arg)
                atomic_inc(&hdlcd->vsync_count);
 
 #endif
-       if (irq_status & HDLCD_INTERRUPT_VSYNC) {
-               bool events_sent = false;
-               unsigned long flags;
-               struct drm_pending_vblank_event *e, *t;
-
+       if (irq_status & HDLCD_INTERRUPT_VSYNC)
                drm_crtc_handle_vblank(&hdlcd->crtc);
 
-               spin_lock_irqsave(&drm->event_lock, flags);
-               list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) {
-                       list_del(&e->base.link);
-                       drm_crtc_send_vblank_event(&hdlcd->crtc, e);
-                       events_sent = true;
-               }
-               if (events_sent)
-                       drm_crtc_vblank_put(&hdlcd->crtc);
-               spin_unlock_irqrestore(&drm->event_lock, flags);
-       }
-
        /* acknowledge interrupt(s) */
        hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
 
@@ -275,6 +247,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
 static struct drm_info_list hdlcd_debugfs_list[] = {
        { "interrupt_count", hdlcd_show_underrun_count, 0 },
        { "clocks", hdlcd_show_pxlclock, 0 },
+       { "fb", drm_fb_cma_debugfs_show, 0 },
 };
 
 static int hdlcd_debugfs_init(struct drm_minor *minor)
@@ -316,7 +289,7 @@ static struct drm_driver hdlcd_driver = {
        .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank = hdlcd_enable_vblank,
        .disable_vblank = hdlcd_disable_vblank,
-       .gem_free_object = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops = &drm_gem_cma_vm_ops,
        .dumb_create = drm_gem_cma_dumb_create,
        .dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -357,6 +330,8 @@ static int hdlcd_drm_bind(struct device *dev)
                return -ENOMEM;
 
        drm->dev_private = hdlcd;
+       dev_set_drvdata(dev, drm);
+
        hdlcd_setup_mode_config(drm);
        ret = hdlcd_load(drm, 0);
        if (ret)
@@ -366,14 +341,18 @@ static int hdlcd_drm_bind(struct device *dev)
        if (ret)
                goto err_unload;
 
-       dev_set_drvdata(dev, drm);
-
        ret = component_bind_all(dev, drm);
        if (ret) {
                DRM_ERROR("Failed to bind all components\n");
                goto err_unregister;
        }
 
+       ret = pm_runtime_set_active(dev);
+       if (ret)
+               goto err_pm_active;
+
+       pm_runtime_enable(dev);
+
        ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
        if (ret < 0) {
                DRM_ERROR("failed to initialise vblank\n");
@@ -399,16 +378,16 @@ err_fbdev:
        drm_mode_config_cleanup(drm);
        drm_vblank_cleanup(drm);
 err_vblank:
+       pm_runtime_disable(drm->dev);
+err_pm_active:
        component_unbind_all(dev, drm);
 err_unregister:
        drm_dev_unregister(drm);
 err_unload:
-       pm_runtime_get_sync(drm->dev);
        drm_irq_uninstall(drm);
-       pm_runtime_put_sync(drm->dev);
-       pm_runtime_disable(drm->dev);
        of_reserved_mem_device_release(drm->dev);
 err_free:
+       dev_set_drvdata(dev, NULL);
        drm_dev_unref(drm);
 
        return ret;
@@ -495,30 +474,34 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match);
 static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
-       struct drm_crtc *crtc;
+       struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
 
-       if (pm_runtime_suspended(dev))
+       if (!hdlcd)
                return 0;
 
-       drm_modeset_lock_all(drm);
-       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-               hdlcd_crtc_suspend(crtc);
-       drm_modeset_unlock_all(drm);
+       drm_kms_helper_poll_disable(drm);
+
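+       /* save the atomic state so that it can be restored on resume */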
+       hdlcd->state = drm_atomic_helper_suspend(drm);
+       if (IS_ERR(hdlcd->state)) {
+               drm_kms_helper_poll_enable(drm);
+               return PTR_ERR(hdlcd->state);
+       }
+
        return 0;
 }
 
 static int __maybe_unused hdlcd_pm_resume(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
-       struct drm_crtc *crtc;
+       struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
 
-       if (!pm_runtime_suspended(dev))
+       if (!hdlcd)
                return 0;
 
-       drm_modeset_lock_all(drm);
-       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-               hdlcd_crtc_resume(crtc);
-       drm_modeset_unlock_all(drm);
+       drm_atomic_helper_resume(drm, hdlcd->state);
+       drm_kms_helper_poll_enable(drm);
+       pm_runtime_set_active(dev);
+
        return 0;
 }
 
index aa234784f0537fde1e248afeace94c898c3d1752..e3950a0711524a13d637f7fc0ec6a9100b9c6219 100644 (file)
@@ -9,10 +9,9 @@ struct hdlcd_drm_private {
        void __iomem                    *mmio;
        struct clk                      *clk;
        struct drm_fbdev_cma            *fbdev;
-       struct drm_framebuffer          *fb;
-       struct list_head                event_list;
        struct drm_crtc                 crtc;
        struct drm_plane                *plane;
+       struct drm_atomic_state         *state;
 #ifdef CONFIG_DEBUG_FS
        atomic_t buffer_underrun_count;
        atomic_t bus_error_count;
@@ -36,7 +35,5 @@ static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg)
 
 int hdlcd_setup_crtc(struct drm_device *dev);
 void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd);
-void hdlcd_crtc_suspend(struct drm_crtc *crtc);
-void hdlcd_crtc_resume(struct drm_crtc *crtc);
 
 #endif /* __HDLCD_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
new file mode 100644 (file)
index 0000000..08e6a71
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 driver (crtc operations)
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/clk.h>
+#include <video/videomode.h>
+
+#include "malidp_drv.h"
+#include "malidp_hw.h"
+
+static bool malidp_crtc_mode_fixup(struct drm_crtc *crtc,
+                                  const struct drm_display_mode *mode,
+                                  struct drm_display_mode *adjusted_mode)
+{
+       struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+       struct malidp_hw_device *hwdev = malidp->dev;
+
+       /*
+        * check that the hardware can drive the required clock rate,
+        * but skip the check if the clock is meant to be disabled (req_rate = 0)
+        */
+       long rate, req_rate = mode->crtc_clock * 1000;
+
+       if (req_rate) {
+               rate = clk_round_rate(hwdev->mclk, req_rate);
+               if (rate < req_rate) {
+                       DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n",
+                                        mode->crtc_clock);
+                       return false;
+               }
+
+               rate = clk_round_rate(hwdev->pxlclk, req_rate);
+               if (rate != req_rate) {
+                       DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
+                                        req_rate);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
+static void malidp_crtc_enable(struct drm_crtc *crtc)
+{
+       struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+       struct malidp_hw_device *hwdev = malidp->dev;
+       struct videomode vm;
+
+       drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
+
+       clk_prepare_enable(hwdev->pxlclk);
+
+       /* mclk needs to be set to the same or higher rate than pxlclk */
+       clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+       clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+
+       hwdev->modeset(hwdev, &vm);
+       hwdev->leave_config_mode(hwdev);
+       drm_crtc_vblank_on(crtc);
+}
+
+static void malidp_crtc_disable(struct drm_crtc *crtc)
+{
+       struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+       struct malidp_hw_device *hwdev = malidp->dev;
+
+       drm_crtc_vblank_off(crtc);
+       hwdev->enter_config_mode(hwdev);
+       clk_disable_unprepare(hwdev->pxlclk);
+}
+
+static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *state)
+{
+       struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+       struct malidp_hw_device *hwdev = malidp->dev;
+       struct drm_plane *plane;
+       const struct drm_plane_state *pstate;
+       u32 rot_mem_free, rot_mem_usable;
+       int rotated_planes = 0;
+
+       /*
+        * check if there is enough rotation memory available for planes
+        * that need 90° and 270° rotation. Each plane has set its required
+        * memory size in the ->plane_check() callback; here we only make
+        * sure that the sums are less than the total usable memory.
+        *
+        * The rotation memory allocation algorithm (for each plane):
+        *  a. If no more rotated planes exist, all remaining rotation
+        *     memory in the bank is available for use by the plane.
+        *  b. If other rotated planes exist, and the plane's layer ID is
+        *     DE_VIDEO1, it can use all the memory from the first bank if
+        *     a secondary rotation memory bank is available, otherwise it
+        *     can use up to half the bank's memory.
+        *  c. If other rotated planes exist, and the plane's layer ID is
+        *     not DE_VIDEO1, it can use half of the available memory.
+        *
+        * Note: this algorithm assumes that the order in which the planes are
+        * checked always has the DE_VIDEO1 plane first in the list if it is
+        * rotated. Because that is how we create the planes in the first
+        * place, this holds under the current DRM version, but if the order
+        * in which drm_atomic_crtc_state_for_each_plane() iterates over planes
+        * ever changes, we need to pre-sort the planes before validation.
+        */
+
+       /* first count the number of rotated planes */
+       drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+               if (pstate->rotation & MALIDP_ROTATED_MASK)
+                       rotated_planes++;
+       }
+
+       rot_mem_free = hwdev->rotation_memory[0];
+       /*
+        * if we have more than 1 plane using rotation memory, use the second
+        * block of rotation memory as well
+        */
+       if (rotated_planes > 1)
+               rot_mem_free += hwdev->rotation_memory[1];
+
+       /* now validate the rotation memory requirements */
+       drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+               struct malidp_plane *mp = to_malidp_plane(plane);
+               struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
+
+               if (pstate->rotation & MALIDP_ROTATED_MASK) {
+                       /* process current plane */
+                       rotated_planes--;
+
+                       if (!rotated_planes) {
+                               /* no more rotated planes, we can use what's left */
+                               rot_mem_usable = rot_mem_free;
+                       } else {
+                               if ((mp->layer->id != DE_VIDEO1) ||
+                                   (hwdev->rotation_memory[1] == 0))
+                                       rot_mem_usable = rot_mem_free / 2;
+                               else
+                                       rot_mem_usable = hwdev->rotation_memory[0];
+                       }
+
+                       rot_mem_free -= rot_mem_usable;
+
+                       if (ms->rotmem_size > rot_mem_usable)
+                               return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
+       .mode_fixup = malidp_crtc_mode_fixup,
+       .enable = malidp_crtc_enable,
+       .disable = malidp_crtc_disable,
+       .atomic_check = malidp_crtc_atomic_check,
+};
+
+static const struct drm_crtc_funcs malidp_crtc_funcs = {
+       .destroy = drm_crtc_cleanup,
+       .set_config = drm_atomic_helper_set_config,
+       .page_flip = drm_atomic_helper_page_flip,
+       .reset = drm_atomic_helper_crtc_reset,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+int malidp_crtc_init(struct drm_device *drm)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       struct drm_plane *primary = NULL, *plane;
+       int ret;
+
+       ret = malidp_de_planes_init(drm);
+       if (ret < 0) {
+               DRM_ERROR("Failed to initialise planes\n");
+               return ret;
+       }
+
+       drm_for_each_plane(plane, drm) {
+               if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+                       primary = plane;
+                       break;
+               }
+       }
+
+       if (!primary) {
+               DRM_ERROR("no primary plane found\n");
+               ret = -EINVAL;
+               goto crtc_cleanup_planes;
+       }
+
+       ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
+                                       &malidp_crtc_funcs, NULL);
+       if (ret)
+               goto crtc_cleanup_planes;
+
+       drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
+       return 0;
+
+crtc_cleanup_planes:
+       malidp_de_planes_destroy(drm);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
new file mode 100644 (file)
index 0000000..e5b44e9
--- /dev/null
@@ -0,0 +1,512 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 KMS/DRM driver
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+
+#include "malidp_drv.h"
+#include "malidp_regs.h"
+#include "malidp_hw.h"
+
+#define MALIDP_CONF_VALID_TIMEOUT      250
+
+/*
+ * set the "config valid" bit and wait until the hardware acts on it
+ */
+static int malidp_set_and_wait_config_valid(struct drm_device *drm)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
+       int ret;
+
+       hwdev->set_config_valid(hwdev);
+       /* don't wait for config_valid flag if we are in config mode */
+       if (hwdev->in_config_mode(hwdev))
+               return 0;
+
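+       /*
+        * wait_event_interruptible_timeout() returns the remaining jiffies
+        * when the condition is met and zero (or -ERESTARTSYS) otherwise,
+        * so anything that is not positive counts as a timeout
+        */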
+       ret = wait_event_interruptible_timeout(malidp->wq,
+                       atomic_read(&malidp->config_valid) == 1,
+                       msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
+
+       return (ret > 0) ? 0 : -ETIMEDOUT;
+}
+
+static void malidp_output_poll_changed(struct drm_device *drm)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+
+       drm_fbdev_cma_hotplug_event(malidp->fbdev);
+}
+
+static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
+{
+       struct drm_pending_vblank_event *event;
+       struct drm_device *drm = state->dev;
+       struct malidp_drm *malidp = drm->dev_private;
+       int ret = malidp_set_and_wait_config_valid(drm);
+
+       if (ret)
+               DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+
+       event = malidp->crtc.state->event;
+       if (event) {
+               malidp->crtc.state->event = NULL;
+
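+               /*
+                * arm the event for delivery on the next vblank when a
+                * vblank reference is available, otherwise send it now
+                */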
+               spin_lock_irq(&drm->event_lock);
+               if (drm_crtc_vblank_get(&malidp->crtc) == 0)
+                       drm_crtc_arm_vblank_event(&malidp->crtc, event);
+               else
+                       drm_crtc_send_vblank_event(&malidp->crtc, event);
+               spin_unlock_irq(&drm->event_lock);
+       }
+       drm_atomic_helper_commit_hw_done(state);
+}
+
+static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
+{
+       struct drm_device *drm = state->dev;
+
+       drm_atomic_helper_commit_modeset_disables(drm, state);
+       drm_atomic_helper_commit_modeset_enables(drm, state);
+       drm_atomic_helper_commit_planes(drm, state, true);
+
+       malidp_atomic_commit_hw_done(state);
+
+       drm_atomic_helper_wait_for_vblanks(drm, state);
+
+       drm_atomic_helper_cleanup_planes(drm, state);
+}
+
+static struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
+       .atomic_commit_tail = malidp_atomic_commit_tail,
+};
+
+static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
+       .fb_create = drm_fb_cma_create,
+       .output_poll_changed = malidp_output_poll_changed,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int malidp_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
+
+       malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
+                            hwdev->map.de_irq_map.vsync_irq);
+       return 0;
+}
+
+static void malidp_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
+
+       malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
+                             hwdev->map.de_irq_map.vsync_irq);
+}
+
+static int malidp_init(struct drm_device *drm)
+{
+       int ret;
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
+
+       drm_mode_config_init(drm);
+
+       drm->mode_config.min_width = hwdev->min_line_size;
+       drm->mode_config.min_height = hwdev->min_line_size;
+       drm->mode_config.max_width = hwdev->max_line_size;
+       drm->mode_config.max_height = hwdev->max_line_size;
+       drm->mode_config.funcs = &malidp_mode_config_funcs;
+       drm->mode_config.helper_private = &malidp_mode_config_helpers;
+
+       ret = malidp_crtc_init(drm);
+       if (ret) {
+               drm_mode_config_cleanup(drm);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int malidp_irq_init(struct platform_device *pdev)
+{
+       int irq_de, irq_se, ret = 0;
+       struct drm_device *drm = dev_get_drvdata(&pdev->dev);
+
+       /* fetch the interrupts from DT */
+       irq_de = platform_get_irq_byname(pdev, "DE");
+       if (irq_de < 0) {
+               DRM_ERROR("no 'DE' IRQ specified!\n");
+               return irq_de;
+       }
+       irq_se = platform_get_irq_byname(pdev, "SE");
+       if (irq_se < 0) {
+               DRM_ERROR("no 'SE' IRQ specified!\n");
+               return irq_se;
+       }
+
+       ret = malidp_de_irq_init(drm, irq_de);
+       if (ret)
+               return ret;
+
+       ret = malidp_se_irq_init(drm, irq_se);
+       if (ret) {
+               malidp_de_irq_fini(drm);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void malidp_lastclose(struct drm_device *drm)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+
+       drm_fbdev_cma_restore_mode(malidp->fbdev);
+}
+
+static const struct file_operations fops = {
+       .owner = THIS_MODULE,
+       .open = drm_open,
+       .release = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = drm_compat_ioctl,
+#endif
+       .poll = drm_poll,
+       .read = drm_read,
+       .llseek = noop_llseek,
+       .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver malidp_driver = {
+       .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
+                          DRIVER_PRIME,
+       .lastclose = malidp_lastclose,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
+       .enable_vblank = malidp_enable_vblank,
+       .disable_vblank = malidp_disable_vblank,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
+       .gem_vm_ops = &drm_gem_cma_vm_ops,
+       .dumb_create = drm_gem_cma_dumb_create,
+       .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+       .dumb_destroy = drm_gem_dumb_destroy,
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+       .gem_prime_export = drm_gem_prime_export,
+       .gem_prime_import = drm_gem_prime_import,
+       .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+       .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+       .gem_prime_vmap = drm_gem_cma_prime_vmap,
+       .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+       .gem_prime_mmap = drm_gem_cma_prime_mmap,
+       .fops = &fops,
+       .name = "mali-dp",
+       .desc = "ARM Mali Display Processor driver",
+       .date = "20160106",
+       .major = 1,
+       .minor = 0,
+};
+
+static const struct of_device_id malidp_drm_of_match[] = {
+       {
+               .compatible = "arm,mali-dp500",
+               .data = &malidp_device[MALIDP_500]
+       },
+       {
+               .compatible = "arm,mali-dp550",
+               .data = &malidp_device[MALIDP_550]
+       },
+       {
+               .compatible = "arm,mali-dp650",
+               .data = &malidp_device[MALIDP_650]
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, malidp_drm_of_match);
+
+#define MAX_OUTPUT_CHANNELS    3
+
+static int malidp_bind(struct device *dev)
+{
+       struct resource *res;
+       struct drm_device *drm;
+       struct malidp_drm *malidp;
+       struct malidp_hw_device *hwdev;
+       struct platform_device *pdev = to_platform_device(dev);
+       /* number of lines for the R, G and B output */
+       u8 output_width[MAX_OUTPUT_CHANNELS];
+       int ret = 0, i;
+       u32 version, out_depth = 0;
+
+       malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL);
+       if (!malidp)
+               return -ENOMEM;
+
+       hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL);
+       if (!hwdev)
+               return -ENOMEM;
+
+       /*
+        * copy the associated data from malidp_drm_of_match to avoid
+        * having to keep a reference to the OF node after binding
+        */
+       memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
+       malidp->dev = hwdev;
+
+       INIT_LIST_HEAD(&malidp->event_list);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       hwdev->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(hwdev->regs)) {
+               DRM_ERROR("Failed to map control registers area\n");
+               return PTR_ERR(hwdev->regs);
+       }
+
+       hwdev->pclk = devm_clk_get(dev, "pclk");
+       if (IS_ERR(hwdev->pclk))
+               return PTR_ERR(hwdev->pclk);
+
+       hwdev->aclk = devm_clk_get(dev, "aclk");
+       if (IS_ERR(hwdev->aclk))
+               return PTR_ERR(hwdev->aclk);
+
+       hwdev->mclk = devm_clk_get(dev, "mclk");
+       if (IS_ERR(hwdev->mclk))
+               return PTR_ERR(hwdev->mclk);
+
+       hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
+       if (IS_ERR(hwdev->pxlclk))
+               return PTR_ERR(hwdev->pxlclk);
+
+       /* Get the optional framebuffer memory resource */
+       ret = of_reserved_mem_device_init(dev);
+       if (ret && ret != -ENODEV)
+               return ret;
+
+       drm = drm_dev_alloc(&malidp_driver, dev);
+       if (!drm) {
+               ret = -ENOMEM;
+               goto alloc_fail;
+       }
+
+       /* Enable APB clock in order to get access to the registers */
+       clk_prepare_enable(hwdev->pclk);
+       /*
+        * Enable AXI clock and main clock so that prefetch can start once
+        * the registers are set
+        */
+       clk_prepare_enable(hwdev->aclk);
+       clk_prepare_enable(hwdev->mclk);
+
+       ret = hwdev->query_hw(hwdev);
+       if (ret) {
+               DRM_ERROR("Invalid HW configuration\n");
+               goto query_hw_fail;
+       }
+
+       version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
+       DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
+                (version >> 12) & 0xf, (version >> 8) & 0xf);
+
+       /* set the number of lines used for output of RGB data */
+       ret = of_property_read_u8_array(dev->of_node,
+                                       "arm,malidp-output-port-lines",
+                                       output_width, MAX_OUTPUT_CHANNELS);
+       if (ret)
+               goto query_hw_fail;
+
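+       /* pack each channel's 4-bit width into one byte, R in the top byte */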
+       for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
+               out_depth = (out_depth << 8) | (output_width[i] & 0xf);
+       malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
+
+       drm->dev_private = malidp;
+       dev_set_drvdata(dev, drm);
+       atomic_set(&malidp->config_valid, 0);
+       init_waitqueue_head(&malidp->wq);
+
+       ret = malidp_init(drm);
+       if (ret < 0)
+               goto init_fail;
+
+       ret = drm_dev_register(drm, 0);
+       if (ret)
+               goto register_fail;
+
+       /* Set the CRTC's port so that the encoder component can find it */
+       malidp->crtc.port = of_graph_get_next_endpoint(dev->of_node, NULL);
+
+       ret = component_bind_all(dev, drm);
+       of_node_put(malidp->crtc.port);
+
+       if (ret) {
+               DRM_ERROR("Failed to bind all components\n");
+               goto bind_fail;
+       }
+
+       ret = malidp_irq_init(pdev);
+       if (ret < 0)
+               goto irq_init_fail;
+
+       ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+       if (ret < 0) {
+               DRM_ERROR("failed to initialise vblank\n");
+               goto vblank_fail;
+       }
+
+       drm_mode_config_reset(drm);
+
+       malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+                                          drm->mode_config.num_connector);
+
+       if (IS_ERR(malidp->fbdev)) {
+               ret = PTR_ERR(malidp->fbdev);
+               malidp->fbdev = NULL;
+               goto fbdev_fail;
+       }
+
+       drm_kms_helper_poll_init(drm);
+       return 0;
+
+fbdev_fail:
+       drm_vblank_cleanup(drm);
+vblank_fail:
+       malidp_se_irq_fini(drm);
+       malidp_de_irq_fini(drm);
+irq_init_fail:
+       component_unbind_all(dev, drm);
+bind_fail:
+       drm_dev_unregister(drm);
+register_fail:
+       malidp_de_planes_destroy(drm);
+       drm_mode_config_cleanup(drm);
+init_fail:
+       drm->dev_private = NULL;
+       dev_set_drvdata(dev, NULL);
+query_hw_fail:
+       clk_disable_unprepare(hwdev->mclk);
+       clk_disable_unprepare(hwdev->aclk);
+       clk_disable_unprepare(hwdev->pclk);
+       drm_dev_unref(drm);
+alloc_fail:
+       of_reserved_mem_device_release(dev);
+
+       return ret;
+}
+
+static void malidp_unbind(struct device *dev)
+{
+       struct drm_device *drm = dev_get_drvdata(dev);
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
+
+       if (malidp->fbdev) {
+               drm_fbdev_cma_fini(malidp->fbdev);
+               malidp->fbdev = NULL;
+       }
+       drm_kms_helper_poll_fini(drm);
+       malidp_se_irq_fini(drm);
+       malidp_de_irq_fini(drm);
+       drm_vblank_cleanup(drm);
+       component_unbind_all(dev, drm);
+       drm_dev_unregister(drm);
+       malidp_de_planes_destroy(drm);
+       drm_mode_config_cleanup(drm);
+       drm->dev_private = NULL;
+       dev_set_drvdata(dev, NULL);
+       clk_disable_unprepare(hwdev->mclk);
+       clk_disable_unprepare(hwdev->aclk);
+       clk_disable_unprepare(hwdev->pclk);
+       drm_dev_unref(drm);
+       of_reserved_mem_device_release(dev);
+}
+
+static const struct component_master_ops malidp_master_ops = {
+       .bind = malidp_bind,
+       .unbind = malidp_unbind,
+};
+
+static int malidp_compare_dev(struct device *dev, void *data)
+{
+       struct device_node *np = data;
+
+       return dev->of_node == np;
+}
+
+static int malidp_platform_probe(struct platform_device *pdev)
+{
+       struct device_node *port, *ep;
+       struct component_match *match = NULL;
+
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
+       /* there is only one output port inside each device, find it */
+       ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+       if (!ep)
+               return -ENODEV;
+
+       if (!of_device_is_available(ep)) {
+               of_node_put(ep);
+               return -ENODEV;
+       }
+
+       /* add the remote encoder port as component */
+       port = of_graph_get_remote_port_parent(ep);
+       of_node_put(ep);
+       if (!port || !of_device_is_available(port)) {
+               of_node_put(port);
+               return -EAGAIN;
+       }
+
+       component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
+       return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
+                                              match);
+}
+
+static int malidp_platform_remove(struct platform_device *pdev)
+{
+       component_master_del(&pdev->dev, &malidp_master_ops);
+       return 0;
+}
+
+static struct platform_driver malidp_platform_driver = {
+       .probe          = malidp_platform_probe,
+       .remove         = malidp_platform_remove,
+       .driver = {
+               .name = "mali-dp",
+               .of_match_table = malidp_drm_of_match,
+       },
+};
+
+module_platform_driver(malidp_platform_driver);
+
+MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
+MODULE_DESCRIPTION("ARM Mali DP DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
new file mode 100644 (file)
index 0000000..95558fd
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 KMS/DRM driver structures
+ */
+
+#ifndef __MALIDP_DRV_H__
+#define __MALIDP_DRV_H__
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include "malidp_hw.h"
+
+struct malidp_drm {
+       struct malidp_hw_device *dev;
+       struct drm_fbdev_cma *fbdev;
+       struct list_head event_list;
+       struct drm_crtc crtc;
+       wait_queue_head_t wq;
+       atomic_t config_valid;
+};
+
+#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
+
+struct malidp_plane {
+       struct drm_plane base;
+       struct malidp_hw_device *hwdev;
+       const struct malidp_layer *layer;
+};
+
+struct malidp_plane_state {
+       struct drm_plane_state base;
+
+       /* size of the required rotation memory if plane is rotated */
+       u32 rotmem_size;
+};
+
+#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
+#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base)
+
+int malidp_de_planes_init(struct drm_device *drm);
+void malidp_de_planes_destroy(struct drm_device *drm);
+int malidp_crtc_init(struct drm_device *drm);
+
+/* often used combination of rotational bits */
+#define MALIDP_ROTATED_MASK    (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))
+
+#endif  /* __MALIDP_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
new file mode 100644 (file)
index 0000000..a6132f1
--- /dev/null
@@ -0,0 +1,691 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 hardware manipulation routines. This is where
+ * the differences between the various versions of the hardware are dealt
+ * with, in order to present a unified view to the rest of the driver code.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <video/videomode.h>
+#include <video/display_timing.h>
+
+#include "malidp_drv.h"
+#include "malidp_hw.h"
+
+static const struct malidp_input_format malidp500_de_formats[] = {
+       /*    fourcc,   layers supporting the format,     internal id  */
+       { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  0 },
+       { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  1 },
+       { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  2 },
+       { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  3 },
+       { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  4 },
+       { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  5 },
+       { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  6 },
+       { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  7 },
+       { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  8 },
+       { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  9 },
+       { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 10 },
+       { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 },
+       { DRM_FORMAT_UYVY, DE_VIDEO1, 12 },
+       { DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
+       { DRM_FORMAT_NV12, DE_VIDEO1, 14 },
+       { DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
+};
+
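+/* hardware format id: 3-bit format group in bits [5:3], 3-bit index in [2:0] */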
+#define MALIDP_ID(__group, __format) \
+       ((((__group) & 0x7) << 3) | ((__format) & 0x7))
+
+#define MALIDP_COMMON_FORMATS \
+       /*    fourcc,   layers supporting the format,      internal id   */ \
+       { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \
+       { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \
+       { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \
+       { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \
+       { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \
+       { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \
+       { DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \
+       { DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \
+       { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \
+       { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \
+       { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \
+       { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \
+       { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \
+       { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \
+       { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \
+       { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
+       { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
+       { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
+       { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) },    \
+       { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) },    \
+       { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) },    \
+       { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
+
+static const struct malidp_input_format malidp550_de_formats[] = {
+       MALIDP_COMMON_FORMATS,
+};
+
+static const struct malidp_layer malidp500_layers[] = {
+       { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE },
+       { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE },
+       { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE },
+};
+
+static const struct malidp_layer malidp550_layers[] = {
+       { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE },
+       { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE },
+       { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE },
+       { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE },
+};
+
+#define MALIDP_DE_DEFAULT_PREFETCH_START       5
+
+static int malidp500_query_hw(struct malidp_hw_device *hwdev)
+{
+       u32 conf = malidp_hw_read(hwdev, MALIDP500_CONFIG_ID);
+       /* bit 4 of the CONFIG_ID register holds the line size multiplier */
+       u8 ln_size_mult = conf & 0x10 ? 2 : 1;
+
+       hwdev->min_line_size = 2;
+       hwdev->max_line_size = SZ_2K * ln_size_mult;
+       hwdev->rotation_memory[0] = SZ_1K * 64 * ln_size_mult;
+       hwdev->rotation_memory[1] = 0; /* no second rotation memory bank */
+
+       return 0;
+}
+
+static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
+{
+       u32 status, count = 100;
+
+       malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
+       while (count) {
+               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
+                       break;
+               /*
+                * entering config mode can take as long as the rendering
+                * of a full frame, hence the long sleep here
+                */
+               usleep_range(1000, 10000);
+               count--;
+       }
+       WARN(count == 0, "timeout while entering config mode");
+}
+
+static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
+{
+       u32 status, count = 100;
+
+       malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
+       while (count) {
+               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
+                       break;
+               usleep_range(100, 1000);
+               count--;
+       }
+       WARN(count == 0, "timeout while leaving config mode");
+}
+
+static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
+{
+       u32 status;
+
+       status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+       if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
+               return true;
+
+       return false;
+}
+
+static void malidp500_set_config_valid(struct malidp_hw_device *hwdev)
+{
+       malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
+}
+
+static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
+{
+       u32 val = 0;
+
+       malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL);
+       if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
+               val |= MALIDP500_HSYNCPOL;
+       if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
+               val |= MALIDP500_VSYNCPOL;
+       val |= MALIDP_DE_DEFAULT_PREFETCH_START;
+       malidp_hw_setbits(hwdev, val, MALIDP500_DC_CONTROL);
+
+       /*
+        * Mali-DP500 encodes the background color like this:
+        *    - red   @ MALIDP500_BGND_COLOR[12:0]
+        *    - green @ MALIDP500_BGND_COLOR[27:16]
+        *    - blue  @ (MALIDP500_BGND_COLOR + 4)[12:0]
+        */
+       val = ((MALIDP_BGND_COLOR_G & 0xfff) << 16) |
+             (MALIDP_BGND_COLOR_R & 0xfff);
+       malidp_hw_write(hwdev, val, MALIDP500_BGND_COLOR);
+       malidp_hw_write(hwdev, MALIDP_BGND_COLOR_B, MALIDP500_BGND_COLOR + 4);
+
+       val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
+               MALIDP_DE_H_BACKPORCH(mode->hback_porch);
+       malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
+
+       val = MALIDP500_DE_V_FRONTPORCH(mode->vfront_porch) |
+               MALIDP_DE_V_BACKPORCH(mode->vback_porch);
+       malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
+
+       val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
+               MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
+       malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
+
+       val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
+       malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
+
+       if (mode->flags & DISPLAY_FLAGS_INTERLACED)
+               malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+       else
+               malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+}
+
+static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+{
+       unsigned int depth;
+       int bpp;
+
+       /* RGB888 or BGR888 can't be rotated */
+       if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
+               return -EINVAL;
+
+       /*
+        * Each layer needs enough rotation memory to fit 8 lines
+        * worth of pixel data. Required size is then:
+        *    size = rotated_width * (bpp / 8) * 8;
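+        *
+        *    e.g. ARGB8888 (32 bpp) at w = 1920: 1920 * (32 / 8) * 8 = 61440
+        *    bytes, which fits in the DP500's 64KB rotation memory bank.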
+        */
+       drm_fb_get_bpp_depth(fmt, &depth, &bpp);
+
+       return w * bpp;
+}
+
+static int malidp550_query_hw(struct malidp_hw_device *hwdev)
+{
+       u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
+       u8 ln_size = (conf >> 4) & 0x3, rsize;
+
+       hwdev->min_line_size = 2;
+
+       switch (ln_size) {
+       case 0:
+               hwdev->max_line_size = SZ_2K;
+               /* two banks of 64KB for rotation memory */
+               rsize = 64;
+               break;
+       case 1:
+               hwdev->max_line_size = SZ_4K;
+               /* two banks of 128KB for rotation memory */
+               rsize = 128;
+               break;
+       case 2:
+               hwdev->max_line_size = 1280;
+               /* two banks of 40KB for rotation memory */
+               rsize = 40;
+               break;
+       case 3:
+               /* reserved value */
+               hwdev->max_line_size = 0;
+               return -EINVAL;
+       }
+
+       hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
+       return 0;
+}
+
+static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
+{
+       u32 status, count = 100;
+
+       malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
+       while (count) {
+               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
+                       break;
+               /*
+                * entering config mode can take as long as the rendering
+                * of a full frame, hence the long sleep here
+                */
+               usleep_range(1000, 10000);
+               count--;
+       }
+       WARN(count == 0, "timeout while entering config mode");
+}
+
+static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
+{
+       u32 status, count = 100;
+
+       malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
+       while (count) {
+               status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+               if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
+                       break;
+               usleep_range(100, 1000);
+               count--;
+       }
+       WARN(count == 0, "timeout while leaving config mode");
+}
+
+static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
+{
+       u32 status;
+
+       status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+       if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
+               return true;
+
+       return false;
+}
+
+static void malidp550_set_config_valid(struct malidp_hw_device *hwdev)
+{
+       malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
+}
+
+static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
+{
+       u32 val = MALIDP_DE_DEFAULT_PREFETCH_START;
+
+       malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL);
+       /*
+        * Mali-DP550 and Mali-DP650 encode the background color like this:
+        *   - red   @ MALIDP550_DE_BGND_COLOR[23:16]
+        *   - green @ MALIDP550_DE_BGND_COLOR[15:8]
+        *   - blue  @ MALIDP550_DE_BGND_COLOR[7:0]
+        *
+        * We need to truncate the least significant 4 bits from the default
+        * MALIDP_BGND_COLOR_x values
+        */
+       val = (((MALIDP_BGND_COLOR_R >> 4) & 0xff) << 16) |
+             (((MALIDP_BGND_COLOR_G >> 4) & 0xff) << 8) |
+             ((MALIDP_BGND_COLOR_B >> 4) & 0xff);
+       malidp_hw_write(hwdev, val, MALIDP550_DE_BGND_COLOR);
+
+       val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
+               MALIDP_DE_H_BACKPORCH(mode->hback_porch);
+       malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
+
+       val = MALIDP550_DE_V_FRONTPORCH(mode->vfront_porch) |
+               MALIDP_DE_V_BACKPORCH(mode->vback_porch);
+       malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
+
+       val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
+               MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
+       if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
+               val |= MALIDP550_HSYNCPOL;
+       if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
+               val |= MALIDP550_VSYNCPOL;
+       malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
+
+       val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
+       malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
+
+       if (mode->flags & DISPLAY_FLAGS_INTERLACED)
+               malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+       else
+               malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
+}
+
+static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+{
+       u32 bytes_per_col;
+
+       /* raw RGB888 or BGR888 can't be rotated */
+       if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
+               return -EINVAL;
+
+       switch (fmt) {
+       /* 8 lines at 4 bytes per pixel */
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+       /* 16 lines at 2 bytes per pixel */
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_YUYV:
+               bytes_per_col = 32;
+               break;
+       /* 16 lines at 1.5 bytes per pixel */
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_YUV420:
+               bytes_per_col = 24;
+               break;
+       default:
+               return -EINVAL;
+       }
+
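+       /*
+        * e.g. YUYV (2 bytes/pixel, buffered over 16 lines) at w = 2048:
+        *    2048 * 32 = 65536 bytes, i.e. exactly one 64KB rotation bank
+        *    on a DP550 configured for 2K line sizes.
+        */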
+       return w * bytes_per_col;
+}
+
+static int malidp650_query_hw(struct malidp_hw_device *hwdev)
+{
+       u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
+       u8 ln_size = (conf >> 4) & 0x3, rsize;
+
+       hwdev->min_line_size = 4;
+
+       switch (ln_size) {
+       case 0:
+       case 2:
+               /* reserved values */
+               hwdev->max_line_size = 0;
+               return -EINVAL;
+       case 1:
+               hwdev->max_line_size = SZ_4K;
+               /* two banks of 128KB for rotation memory */
+               rsize = 128;
+               break;
+       case 3:
+               hwdev->max_line_size = 2560;
+               /* two banks of 80KB for rotation memory */
+               rsize = 80;
+               break;
+       }
+
+       hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
+       return 0;
+}
+
+const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+       [MALIDP_500] = {
+               .map = {
+                       .se_base = MALIDP500_SE_BASE,
+                       .dc_base = MALIDP500_DC_BASE,
+                       .out_depth_base = MALIDP500_OUTPUT_DEPTH,
+                       .features = 0,  /* no CLEARIRQ register */
+                       .n_layers = ARRAY_SIZE(malidp500_layers),
+                       .layers = malidp500_layers,
+                       .de_irq_map = {
+                               .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
+                                           MALIDP500_DE_IRQ_AXI_ERR |
+                                           MALIDP500_DE_IRQ_VSYNC |
+                                           MALIDP500_DE_IRQ_GLOBAL,
+                               .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
+                       },
+                       .se_irq_map = {
+                               .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
+                               .vsync_irq = 0,
+                       },
+                       .dc_irq_map = {
+                               .irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
+                               .vsync_irq = MALIDP500_DE_IRQ_CONF_VALID,
+                       },
+                       .input_formats = malidp500_de_formats,
+                       .n_input_formats = ARRAY_SIZE(malidp500_de_formats),
+               },
+               .query_hw = malidp500_query_hw,
+               .enter_config_mode = malidp500_enter_config_mode,
+               .leave_config_mode = malidp500_leave_config_mode,
+               .in_config_mode = malidp500_in_config_mode,
+               .set_config_valid = malidp500_set_config_valid,
+               .modeset = malidp500_modeset,
+               .rotmem_required = malidp500_rotmem_required,
+       },
+       [MALIDP_550] = {
+               .map = {
+                       .se_base = MALIDP550_SE_BASE,
+                       .dc_base = MALIDP550_DC_BASE,
+                       .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
+                       .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+                       .n_layers = ARRAY_SIZE(malidp550_layers),
+                       .layers = malidp550_layers,
+                       .de_irq_map = {
+                               .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
+                                           MALIDP550_DE_IRQ_VSYNC,
+                               .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+                       },
+                       .se_irq_map = {
+                               .irq_mask = MALIDP550_SE_IRQ_EOW |
+                                           MALIDP550_SE_IRQ_AXI_ERR,
+                       },
+                       .dc_irq_map = {
+                               .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+                               .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
+                       },
+                       .input_formats = malidp550_de_formats,
+                       .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+               },
+               .query_hw = malidp550_query_hw,
+               .enter_config_mode = malidp550_enter_config_mode,
+               .leave_config_mode = malidp550_leave_config_mode,
+               .in_config_mode = malidp550_in_config_mode,
+               .set_config_valid = malidp550_set_config_valid,
+               .modeset = malidp550_modeset,
+               .rotmem_required = malidp550_rotmem_required,
+       },
+       [MALIDP_650] = {
+               .map = {
+                       .se_base = MALIDP550_SE_BASE,
+                       .dc_base = MALIDP550_DC_BASE,
+                       .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
+                       .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+                       .n_layers = ARRAY_SIZE(malidp550_layers),
+                       .layers = malidp550_layers,
+                       .de_irq_map = {
+                               .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
+                                           MALIDP650_DE_IRQ_DRIFT |
+                                           MALIDP550_DE_IRQ_VSYNC,
+                               .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
+                       },
+                       .se_irq_map = {
+                               .irq_mask = MALIDP550_SE_IRQ_EOW |
+                                           MALIDP550_SE_IRQ_AXI_ERR,
+                       },
+                       .dc_irq_map = {
+                               .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
+                               .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
+                       },
+                       .input_formats = malidp550_de_formats,
+                       .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+               },
+               .query_hw = malidp650_query_hw,
+               .enter_config_mode = malidp550_enter_config_mode,
+               .leave_config_mode = malidp550_leave_config_mode,
+               .in_config_mode = malidp550_in_config_mode,
+               .set_config_valid = malidp550_set_config_valid,
+               .modeset = malidp550_modeset,
+               .rotmem_required = malidp550_rotmem_required,
+       },
+};
+
+u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
+                          u8 layer_id, u32 format)
+{
+       unsigned int i;
+
+       for (i = 0; i < map->n_input_formats; i++) {
+               if (((map->input_formats[i].layer & layer_id) == layer_id) &&
+                   (map->input_formats[i].format == format))
+                       return map->input_formats[i].id;
+       }
+
+       return MALIDP_INVALID_FORMAT_ID;
+}
+
+static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
+{
+       u32 base = malidp_get_block_base(hwdev, block);
+
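+       /*
+        * DP550/DP650 have a dedicated CLEARIRQ register; on DP500 the
+        * interrupt bits are acknowledged through the STATUS register.
+        */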
+       if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
+               malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
+       else
+               malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
+}
+
+static irqreturn_t malidp_de_irq(int irq, void *arg)
+{
+       struct drm_device *drm = arg;
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev;
+       const struct malidp_irq_map *de;
+       u32 status, mask, dc_status;
+       irqreturn_t ret = IRQ_NONE;
+
+       if (!drm->dev_private)
+               return IRQ_HANDLED;
+
+       hwdev = malidp->dev;
+       de = &hwdev->map.de_irq_map;
+
+       /* first handle the config valid IRQ */
+       dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+       if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
+               /* we have a page flip event */
+               atomic_set(&malidp->config_valid, 1);
+               malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
+               ret = IRQ_WAKE_THREAD;
+       }
+
+       status = malidp_hw_read(hwdev, MALIDP_REG_STATUS);
+       if (!(status & de->irq_mask))
+               return ret;
+
+       mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
+       status &= mask;
+       if (status & de->vsync_irq)
+               drm_crtc_handle_vblank(&malidp->crtc);
+
+       malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
+
+       return (ret == IRQ_NONE) ? IRQ_HANDLED : ret;
+}
+
+static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg)
+{
+       struct drm_device *drm = arg;
+       struct malidp_drm *malidp = drm->dev_private;
+
+       wake_up(&malidp->wq);
+
+       return IRQ_HANDLED;
+}
+
+int malidp_de_irq_init(struct drm_device *drm, int irq)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
+       int ret;
+
+       /* ensure interrupts are disabled */
+       malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+       malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
+       malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+       malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
+
+       ret = devm_request_threaded_irq(drm->dev, irq, malidp_de_irq,
+                                       malidp_de_irq_thread_handler,
+                                       IRQF_SHARED, "malidp-de", drm);
+       if (ret < 0) {
+               DRM_ERROR("failed to install DE IRQ handler\n");
+               return ret;
+       }
+
+       /* first enable the DC block IRQs */
+       malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
+                            hwdev->map.dc_irq_map.irq_mask);
+
+       /* now enable the DE block IRQs */
+       malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
+                            hwdev->map.de_irq_map.irq_mask);
+
+       return 0;
+}
+
+void malidp_de_irq_fini(struct drm_device *drm)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
+
+       malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
+                             hwdev->map.de_irq_map.irq_mask);
+       malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
+                             hwdev->map.dc_irq_map.irq_mask);
+}
+
+static irqreturn_t malidp_se_irq(int irq, void *arg)
+{
+       struct drm_device *drm = arg;
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
+       u32 status, mask;
+
+       status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+       if (!(status & hwdev->map.se_irq_map.irq_mask))
+               return IRQ_NONE;
+
+       mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
+       status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+       status &= mask;
+       /* ToDo: status decoding and firing up of VSYNC and page flip events */
+
+       malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg)
+{
+       return IRQ_HANDLED;
+}
+
+int malidp_se_irq_init(struct drm_device *drm, int irq)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
+       int ret;
+
+       /* ensure interrupts are disabled */
+       malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+       malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
+
+       ret = devm_request_threaded_irq(drm->dev, irq, malidp_se_irq,
+                                       malidp_se_irq_thread_handler,
+                                       IRQF_SHARED, "malidp-se", drm);
+       if (ret < 0) {
+               DRM_ERROR("failed to install SE IRQ handler\n");
+               return ret;
+       }
+
+       malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
+                            hwdev->map.se_irq_map.irq_mask);
+
+       return 0;
+}
+
+void malidp_se_irq_fini(struct drm_device *drm)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       struct malidp_hw_device *hwdev = malidp->dev;
+
+       malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
+                             hwdev->map.se_irq_map.irq_mask);
+}
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
new file mode 100644
index 0000000..141743e
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ *
+ * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP hardware manipulation routines.
+ */
+
+#ifndef __MALIDP_HW_H__
+#define __MALIDP_HW_H__
+
+#include <linux/bitops.h>
+#include "malidp_regs.h"
+
+struct videomode;
+struct clk;
+
+/* Mali DP IP blocks */
+enum {
+       MALIDP_DE_BLOCK = 0,
+       MALIDP_SE_BLOCK,
+       MALIDP_DC_BLOCK
+};
+
+/* Mali DP layer IDs */
+enum {
+       DE_VIDEO1 = BIT(0),
+       DE_GRAPHICS1 = BIT(1),
+       DE_GRAPHICS2 = BIT(2), /* used only in DP500 */
+       DE_VIDEO2 = BIT(3),
+       DE_SMART = BIT(4),
+};
+
+struct malidp_input_format {
+       u32 format;             /* DRM fourcc */
+       u8 layer;               /* bitmask of layers supporting it */
+       u8 id;                  /* used internally */
+};
+
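+/*
+ * e.g. a (hypothetical) map entry for ARGB8888 supported on both DP500
+ * graphics layers:
+ *    { DRM_FORMAT_ARGB8888, DE_GRAPHICS1 | DE_GRAPHICS2, <hw format id> }
+ */
+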
+#define MALIDP_INVALID_FORMAT_ID       0xff
+
+/*
+ * hide the differences between register maps
+ * by using a common structure to hold the
+ * base register offsets
+ */
+
+struct malidp_irq_map {
+       u32 irq_mask;           /* mask of IRQs that can be enabled in the block */
+       u32 vsync_irq;          /* IRQ bit used for signaling during VSYNC */
+};
+
+struct malidp_layer {
+       u16 id;                 /* layer ID */
+       u16 base;               /* address offset for the register bank */
+       u16 ptr;                /* address offset for the pointer register */
+};
+
+/* regmap features */
+#define MALIDP_REGMAP_HAS_CLEARIRQ     (1 << 0)
+
+struct malidp_hw_regmap {
+       /* address offset of the DE register bank is always 0x0000 */
+       /* address offset of the SE register bank */
+       const u16 se_base;
+       /* address offset of the DC register bank */
+       const u16 dc_base;
+
+       /* address offset for the output depth register */
+       const u16 out_depth_base;
+
+       /* bitmap with register map features */
+       const u8 features;
+
+       /* list of supported layers */
+       const u8 n_layers;
+       const struct malidp_layer *layers;
+
+       const struct malidp_irq_map de_irq_map;
+       const struct malidp_irq_map se_irq_map;
+       const struct malidp_irq_map dc_irq_map;
+
+       /* list of supported input formats for each layer */
+       const struct malidp_input_format *input_formats;
+       const u8 n_input_formats;
+};
+
+struct malidp_hw_device {
+       const struct malidp_hw_regmap map;
+       void __iomem *regs;
+
+       /* APB clock */
+       struct clk *pclk;
+       /* AXI clock */
+       struct clk *aclk;
+       /* main clock for display core */
+       struct clk *mclk;
+       /* pixel clock for display core */
+       struct clk *pxlclk;
+
+       /*
+        * Validate the driver instance against the hardware bits
+        */
+       int (*query_hw)(struct malidp_hw_device *hwdev);
+
+       /*
+        * Set the hardware into config mode, ready to accept mode changes
+        */
+       void (*enter_config_mode)(struct malidp_hw_device *hwdev);
+
+       /*
+        * Tell hardware to exit configuration mode
+        */
+       void (*leave_config_mode)(struct malidp_hw_device *hwdev);
+
+       /*
+        * Query if hardware is in configuration mode
+        */
+       bool (*in_config_mode)(struct malidp_hw_device *hwdev);
+
+       /*
+        * Set configuration valid flag for hardware parameters that can
+        * be changed outside the configuration mode. Hardware will use
+        * the new settings when config valid is set after the end of the
+        * current buffer scanout
+        */
+       void (*set_config_valid)(struct malidp_hw_device *hwdev);
+
+       /*
+        * Set a new mode in hardware. Requires the hardware to be in
+        * configuration mode before this function is called.
+        */
+       void (*modeset)(struct malidp_hw_device *hwdev, struct videomode *m);
+
+       /*
+        * Calculate the required rotation memory given the active area
+        * and the buffer format.
+        */
+       int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
+
+       u8 features;
+
+       u8 min_line_size;
+       u16 max_line_size;
+
+       /* size of memory used for rotating layers, up to two banks available */
+       u32 rotation_memory[2];
+};
+
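+/*
+ * A sketch of the expected calling sequence for the hooks above when
+ * programming a new mode (illustrative only; 'vm' stands for a struct
+ * videomode prepared by the caller, and error handling is omitted):
+ *
+ *     if (!hwdev->in_config_mode(hwdev))
+ *             hwdev->enter_config_mode(hwdev);
+ *     hwdev->modeset(hwdev, &vm);
+ *     hwdev->leave_config_mode(hwdev);
+ *
+ * Settings that can be changed outside config mode are latched at the
+ * end of the current scanout by calling hwdev->set_config_valid(hwdev).
+ */
+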
+/* Supported variants of the hardware */
+enum {
+       MALIDP_500 = 0,
+       MALIDP_550,
+       MALIDP_650,
+       /* keep the next entry last */
+       MALIDP_MAX_DEVICES
+};
+
+extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
+
+static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
+{
+       return readl(hwdev->regs + reg);
+}
+
+static inline void malidp_hw_write(struct malidp_hw_device *hwdev,
+                                  u32 value, u32 reg)
+{
+       writel(value, hwdev->regs + reg);
+}
+
+static inline void malidp_hw_setbits(struct malidp_hw_device *hwdev,
+                                    u32 mask, u32 reg)
+{
+       u32 data = malidp_hw_read(hwdev, reg);
+
+       data |= mask;
+       malidp_hw_write(hwdev, data, reg);
+}
+
+static inline void malidp_hw_clearbits(struct malidp_hw_device *hwdev,
+                                      u32 mask, u32 reg)
+{
+       u32 data = malidp_hw_read(hwdev, reg);
+
+       data &= ~mask;
+       malidp_hw_write(hwdev, data, reg);
+}
+
+static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev,
+                                       u8 block)
+{
+       switch (block) {
+       case MALIDP_SE_BLOCK:
+               return hwdev->map.se_base;
+       case MALIDP_DC_BLOCK:
+               return hwdev->map.dc_base;
+       }
+
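+       /* MALIDP_DE_BLOCK: the DE register bank always starts at offset 0 */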
+       return 0;
+}
+
+static inline void malidp_hw_disable_irq(struct malidp_hw_device *hwdev,
+                                        u8 block, u32 irq)
+{
+       u32 base = malidp_get_block_base(hwdev, block);
+
+       malidp_hw_clearbits(hwdev, irq, base + MALIDP_REG_MASKIRQ);
+}
+
+static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev,
+                                       u8 block, u32 irq)
+{
+       u32 base = malidp_get_block_base(hwdev, block);
+
+       malidp_hw_setbits(hwdev, irq, base + MALIDP_REG_MASKIRQ);
+}
+
+int malidp_de_irq_init(struct drm_device *drm, int irq);
+void malidp_de_irq_fini(struct drm_device *drm);
+int malidp_se_irq_init(struct drm_device *drm, int irq);
+void malidp_se_irq_fini(struct drm_device *drm);
+
+u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
+                          u8 layer_id, u32 format);
+
+/*
+ * Background color components are defined as 12-bit values; they will
+ * be shifted right when stored on hardware that supports only 8 bits
+ * per channel.
+ */
+#define MALIDP_BGND_COLOR_R            0x000
+#define MALIDP_BGND_COLOR_G            0x000
+#define MALIDP_BGND_COLOR_B            0x000
+
+#endif  /* __MALIDP_HW_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
new file mode 100644 (file)
index 0000000..725098d
--- /dev/null
@@ -0,0 +1,298 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP plane manipulation routines.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "malidp_hw.h"
+#include "malidp_drv.h"
+
+/* Layer specific register offsets */
+#define MALIDP_LAYER_FORMAT            0x000
+#define MALIDP_LAYER_CONTROL           0x004
+#define   LAYER_ENABLE                 (1 << 0)
+#define   LAYER_ROT_OFFSET             8
+#define   LAYER_H_FLIP                 (1 << 10)
+#define   LAYER_V_FLIP                 (1 << 11)
+#define   LAYER_ROT_MASK               (0xf << 8)
+#define MALIDP_LAYER_SIZE              0x00c
+#define   LAYER_H_VAL(x)               (((x) & 0x1fff) << 0)
+#define   LAYER_V_VAL(x)               (((x) & 0x1fff) << 16)
+#define MALIDP_LAYER_COMP_SIZE         0x010
+#define MALIDP_LAYER_OFFSET            0x014
+#define MALIDP_LAYER_STRIDE            0x018
+
+static void malidp_de_plane_destroy(struct drm_plane *plane)
+{
+       struct malidp_plane *mp = to_malidp_plane(plane);
+
+       if (mp->base.fb)
+               drm_framebuffer_unreference(mp->base.fb);
+
+       drm_plane_helper_disable(plane);
+       drm_plane_cleanup(plane);
+       devm_kfree(plane->dev->dev, mp);
+}
+
+static struct drm_plane_state *
+malidp_duplicate_plane_state(struct drm_plane *plane)
+{
+       struct malidp_plane_state *state, *m_state;
+
+       if (!plane->state)
+               return NULL;
+
+       state = kmalloc(sizeof(*state), GFP_KERNEL);
+       if (state) {
+               m_state = to_malidp_plane_state(plane->state);
+               __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+               state->rotmem_size = m_state->rotmem_size;
+       }
+
+       return &state->base;
+}
+
+static void malidp_destroy_plane_state(struct drm_plane *plane,
+                                      struct drm_plane_state *state)
+{
+       struct malidp_plane_state *m_state = to_malidp_plane_state(state);
+
+       __drm_atomic_helper_plane_destroy_state(state);
+       kfree(m_state);
+}
+
+static const struct drm_plane_funcs malidp_de_plane_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = malidp_de_plane_destroy,
+       .reset = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = malidp_duplicate_plane_state,
+       .atomic_destroy_state = malidp_destroy_plane_state,
+};
+
+static int malidp_de_plane_check(struct drm_plane *plane,
+                                struct drm_plane_state *state)
+{
+       struct malidp_plane *mp = to_malidp_plane(plane);
+       struct malidp_plane_state *ms = to_malidp_plane_state(state);
+       u8 format_id;
+       u32 src_w, src_h;
+
+       if (!state->crtc || !state->fb)
+               return 0;
+
+       format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
+                                           state->fb->pixel_format);
+       if (format_id == MALIDP_INVALID_FORMAT_ID)
+               return -EINVAL;
+
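+       /* source coordinates are in Q16.16 fixed point, keep the integer part */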
+       src_w = state->src_w >> 16;
+       src_h = state->src_h >> 16;
+
+       if ((state->crtc_w > mp->hwdev->max_line_size) ||
+           (state->crtc_h > mp->hwdev->max_line_size) ||
+           (state->crtc_w < mp->hwdev->min_line_size) ||
+           (state->crtc_h < mp->hwdev->min_line_size) ||
+           (state->crtc_w != src_w) || (state->crtc_h != src_h))
+               return -EINVAL;
+
+       /* packed RGB888 / BGR888 can't be rotated or flipped */
+       if (state->rotation != BIT(DRM_ROTATE_0) &&
+           (state->fb->pixel_format == DRM_FORMAT_RGB888 ||
+            state->fb->pixel_format == DRM_FORMAT_BGR888))
+               return -EINVAL;
+
+       ms->rotmem_size = 0;
+       if (state->rotation & MALIDP_ROTATED_MASK) {
+               int val;
+
+               val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
+                                                state->crtc_w,
+                                                state->fb->pixel_format);
+               if (val < 0)
+                       return val;
+
+               ms->rotmem_size = val;
+       }
+
+       return 0;
+}
+
+static void malidp_de_plane_update(struct drm_plane *plane,
+                                  struct drm_plane_state *old_state)
+{
+       struct drm_gem_cma_object *obj;
+       struct malidp_plane *mp;
+       const struct malidp_hw_regmap *map;
+       u8 format_id;
+       u16 ptr;
+       u32 format, src_w, src_h, dest_w, dest_h, val = 0;
+       int num_planes, i;
+
+       mp = to_malidp_plane(plane);
+
+       map = &mp->hwdev->map;
+       format = plane->state->fb->pixel_format;
+       format_id = malidp_hw_get_format_id(map, mp->layer->id, format);
+       num_planes = drm_format_num_planes(format);
+
+       /* convert src values from Q16 fixed point to integer */
+       src_w = plane->state->src_w >> 16;
+       src_h = plane->state->src_h >> 16;
+       if (plane->state->rotation & MALIDP_ROTATED_MASK) {
+               dest_w = plane->state->crtc_h;
+               dest_h = plane->state->crtc_w;
+       } else {
+               dest_w = plane->state->crtc_w;
+               dest_h = plane->state->crtc_h;
+       }
+
+       malidp_hw_write(mp->hwdev, format_id, mp->layer->base);
+
+       for (i = 0; i < num_planes; i++) {
+               /* calculate the offset for the layer's plane registers */
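+               /* pointer registers for each plane are spaced 16 bytes apart */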
+               ptr = mp->layer->ptr + (i << 4);
+
+               obj = drm_fb_cma_get_gem_obj(plane->state->fb, i);
+               malidp_hw_write(mp->hwdev, lower_32_bits(obj->paddr), ptr);
+               malidp_hw_write(mp->hwdev, upper_32_bits(obj->paddr), ptr + 4);
+               malidp_hw_write(mp->hwdev, plane->state->fb->pitches[i],
+                               mp->layer->base + MALIDP_LAYER_STRIDE);
+       }
+
+       malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+                       mp->layer->base + MALIDP_LAYER_SIZE);
+
+       malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
+                       mp->layer->base + MALIDP_LAYER_COMP_SIZE);
+
+       malidp_hw_write(mp->hwdev, LAYER_H_VAL(plane->state->crtc_x) |
+                       LAYER_V_VAL(plane->state->crtc_y),
+                       mp->layer->base + MALIDP_LAYER_OFFSET);
+
+       /* first clear the rotation bits in the register */
+       malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK,
+                           mp->layer->base + MALIDP_LAYER_CONTROL);
+
+       /* setup the rotation and axis flip bits */
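+       /* DRM_ROTATE_* values are single bits, so ilog2() yields indices 0-3 */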
+       if (plane->state->rotation & DRM_ROTATE_MASK)
+               val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
+       if (plane->state->rotation & BIT(DRM_REFLECT_X))
+               val |= LAYER_V_FLIP;
+       if (plane->state->rotation & BIT(DRM_REFLECT_Y))
+               val |= LAYER_H_FLIP;
+
+       /* set the 'enable layer' bit */
+       val |= LAYER_ENABLE;
+
+       malidp_hw_setbits(mp->hwdev, val,
+                         mp->layer->base + MALIDP_LAYER_CONTROL);
+}
+
+static void malidp_de_plane_disable(struct drm_plane *plane,
+                                   struct drm_plane_state *state)
+{
+       struct malidp_plane *mp = to_malidp_plane(plane);
+
+       malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE,
+                           mp->layer->base + MALIDP_LAYER_CONTROL);
+}
+
+static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
+       .atomic_check = malidp_de_plane_check,
+       .atomic_update = malidp_de_plane_update,
+       .atomic_disable = malidp_de_plane_disable,
+};
+
+int malidp_de_planes_init(struct drm_device *drm)
+{
+       struct malidp_drm *malidp = drm->dev_private;
+       const struct malidp_hw_regmap *map = &malidp->dev->map;
+       struct malidp_plane *plane = NULL;
+       enum drm_plane_type plane_type;
+       unsigned long crtcs = 1 << drm->mode_config.num_crtc;
+       u32 *formats;
+       int ret, i, j, n;
+
+       formats = kcalloc(map->n_input_formats, sizeof(*formats), GFP_KERNEL);
+       if (!formats) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       for (i = 0; i < map->n_layers; i++) {
+               u8 id = map->layers[i].id;
+
+               plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+               if (!plane) {
+                       ret = -ENOMEM;
+                       goto cleanup;
+               }
+
+               /* build the list of DRM supported formats based on the map */
+               for (n = 0, j = 0;  j < map->n_input_formats; j++) {
+                       if ((map->input_formats[j].layer & id) == id)
+                               formats[n++] = map->input_formats[j].format;
+               }
+
+               plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
+                                       DRM_PLANE_TYPE_OVERLAY;
+               ret = drm_universal_plane_init(drm, &plane->base, crtcs,
+                                              &malidp_de_plane_funcs, formats,
+                                              n, plane_type, NULL);
+               if (ret < 0)
+                       goto cleanup;
+
+               if (!drm->mode_config.rotation_property) {
+                       unsigned long flags = BIT(DRM_ROTATE_0) |
+                                             BIT(DRM_ROTATE_90) |
+                                             BIT(DRM_ROTATE_180) |
+                                             BIT(DRM_ROTATE_270) |
+                                             BIT(DRM_REFLECT_X) |
+                                             BIT(DRM_REFLECT_Y);
+                       drm->mode_config.rotation_property =
+                               drm_mode_create_rotation_property(drm, flags);
+               }
+               /* SMART layer can't be rotated */
+               if (drm->mode_config.rotation_property && (id != DE_SMART))
+                       drm_object_attach_property(&plane->base.base,
+                                                  drm->mode_config.rotation_property,
+                                                  BIT(DRM_ROTATE_0));
+
+               drm_plane_helper_add(&plane->base,
+                                    &malidp_de_plane_helper_funcs);
+               plane->hwdev = malidp->dev;
+               plane->layer = &map->layers[i];
+       }
+
+       kfree(formats);
+
+       return 0;
+
+cleanup:
+       malidp_de_planes_destroy(drm);
+       kfree(formats);
+
+       return ret;
+}
+
+void malidp_de_planes_destroy(struct drm_device *drm)
+{
+       struct drm_plane *p, *pt;
+
+       list_for_each_entry_safe(p, pt, &drm->mode_config.plane_list, head) {
+               drm_plane_cleanup(p);
+               kfree(p);
+       }
+}
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
new file mode 100644 (file)
index 0000000..73fecb3
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * ARM Mali DP500/DP550/DP650 registers definition.
+ */
+
+#ifndef __MALIDP_REGS_H__
+#define __MALIDP_REGS_H__
+
+/*
+ * abbreviations used:
+ *    - DC - display core (general settings)
+ *    - DE - display engine
+ *    - SE - scaling engine
+ */
+
+/* interrupt bit masks */
+#define MALIDP_DE_IRQ_UNDERRUN                 (1 << 0)
+
+#define MALIDP500_DE_IRQ_AXI_ERR               (1 << 4)
+#define MALIDP500_DE_IRQ_VSYNC                 (1 << 5)
+#define MALIDP500_DE_IRQ_PROG_LINE             (1 << 6)
+#define MALIDP500_DE_IRQ_SATURATION            (1 << 7)
+#define MALIDP500_DE_IRQ_CONF_VALID            (1 << 8)
+#define MALIDP500_DE_IRQ_CONF_MODE             (1 << 11)
+#define MALIDP500_DE_IRQ_CONF_ACTIVE           (1 << 17)
+#define MALIDP500_DE_IRQ_PM_ACTIVE             (1 << 18)
+#define MALIDP500_DE_IRQ_TESTMODE_ACTIVE       (1 << 19)
+#define MALIDP500_DE_IRQ_FORCE_BLNK_ACTIVE     (1 << 24)
+#define MALIDP500_DE_IRQ_AXI_BUSY              (1 << 28)
+#define MALIDP500_DE_IRQ_GLOBAL                        (1 << 31)
+#define MALIDP500_SE_IRQ_CONF_MODE             (1 << 0)
+#define MALIDP500_SE_IRQ_CONF_VALID            (1 << 4)
+#define MALIDP500_SE_IRQ_INIT_BUSY             (1 << 5)
+#define MALIDP500_SE_IRQ_AXI_ERROR             (1 << 8)
+#define MALIDP500_SE_IRQ_OVERRUN               (1 << 9)
+#define MALIDP500_SE_IRQ_PROG_LINE1            (1 << 12)
+#define MALIDP500_SE_IRQ_PROG_LINE2            (1 << 13)
+#define MALIDP500_SE_IRQ_CONF_ACTIVE           (1 << 17)
+#define MALIDP500_SE_IRQ_PM_ACTIVE             (1 << 18)
+#define MALIDP500_SE_IRQ_AXI_BUSY              (1 << 28)
+#define MALIDP500_SE_IRQ_GLOBAL                        (1 << 31)
+
+#define MALIDP550_DE_IRQ_SATURATION            (1 << 8)
+#define MALIDP550_DE_IRQ_VSYNC                 (1 << 12)
+#define MALIDP550_DE_IRQ_PROG_LINE             (1 << 13)
+#define MALIDP550_DE_IRQ_AXI_ERR               (1 << 16)
+#define MALIDP550_SE_IRQ_EOW                   (1 << 0)
+#define MALIDP550_SE_IRQ_AXI_ERR               (1 << 16)
+#define MALIDP550_DC_IRQ_CONF_VALID            (1 << 0)
+#define MALIDP550_DC_IRQ_CONF_MODE             (1 << 4)
+#define MALIDP550_DC_IRQ_CONF_ACTIVE           (1 << 16)
+#define MALIDP550_DC_IRQ_DE                    (1 << 20)
+#define MALIDP550_DC_IRQ_SE                    (1 << 24)
+
+#define MALIDP650_DE_IRQ_DRIFT                 (1 << 4)
+
+/* bit masks that are common between products */
+#define   MALIDP_CFG_VALID             (1 << 0)
+#define   MALIDP_DISP_FUNC_ILACED      (1 << 8)
+
+/* register offsets for IRQ management */
+#define MALIDP_REG_STATUS              0x00000
+#define MALIDP_REG_SETIRQ              0x00004
+#define MALIDP_REG_MASKIRQ             0x00008
+#define MALIDP_REG_CLEARIRQ            0x0000c
+
+/* register offsets */
+#define MALIDP_DE_CORE_ID              0x00018
+#define MALIDP_DE_DISPLAY_FUNC         0x00020
+
+/* these offsets are relative to MALIDP5x0_TIMINGS_BASE */
+#define MALIDP_DE_H_TIMINGS            0x0
+#define MALIDP_DE_V_TIMINGS            0x4
+#define MALIDP_DE_SYNC_WIDTH           0x8
+#define MALIDP_DE_HV_ACTIVE            0xc
+
+/* macros to set values into registers */
+#define MALIDP_DE_H_FRONTPORCH(x)      (((x) & 0xfff) << 0)
+#define MALIDP_DE_H_BACKPORCH(x)       (((x) & 0x3ff) << 16)
+#define MALIDP500_DE_V_FRONTPORCH(x)   (((x) & 0xff) << 0)
+#define MALIDP550_DE_V_FRONTPORCH(x)   (((x) & 0xfff) << 0)
+#define MALIDP_DE_V_BACKPORCH(x)       (((x) & 0xff) << 16)
+#define MALIDP_DE_H_SYNCWIDTH(x)       (((x) & 0x3ff) << 0)
+#define MALIDP_DE_V_SYNCWIDTH(x)       (((x) & 0xff) << 16)
+#define MALIDP_DE_H_ACTIVE(x)          (((x) & 0x1fff) << 0)
+#define MALIDP_DE_V_ACTIVE(x)          (((x) & 0x1fff) << 16)
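+/*
+ * e.g. packing the 1080p CEA horizontal timings (hfront_porch = 88,
+ * hback_porch = 148):
+ *    MALIDP_DE_H_FRONTPORCH(88) | MALIDP_DE_H_BACKPORCH(148) = 0x00940058
+ */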
+
+/* register offsets and bits specific to DP500 */
+#define MALIDP500_DC_BASE              0x00000
+#define MALIDP500_DC_CONTROL           0x0000c
+#define   MALIDP500_DC_CONFIG_REQ      (1 << 17)
+#define   MALIDP500_HSYNCPOL           (1 << 20)
+#define   MALIDP500_VSYNCPOL           (1 << 21)
+#define   MALIDP500_DC_CLEAR_MASK      0x300fff
+#define MALIDP500_DE_LINE_COUNTER      0x00010
+#define MALIDP500_DE_AXI_CONTROL       0x00014
+#define MALIDP500_DE_SECURE_CTRL       0x0001c
+#define MALIDP500_DE_CHROMA_KEY                0x00024
+#define MALIDP500_TIMINGS_BASE         0x00028
+
+#define MALIDP500_CONFIG_3D            0x00038
+#define MALIDP500_BGND_COLOR           0x0003c
+#define MALIDP500_OUTPUT_DEPTH         0x00044
+#define MALIDP500_YUV_RGB_COEF         0x00048
+#define MALIDP500_COLOR_ADJ_COEF       0x00078
+#define MALIDP500_COEF_TABLE_ADDR      0x000a8
+#define MALIDP500_COEF_TABLE_DATA      0x000ac
+#define MALIDP500_DE_LV_BASE           0x00100
+#define MALIDP500_DE_LV_PTR_BASE       0x00124
+#define MALIDP500_DE_LG1_BASE          0x00200
+#define MALIDP500_DE_LG1_PTR_BASE      0x0021c
+#define MALIDP500_DE_LG2_BASE          0x00300
+#define MALIDP500_DE_LG2_PTR_BASE      0x0031c
+#define MALIDP500_SE_BASE              0x00c00
+#define MALIDP500_SE_PTR_BASE          0x00e0c
+#define MALIDP500_DC_IRQ_BASE          0x00f00
+#define MALIDP500_CONFIG_VALID         0x00f00
+#define MALIDP500_CONFIG_ID            0x00fd4
+
+/* register offsets and bits specific to DP550/DP650 */
+#define MALIDP550_DE_CONTROL           0x00010
+#define MALIDP550_DE_LINE_COUNTER      0x00014
+#define MALIDP550_DE_AXI_CONTROL       0x00018
+#define MALIDP550_DE_QOS               0x0001c
+#define MALIDP550_TIMINGS_BASE         0x00030
+#define MALIDP550_HSYNCPOL             (1 << 12)
+#define MALIDP550_VSYNCPOL             (1 << 28)
+
+#define MALIDP550_DE_DISP_SIDEBAND     0x00040
+#define MALIDP550_DE_BGND_COLOR                0x00044
+#define MALIDP550_DE_OUTPUT_DEPTH      0x0004c
+#define MALIDP550_DE_COLOR_COEF                0x00050
+#define MALIDP550_DE_COEF_TABLE_ADDR   0x00080
+#define MALIDP550_DE_COEF_TABLE_DATA   0x00084
+#define MALIDP550_DE_LV1_BASE          0x00100
+#define MALIDP550_DE_LV1_PTR_BASE      0x00124
+#define MALIDP550_DE_LV2_BASE          0x00200
+#define MALIDP550_DE_LV2_PTR_BASE      0x00224
+#define MALIDP550_DE_LG_BASE           0x00300
+#define MALIDP550_DE_LG_PTR_BASE       0x0031c
+#define MALIDP550_DE_LS_BASE           0x00400
+#define MALIDP550_DE_LS_PTR_BASE       0x0042c
+#define MALIDP550_DE_PERF_BASE         0x00500
+#define MALIDP550_SE_BASE              0x08000
+#define MALIDP550_DC_BASE              0x0c000
+#define MALIDP550_DC_CONTROL           0x0c010
+#define   MALIDP550_DC_CONFIG_REQ      (1 << 16)
+#define MALIDP550_CONFIG_VALID         0x0c014
+#define MALIDP550_CONFIG_ID            0x0ffd4
+
+/*
+ * Starting with DP550 the register map blocks have been standardised to the
+ * following layout:
+ *
+ *   Offset            Block registers
+ *  0x00000            Display Engine
+ *  0x08000            Scaling Engine
+ *  0x0c000            Display Core
+ *  0x10000            Secure control
+ *
+ * The old DP500 IP mixes some DC with the DE registers, hence the need
+ * for a mapping structure.
+ */
+
+#endif /* __MALIDP_REGS_H__ */
index eb773e9af313a6715f21dcc1ede2cf98eee07afe..15f3ecfb16f1f794caca89e282811b2083e84e80 100644
@@ -1,11 +1,7 @@
 config DRM_ARMADA
        tristate "DRM support for Marvell Armada SoCs"
        depends on DRM && HAVE_CLK && ARM
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        help
          Support the "LCD" controllers found on the Marvell Armada 510
          devices.  There are two controllers on the device, each controller
index 3130aa8bcdd0a18d074d285e45d3bfed44a6fc8a..2f58e9e2a59cb4346e339a43953e3ade8941889a 100644
@@ -199,7 +199,7 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
        /* Handle any pending frame work. */
        if (work) {
                work->fn(dcrtc, plane, work);
-               drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+               drm_crtc_vblank_put(&dcrtc->crtc);
        }
 
        wake_up(&plane->frame_wait);
@@ -210,7 +210,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
 {
        int ret;
 
-       ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+       ret = drm_crtc_vblank_get(&dcrtc->crtc);
        if (ret) {
                DRM_ERROR("failed to acquire vblank counter\n");
                return ret;
@@ -218,7 +218,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
 
        ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
        if (ret)
-               drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+               drm_crtc_vblank_put(&dcrtc->crtc);
 
        return ret;
 }
@@ -234,7 +234,7 @@ struct armada_plane_work *armada_drm_plane_work_cancel(
        struct armada_plane_work *work = xchg(&plane->work, NULL);
 
        if (work)
-               drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+               drm_crtc_vblank_put(&dcrtc->crtc);
 
        return work;
 }
@@ -260,7 +260,7 @@ static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
 
        if (fwork->event) {
                spin_lock_irqsave(&dev->event_lock, flags);
-               drm_send_vblank_event(dev, dcrtc->num, fwork->event);
+               drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event);
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }
 
@@ -410,7 +410,7 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
                DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
 
        if (stat & VSYNC_IRQ)
-               drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+               drm_crtc_handle_vblank(&dcrtc->crtc);
 
        spin_lock(&dcrtc->irq_lock);
        ovl_plane = dcrtc->plane;
@@ -592,9 +592,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 
        if (interlaced ^ dcrtc->interlaced) {
                if (adj->flags & DRM_MODE_FLAG_INTERLACE)
-                       drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+                       drm_crtc_vblank_get(&dcrtc->crtc);
                else
-                       drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+                       drm_crtc_vblank_put(&dcrtc->crtc);
                dcrtc->interlaced = interlaced;
        }
 
index 439824a61aa5276e465975a8ae79e8ab6da7c552..f5ebdd68144579c12dea0c8342ce8dd24e89e2e3 100644
@@ -189,7 +189,6 @@ static struct drm_driver armada_drm_driver = {
        .load                   = armada_drm_load,
        .lastclose              = armada_drm_lastclose,
        .unload                 = armada_drm_unload,
-       .set_busid              = drm_platform_set_busid,
        .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = armada_drm_enable_vblank,
        .disable_vblank         = armada_drm_disable_vblank,
@@ -197,7 +196,7 @@ static struct drm_driver armada_drm_driver = {
        .debugfs_init           = armada_drm_debugfs_init,
        .debugfs_cleanup        = armada_drm_debugfs_cleanup,
 #endif
-       .gem_free_object        = armada_gem_free_object,
+       .gem_free_object_unlocked = armada_gem_free_object,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = armada_gem_prime_export,
index 148e8a42b2c690858a7688f881f50d208338cb96..1ee707ef6b8d0d0b51033903dbe3a189de700f80 100644
@@ -121,6 +121,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        int ret;
 
        ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
+                                           BIT(DRM_ROTATE_0),
                                            0, INT_MAX, true, false, &visible);
        if (ret)
                return ret;
index 8a784c460c89d8963effbb25b4509878d2003bc5..15f6ce7acb2a21400b0d39765b90241a839f525a 100644
@@ -2,11 +2,7 @@ config DRM_AST
        tristate "AST server chips"
        depends on DRM && PCI
        select DRM_TTM
-       select FB_SYS_COPYAREA
-       select FB_SYS_FILLRECT
-       select FB_SYS_IMAGEBLIT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
        help
         Say yes for experimental AST GPU driver. Do not enable
index fcd9c0714836cdd2175c956d78226930846c0aca..f54afd2113a9700d38fc2978055438e9013e8843 100644
@@ -209,7 +209,7 @@ static struct drm_driver driver = {
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
 
-       .gem_free_object = ast_gem_free_object,
+       .gem_free_object_unlocked = ast_gem_free_object,
        .dumb_create = ast_dumb_create,
        .dumb_map_offset = ast_dumb_mmap_offset,
        .dumb_destroy = drm_gem_dumb_destroy,
index 5320f8c57884567dd814aaac3ff595fde5f1425f..c017a9330a18991cbd05eaa9be7cfeef557de98e 100644
@@ -167,12 +167,9 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
                               struct drm_gem_object **gobj_p)
 {
        struct drm_device *dev = afbdev->helper.dev;
-       u32 bpp, depth;
        u32 size;
        struct drm_gem_object *gobj;
-
        int ret = 0;
-       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
        size = mode_cmd->pitches[0] * mode_cmd->height;
        ret = ast_gem_create(dev, size, true, &gobj);
index 7bc3aa6dda8cff86f78ec0400bac76d852ebec95..904beaa932d03f0b5ce7d89a13d16729582052e0 100644
@@ -295,9 +295,8 @@ static int ast_get_dram_info(struct drm_device *dev)
 static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
-       if (ast_fb->obj)
-               drm_gem_object_unreference_unlocked(ast_fb->obj);
 
+       drm_gem_object_unreference_unlocked(ast_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(fb);
 }
index c337922606e322ae9e0abdf57fc4196849f0bc39..5957c3e659feeb7e4ead966b5912a1584d3bb2a9 100644
@@ -624,19 +624,21 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
 
 }
 
-static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-                                u16 *blue, uint32_t start, uint32_t size)
+static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                             u16 *blue, uint32_t size)
 {
        struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
-       int end = (start + size > 256) ? 256 : start + size, i;
+       int i;
 
        /* userspace palettes are always correct as is */
-       for (i = start; i < end; i++) {
+       for (i = 0; i < size; i++) {
                ast_crtc->lut_r[i] = red[i] >> 8;
                ast_crtc->lut_g[i] = green[i] >> 8;
                ast_crtc->lut_b[i] = blue[i] >> 8;
        }
        ast_crtc_load_lut(crtc);
+
+       return 0;
 }
 
 
index 59f2f93b6f84859ea04c9d092df3f276d2f6a7a6..b29a41218fc92d8c6c6cfe7157b061e69f29fb24 100644
@@ -186,17 +186,6 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
 {
 }
 
-static int ast_bo_move(struct ttm_buffer_object *bo,
-                      bool evict, bool interruptible,
-                      bool no_wait_gpu,
-                      struct ttm_mem_reg *new_mem)
-{
-       int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-       return r;
-}
-
-
 static void ast_ttm_backend_destroy(struct ttm_tt *tt)
 {
        ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
        .ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
        .init_mem_type = ast_bo_init_mem_type,
        .evict_flags = ast_bo_evict_flags,
-       .move = ast_bo_move,
+       .move = NULL,
        .verify_access = ast_bo_verify_access,
        .io_mem_reserve = &ast_ttm_io_mem_reserve,
        .io_mem_free = &ast_ttm_io_mem_free,
index 99b4f0698a3035c08b3d2836a34aa923cbbc6bf8..32bcc4bad06ac4cb18134b511e2b31b62c7b1ef0 100644
@@ -3,7 +3,6 @@ config DRM_ATMEL_HLCDC
        depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_PANEL
        help
index cf23a755f777f8b4df9fcc6b3032c01415c14bd8..a978381ef95bfb3e6562ba34bccf26f0b2208ff4 100644
@@ -374,8 +374,8 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
 
        spin_lock_irqsave(&dev->event_lock, flags);
        if (crtc->event) {
-               drm_send_vblank_event(dev, crtc->id, crtc->event);
-               drm_vblank_put(dev, crtc->id);
+               drm_crtc_send_vblank_event(&crtc->base, crtc->event);
+               drm_crtc_vblank_put(&crtc->base);
                crtc->event = NULL;
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -383,7 +383,7 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
 
 void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
 {
-       drm_handle_vblank(c->dev, 0);
+       drm_crtc_handle_vblank(c);
        atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
 }
 
@@ -391,12 +391,11 @@ void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
 {
        struct atmel_hlcdc_crtc_state *state;
 
-       if (crtc->state && crtc->state->mode_blob)
-               drm_property_unreference_blob(crtc->state->mode_blob);
-
        if (crtc->state) {
+               __drm_atomic_helper_crtc_destroy_state(crtc->state);
                state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
                kfree(state);
+               crtc->state = NULL;
        }
 
        state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -415,8 +414,9 @@ atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc)
                return NULL;
 
        state = kmalloc(sizeof(*state), GFP_KERNEL);
-       if (state)
-               __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+       if (!state)
+               return NULL;
+       __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
 
        cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
        state->output_mode = cur->output_mode;
index 8ded7645747e503ed6d3a56ae15a4417b39f5ac8..d4a3d61b7b06edaa869b03a17e1da51a76580277 100644 (file)
@@ -519,7 +519,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
        }
 
        /* Swap the state, this is the point of no return. */
-       drm_atomic_helper_swap_state(dev, state);
+       drm_atomic_helper_swap_state(state, true);
 
        if (async)
                queue_work(dc->wq, &commit->work);
@@ -691,13 +691,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
        destroy_workqueue(dc->wq);
 }
 
-static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
-{
-       mutex_lock(&dev->mode_config.mutex);
-       drm_connector_unregister_all(dev);
-       mutex_unlock(&dev->mode_config.mutex);
-}
-
 static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
 {
        struct atmel_hlcdc_dc *dc = dev->dev_private;
@@ -776,7 +769,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
        .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank = atmel_hlcdc_dc_enable_vblank,
        .disable_vblank = atmel_hlcdc_dc_disable_vblank,
-       .gem_free_object = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops = &drm_gem_cma_vm_ops,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -815,15 +808,8 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
        if (ret)
                goto err_unload;
 
-       ret = drm_connector_register_all(ddev);
-       if (ret)
-               goto err_unregister;
-
        return 0;
 
-err_unregister:
-       drm_dev_unregister(ddev);
-
 err_unload:
        atmel_hlcdc_dc_unload(ddev);
 
@@ -837,7 +823,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
 {
        struct drm_device *ddev = platform_get_drvdata(pdev);
 
-       atmel_hlcdc_dc_connector_unplug_all(ddev);
        drm_dev_unregister(ddev);
        atmel_hlcdc_dc_unload(ddev);
        drm_dev_unref(ddev);
index 39802c0539b6ba5e2a3361f1441c20da2c5de2ec..6119b50855013522880f4e725b9ac8eb06372b26 100644 (file)
@@ -113,21 +113,9 @@ static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
        return atmel_hlcdc_dc_mode_valid(rgb->dc, mode);
 }
 
-
-
-static struct drm_encoder *
-atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector)
-{
-       struct atmel_hlcdc_rgb_output *rgb =
-                       drm_connector_to_atmel_hlcdc_rgb_output(connector);
-
-       return &rgb->encoder;
-}
-
 static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
        .get_modes = atmel_hlcdc_panel_get_modes,
        .mode_valid = atmel_hlcdc_rgb_mode_valid,
-       .best_encoder = atmel_hlcdc_rgb_best_encoder,
 };
 
 static enum drm_connector_status
@@ -266,9 +254,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
                if (!ret)
                        ret = atmel_hlcdc_check_endpoint(dev, &ep);
 
-               of_node_put(ep_np);
-               if (ret)
+               if (ret) {
+                       of_node_put(ep_np);
                        return ret;
+               }
        }
 
        for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
@@ -276,9 +265,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
                if (!ret)
                        ret = atmel_hlcdc_attach_endpoint(dev, &ep);
 
-               of_node_put(ep_np);
-               if (ret)
+               if (ret) {
+                       of_node_put(ep_np);
                        return ret;
+               }
        }
 
        return 0;
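
Why the of_node_put() moves inside the error branch: for_each_endpoint_of_node() already releases the previous endpoint each time it advances, so an unconditional put inside the loop dropped the reference twice; now the caller puts only when bailing out early. A toy refcount model of the two patterns (simplified accounting, not OF code):

#include <stdio.h>

static int refcount;

static void node_get(void) { refcount++; }
static void node_put(void) { refcount--; }

static void walk(int put_every_iteration)
{
	int i;

	refcount = 0;
	for (i = 0; i < 3; i++) {
		if (i > 0)
			node_put();	/* iterator drops the previous node */
		node_get();		/* iterator takes the next node */
		if (put_every_iteration)
			node_put();	/* the old unconditional put */
	}
	node_put();			/* iterator's release on the final fetch */
	printf("refcount after walk: %d\n", refcount);
}

int main(void)
{
	walk(1);	/* old pattern: ends negative (double put) */
	walk(0);	/* fixed pattern: balanced at zero */
	return 0;
}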
index aef3ca8a81fae37c31c84ba13eb45ff374dab83c..016c191221f35db2246a3e014b8a4f7294d22649 100644 (file)
@@ -339,6 +339,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
 
                atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
                                             factor_reg);
+       } else {
+               atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
        }
 }
 
index 5f8b0c2b9a4461d2e07b1d7a3abb25f69837fc90..f739763f47ce8cedd91205002e6f3fca02126739 100644 (file)
@@ -2,10 +2,6 @@ config DRM_BOCHS
        tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
        depends on DRM && PCI
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
        select DRM_TTM
        help
          Choose this option for qemu.
index b332b4d3b0e284463447519924ecfe08086615ef..abace82de6eac2de22b747ff54a5612443f69647 100644 (file)
@@ -89,7 +89,7 @@ static struct drm_driver bochs_driver = {
        .date                   = "20130925",
        .major                  = 1,
        .minor                  = 0,
-       .gem_free_object        = bochs_gem_free_object,
+       .gem_free_object_unlocked = bochs_gem_free_object,
        .dumb_create            = bochs_dumb_create,
        .dumb_map_offset        = bochs_dumb_mmap_offset,
        .dumb_destroy           = drm_gem_dumb_destroy,
index 6cf912c45e481c1d6a323d40bea32bc0073c275c..5c5638a777a12ad12c1e0fdc8d9627d7d78ea50b 100644 (file)
@@ -165,15 +165,6 @@ static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
 {
 }
 
-static int bochs_bo_move(struct ttm_buffer_object *bo,
-                        bool evict, bool interruptible,
-                        bool no_wait_gpu,
-                        struct ttm_mem_reg *new_mem)
-{
-       return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-}
-
-
 static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
 {
        ttm_tt_fini(tt);
@@ -208,7 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
        .ttm_tt_unpopulate = ttm_pool_unpopulate,
        .init_mem_type = bochs_bo_init_mem_type,
        .evict_flags = bochs_bo_evict_flags,
-       .move = bochs_bo_move,
+       .move = NULL,
        .verify_access = bochs_bo_verify_access,
        .io_mem_reserve = &bochs_ttm_io_mem_reserve,
        .io_mem_free = &bochs_ttm_io_mem_free,
@@ -474,8 +465,8 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
 static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
-       if (bochs_fb->obj)
-               drm_gem_object_unreference_unlocked(bochs_fb->obj);
+
+       drm_gem_object_unreference_unlocked(bochs_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(fb);
 }
index 8f7423f18da5d77ed60bbbb5ccfed55bcf01f308..b590e678052dc1c34fa635487a5da744d6aaa2dd 100644 (file)
@@ -50,6 +50,25 @@ config DRM_PARADE_PS8622
        ---help---
          Parade eDP-LVDS bridge chip driver.
 
+config DRM_SII902X
+       tristate "Silicon Image sii902x RGB/HDMI bridge"
+       depends on OF
+       select DRM_KMS_HELPER
+       select REGMAP_I2C
+       ---help---
+         Silicon Image sii902x bridge chip driver.
+
+config DRM_TOSHIBA_TC358767
+       tristate "Toshiba TC358767 eDP bridge"
+       depends on OF
+       select DRM_KMS_HELPER
+       select REGMAP_I2C
+       select DRM_PANEL
+       ---help---
+         Toshiba TC358767 eDP bridge chip driver.
+
 source "drivers/gpu/drm/bridge/analogix/Kconfig"
 
+source "drivers/gpu/drm/bridge/adv7511/Kconfig"
+
 endmenu
index 96b13b30e6ab53e9593ecc002286c16e30ae8e08..efdb07e878f5c5623d7645fb2c499d219210e37a 100644 (file)
@@ -5,4 +5,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
 obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
 obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
 obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_SII902X) += sii902x.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
 obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
new file mode 100644 (file)
index 0000000..d2b0499
--- /dev/null
@@ -0,0 +1,15 @@
+config DRM_I2C_ADV7511
+       tristate "ADV7511 encoder"
+       depends on OF
+       select DRM_KMS_HELPER
+       select REGMAP_I2C
+       help
+         Support for the Analog Devices ADV7511(W) and ADV7513 HDMI encoders.
+
+config DRM_I2C_ADV7533
+       bool "ADV7533 encoder"
+       depends on DRM_I2C_ADV7511
+       select DRM_MIPI_DSI
+       default y
+       help
+         Support for the Analog Devices ADV7533 DSI to HDMI encoder.
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
new file mode 100644 (file)
index 0000000..9019327
--- /dev/null
@@ -0,0 +1,3 @@
+adv7511-y := adv7511_drv.o
+adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
new file mode 100644 (file)
index 0000000..161c923
--- /dev/null
@@ -0,0 +1,392 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __DRM_I2C_ADV7511_H__
+#define __DRM_I2C_ADV7511_H__
+
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define ADV7511_REG_CHIP_REVISION              0x00
+#define ADV7511_REG_N0                         0x01
+#define ADV7511_REG_N1                         0x02
+#define ADV7511_REG_N2                         0x03
+#define ADV7511_REG_SPDIF_FREQ                 0x04
+#define ADV7511_REG_CTS_AUTOMATIC1             0x05
+#define ADV7511_REG_CTS_AUTOMATIC2             0x06
+#define ADV7511_REG_CTS_MANUAL0                        0x07
+#define ADV7511_REG_CTS_MANUAL1                        0x08
+#define ADV7511_REG_CTS_MANUAL2                        0x09
+#define ADV7511_REG_AUDIO_SOURCE               0x0a
+#define ADV7511_REG_AUDIO_CONFIG               0x0b
+#define ADV7511_REG_I2S_CONFIG                 0x0c
+#define ADV7511_REG_I2S_WIDTH                  0x0d
+#define ADV7511_REG_AUDIO_SUB_SRC0             0x0e
+#define ADV7511_REG_AUDIO_SUB_SRC1             0x0f
+#define ADV7511_REG_AUDIO_SUB_SRC2             0x10
+#define ADV7511_REG_AUDIO_SUB_SRC3             0x11
+#define ADV7511_REG_AUDIO_CFG1                 0x12
+#define ADV7511_REG_AUDIO_CFG2                 0x13
+#define ADV7511_REG_AUDIO_CFG3                 0x14
+#define ADV7511_REG_I2C_FREQ_ID_CFG            0x15
+#define ADV7511_REG_VIDEO_INPUT_CFG1           0x16
+#define ADV7511_REG_CSC_UPPER(x)               (0x18 + (x) * 2)
+#define ADV7511_REG_CSC_LOWER(x)               (0x19 + (x) * 2)
+#define ADV7511_REG_SYNC_DECODER(x)            (0x30 + (x))
+#define ADV7511_REG_DE_GENERATOR(x)            (0x35 + (x))
+#define ADV7511_REG_PIXEL_REPETITION           0x3b
+#define ADV7511_REG_VIC_MANUAL                 0x3c
+#define ADV7511_REG_VIC_SEND                   0x3d
+#define ADV7511_REG_VIC_DETECTED               0x3e
+#define ADV7511_REG_AUX_VIC_DETECTED           0x3f
+#define ADV7511_REG_PACKET_ENABLE0             0x40
+#define ADV7511_REG_POWER                      0x41
+#define ADV7511_REG_STATUS                     0x42
+#define ADV7511_REG_EDID_I2C_ADDR              0x43
+#define ADV7511_REG_PACKET_ENABLE1             0x44
+#define ADV7511_REG_PACKET_I2C_ADDR            0x45
+#define ADV7511_REG_DSD_ENABLE                 0x46
+#define ADV7511_REG_VIDEO_INPUT_CFG2           0x48
+#define ADV7511_REG_INFOFRAME_UPDATE           0x4a
+#define ADV7511_REG_GC(x)                      (0x4b + (x)) /* 0x4b - 0x51 */
+#define ADV7511_REG_AVI_INFOFRAME_VERSION      0x52
+#define ADV7511_REG_AVI_INFOFRAME_LENGTH       0x53
+#define ADV7511_REG_AVI_INFOFRAME_CHECKSUM     0x54
+#define ADV7511_REG_AVI_INFOFRAME(x)           (0x55 + (x)) /* 0x55 - 0x6f */
+#define ADV7511_REG_AUDIO_INFOFRAME_VERSION    0x70
+#define ADV7511_REG_AUDIO_INFOFRAME_LENGTH     0x71
+#define ADV7511_REG_AUDIO_INFOFRAME_CHECKSUM   0x72
+#define ADV7511_REG_AUDIO_INFOFRAME(x)         (0x73 + (x)) /* 0x73 - 0x7c */
+#define ADV7511_REG_INT_ENABLE(x)              (0x94 + (x))
+#define ADV7511_REG_INT(x)                     (0x96 + (x))
+#define ADV7511_REG_INPUT_CLK_DIV              0x9d
+#define ADV7511_REG_PLL_STATUS                 0x9e
+#define ADV7511_REG_HDMI_POWER                 0xa1
+#define ADV7511_REG_HDCP_HDMI_CFG              0xaf
+#define ADV7511_REG_AN(x)                      (0xb0 + (x)) /* 0xb0 - 0xb7 */
+#define ADV7511_REG_HDCP_STATUS                        0xb8
+#define ADV7511_REG_BCAPS                      0xbe
+#define ADV7511_REG_BKSV(x)                    (0xc0 + (x)) /* 0xc0 - 0xc3 */
+#define ADV7511_REG_EDID_SEGMENT               0xc4
+#define ADV7511_REG_DDC_STATUS                 0xc8
+#define ADV7511_REG_EDID_READ_CTRL             0xc9
+#define ADV7511_REG_BSTATUS(x)                 (0xca + (x)) /* 0xca - 0xcb */
+#define ADV7511_REG_TIMING_GEN_SEQ             0xd0
+#define ADV7511_REG_POWER2                     0xd6
+#define ADV7511_REG_HSYNC_PLACEMENT_MSB                0xfa
+
+#define ADV7511_REG_SYNC_ADJUSTMENT(x)         (0xd7 + (x)) /* 0xd7 - 0xdc */
+#define ADV7511_REG_TMDS_CLOCK_INV             0xde
+#define ADV7511_REG_ARC_CTRL                   0xdf
+#define ADV7511_REG_CEC_I2C_ADDR               0xe1
+#define ADV7511_REG_CEC_CTRL                   0xe2
+#define ADV7511_REG_CHIP_ID_HIGH               0xf5
+#define ADV7511_REG_CHIP_ID_LOW                        0xf6
+
+#define ADV7511_CSC_ENABLE                     BIT(7)
+#define ADV7511_CSC_UPDATE_MODE                        BIT(5)
+
+#define ADV7511_INT0_HPD                       BIT(7)
+#define ADV7511_INT0_VSYNC                     BIT(5)
+#define ADV7511_INT0_AUDIO_FIFO_FULL           BIT(4)
+#define ADV7511_INT0_EDID_READY                        BIT(2)
+#define ADV7511_INT0_HDCP_AUTHENTICATED                BIT(1)
+
+#define ADV7511_INT1_DDC_ERROR                 BIT(7)
+#define ADV7511_INT1_BKSV                      BIT(6)
+#define ADV7511_INT1_CEC_TX_READY              BIT(5)
+#define ADV7511_INT1_CEC_TX_ARBIT_LOST         BIT(4)
+#define ADV7511_INT1_CEC_TX_RETRY_TIMEOUT      BIT(3)
+#define ADV7511_INT1_CEC_RX_READY3             BIT(2)
+#define ADV7511_INT1_CEC_RX_READY2             BIT(1)
+#define ADV7511_INT1_CEC_RX_READY1             BIT(0)
+
+#define ADV7511_ARC_CTRL_POWER_DOWN            BIT(0)
+
+#define ADV7511_CEC_CTRL_POWER_DOWN            BIT(0)
+
+#define ADV7511_POWER_POWER_DOWN               BIT(6)
+
+#define ADV7511_HDMI_CFG_MODE_MASK             0x2
+#define ADV7511_HDMI_CFG_MODE_DVI              0x0
+#define ADV7511_HDMI_CFG_MODE_HDMI             0x2
+
+#define ADV7511_AUDIO_SELECT_I2C               0x0
+#define ADV7511_AUDIO_SELECT_SPDIF             0x1
+#define ADV7511_AUDIO_SELECT_DSD               0x2
+#define ADV7511_AUDIO_SELECT_HBR               0x3
+#define ADV7511_AUDIO_SELECT_DST               0x4
+
+#define ADV7511_I2S_SAMPLE_LEN_16              0x2
+#define ADV7511_I2S_SAMPLE_LEN_20              0x3
+#define ADV7511_I2S_SAMPLE_LEN_18              0x4
+#define ADV7511_I2S_SAMPLE_LEN_22              0x5
+#define ADV7511_I2S_SAMPLE_LEN_19              0x8
+#define ADV7511_I2S_SAMPLE_LEN_23              0x9
+#define ADV7511_I2S_SAMPLE_LEN_24              0xb
+#define ADV7511_I2S_SAMPLE_LEN_17              0xc
+#define ADV7511_I2S_SAMPLE_LEN_21              0xd
+
+#define ADV7511_SAMPLE_FREQ_44100              0x0
+#define ADV7511_SAMPLE_FREQ_48000              0x2
+#define ADV7511_SAMPLE_FREQ_32000              0x3
+#define ADV7511_SAMPLE_FREQ_88200              0x8
+#define ADV7511_SAMPLE_FREQ_96000              0xa
+#define ADV7511_SAMPLE_FREQ_176400             0xc
+#define ADV7511_SAMPLE_FREQ_192000             0xe
+
+#define ADV7511_STATUS_POWER_DOWN_POLARITY     BIT(7)
+#define ADV7511_STATUS_HPD                     BIT(6)
+#define ADV7511_STATUS_MONITOR_SENSE           BIT(5)
+#define ADV7511_STATUS_I2S_32BIT_MODE          BIT(3)
+
+#define ADV7511_PACKET_ENABLE_N_CTS            BIT(8+6)
+#define ADV7511_PACKET_ENABLE_AUDIO_SAMPLE     BIT(8+5)
+#define ADV7511_PACKET_ENABLE_AVI_INFOFRAME    BIT(8+4)
+#define ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME  BIT(8+3)
+#define ADV7511_PACKET_ENABLE_GC               BIT(7)
+#define ADV7511_PACKET_ENABLE_SPD              BIT(6)
+#define ADV7511_PACKET_ENABLE_MPEG             BIT(5)
+#define ADV7511_PACKET_ENABLE_ACP              BIT(4)
+#define ADV7511_PACKET_ENABLE_ISRC             BIT(3)
+#define ADV7511_PACKET_ENABLE_GM               BIT(2)
+#define ADV7511_PACKET_ENABLE_SPARE2           BIT(1)
+#define ADV7511_PACKET_ENABLE_SPARE1           BIT(0)
+
+#define ADV7511_REG_POWER2_HPD_SRC_MASK                0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_BOTH                0x00
+#define ADV7511_REG_POWER2_HPD_SRC_HPD         0x40
+#define ADV7511_REG_POWER2_HPD_SRC_CEC         0x80
+#define ADV7511_REG_POWER2_HPD_SRC_NONE                0xc0
+#define ADV7511_REG_POWER2_TDMS_ENABLE         BIT(4)
+#define ADV7511_REG_POWER2_GATE_INPUT_CLK      BIT(0)
+
+#define ADV7511_LOW_REFRESH_RATE_NONE          0x0
+#define ADV7511_LOW_REFRESH_RATE_24HZ          0x1
+#define ADV7511_LOW_REFRESH_RATE_25HZ          0x2
+#define ADV7511_LOW_REFRESH_RATE_30HZ          0x3
+
+#define ADV7511_AUDIO_CFG3_LEN_MASK            0x0f
+#define ADV7511_I2C_FREQ_ID_CFG_RATE_MASK      0xf0
+
+#define ADV7511_AUDIO_SOURCE_I2S               0
+#define ADV7511_AUDIO_SOURCE_SPDIF             1
+
+#define ADV7511_I2S_FORMAT_I2S                 0
+#define ADV7511_I2S_FORMAT_RIGHT_J             1
+#define ADV7511_I2S_FORMAT_LEFT_J              2
+
+#define ADV7511_PACKET(p, x)       ((p) * 0x20 + (x))
+#define ADV7511_PACKET_SDP(x)      ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_MPEG(x)     ADV7511_PACKET(1, x)
+#define ADV7511_PACKET_ACP(x)      ADV7511_PACKET(2, x)
+#define ADV7511_PACKET_ISRC1(x)            ADV7511_PACKET(3, x)
+#define ADV7511_PACKET_ISRC2(x)            ADV7511_PACKET(4, x)
+#define ADV7511_PACKET_GM(x)       ADV7511_PACKET(5, x)
+#define ADV7511_PACKET_SPARE(x)            ADV7511_PACKET(6, x)
+
+enum adv7511_input_clock {
+       ADV7511_INPUT_CLOCK_1X,
+       ADV7511_INPUT_CLOCK_2X,
+       ADV7511_INPUT_CLOCK_DDR,
+};
+
+enum adv7511_input_justification {
+       ADV7511_INPUT_JUSTIFICATION_EVENLY = 0,
+       ADV7511_INPUT_JUSTIFICATION_RIGHT = 1,
+       ADV7511_INPUT_JUSTIFICATION_LEFT = 2,
+};
+
+enum adv7511_input_sync_pulse {
+       ADV7511_INPUT_SYNC_PULSE_DE = 0,
+       ADV7511_INPUT_SYNC_PULSE_HSYNC = 1,
+       ADV7511_INPUT_SYNC_PULSE_VSYNC = 2,
+       ADV7511_INPUT_SYNC_PULSE_NONE = 3,
+};
+
+/**
+ * enum adv7511_sync_polarity - Polarity for the input sync signals
+ * @ADV7511_SYNC_POLARITY_PASSTHROUGH:  Sync polarity matches that of
+ *                                    the currently configured mode.
+ * @ADV7511_SYNC_POLARITY_LOW:     Sync polarity is low
+ * @ADV7511_SYNC_POLARITY_HIGH:            Sync polarity is high
+ *
+ * If the polarity is set to either LOW or HIGH the driver will configure the
+ * ADV7511 to internally invert the sync signal if required to match the sync
+ * polarity setting for the currently selected output mode.
+ *
+ * If the polarity is set to PASSTHROUGH, the ADV7511 will route the signal
+ * unchanged. This is used when the upstream graphics core already generates
+ * the sync signals with the correct polarity.
+ */
+enum adv7511_sync_polarity {
+       ADV7511_SYNC_POLARITY_PASSTHROUGH,
+       ADV7511_SYNC_POLARITY_LOW,
+       ADV7511_SYNC_POLARITY_HIGH,
+};
+
+/**
+ * struct adv7511_link_config - Describes adv7511 hardware configuration
+ * @input_color_depth:         Number of bits per color component (8, 10 or 12)
+ * @input_colorspace:          The input colorspace (RGB, YUV444, YUV422)
+ * @input_clock:               The input video clock style (1x, 2x, DDR)
+ * @input_style:               The input component arrangement variant
+ * @input_justification:       Video input format bit justification
+ * @clock_delay:               Clock delay for the input clock (in ps)
+ * @embedded_sync:             Video input uses BT.656-style embedded sync
+ * @sync_pulse:                        Select the sync pulse
+ * @vsync_polarity:            vsync input signal configuration
+ * @hsync_polarity:            hsync input signal configuration
+ */
+struct adv7511_link_config {
+       unsigned int input_color_depth;
+       enum hdmi_colorspace input_colorspace;
+       enum adv7511_input_clock input_clock;
+       unsigned int input_style;
+       enum adv7511_input_justification input_justification;
+
+       int clock_delay;
+
+       bool embedded_sync;
+       enum adv7511_input_sync_pulse sync_pulse;
+       enum adv7511_sync_polarity vsync_polarity;
+       enum adv7511_sync_polarity hsync_polarity;
+};
+
+/**
+ * enum adv7511_csc_scaling - Scaling factor for the ADV7511 CSC
+ * @ADV7511_CSC_SCALING_1: CSC results are not scaled
+ * @ADV7511_CSC_SCALING_2: CSC results are scaled by a factor of two
+ * @ADV7511_CSC_SCALING_4: CSC results are scaled by a factor of four
+ */
+enum adv7511_csc_scaling {
+       ADV7511_CSC_SCALING_1 = 0,
+       ADV7511_CSC_SCALING_2 = 1,
+       ADV7511_CSC_SCALING_4 = 2,
+};
+
+/**
+ * struct adv7511_video_config - Describes adv7511 hardware configuration
+ * @csc_enable:                        Whether to enable color space conversion
+ * @csc_scaling_factor:                Color space conversion scaling factor
+ * @csc_coefficents:           Color space conversion coefficients
+ * @hdmi_mode:                 Whether to use HDMI or DVI output mode
+ * @avi_infoframe:             HDMI infoframe
+ */
+struct adv7511_video_config {
+       bool csc_enable;
+       enum adv7511_csc_scaling csc_scaling_factor;
+       const uint16_t *csc_coefficents;
+
+       bool hdmi_mode;
+       struct hdmi_avi_infoframe avi_infoframe;
+};
+
+enum adv7511_type {
+       ADV7511,
+       ADV7533,
+};
+
+struct adv7511 {
+       struct i2c_client *i2c_main;
+       struct i2c_client *i2c_edid;
+       struct i2c_client *i2c_cec;
+
+       struct regmap *regmap;
+       struct regmap *regmap_cec;
+       enum drm_connector_status status;
+       bool powered;
+
+       struct drm_display_mode curr_mode;
+
+       unsigned int f_tmds;
+
+       unsigned int current_edid_segment;
+       uint8_t edid_buf[256];
+       bool edid_read;
+
+       wait_queue_head_t wq;
+       struct drm_bridge bridge;
+       struct drm_connector connector;
+
+       bool embedded_sync;
+       enum adv7511_sync_polarity vsync_polarity;
+       enum adv7511_sync_polarity hsync_polarity;
+       bool rgb;
+
+       struct edid *edid;
+
+       struct gpio_desc *gpio_pd;
+
+       /* ADV7533 DSI RX related params */
+       struct device_node *host_node;
+       struct mipi_dsi_device *dsi;
+       u8 num_dsi_lanes;
+       bool use_timing_gen;
+
+       enum adv7511_type type;
+};
+
+#ifdef CONFIG_DRM_I2C_ADV7533
+void adv7533_dsi_power_on(struct adv7511 *adv);
+void adv7533_dsi_power_off(struct adv7511 *adv);
+void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
+int adv7533_patch_registers(struct adv7511 *adv);
+void adv7533_uninit_cec(struct adv7511 *adv);
+int adv7533_init_cec(struct adv7511 *adv);
+int adv7533_attach_dsi(struct adv7511 *adv);
+void adv7533_detach_dsi(struct adv7511 *adv);
+int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
+#else
+static inline void adv7533_dsi_power_on(struct adv7511 *adv)
+{
+}
+
+static inline void adv7533_dsi_power_off(struct adv7511 *adv)
+{
+}
+
+static inline void adv7533_mode_set(struct adv7511 *adv,
+                                   struct drm_display_mode *mode)
+{
+}
+
+static inline int adv7533_patch_registers(struct adv7511 *adv)
+{
+       return -ENODEV;
+}
+
+static inline void adv7533_uninit_cec(struct adv7511 *adv)
+{
+}
+
+static inline int adv7533_init_cec(struct adv7511 *adv)
+{
+       return -ENODEV;
+}
+
+static inline int adv7533_attach_dsi(struct adv7511 *adv)
+{
+       return -ENODEV;
+}
+
+static inline void adv7533_detach_dsi(struct adv7511 *adv)
+{
+}
+
+static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+{
+       return -ENODEV;
+}
+#endif
+
+#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
new file mode 100644 (file)
index 0000000..ec8fb2e
--- /dev/null
@@ -0,0 +1,1124 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+
+#include "adv7511.h"
+
+/* ADI recommended values for proper operation. */
+static const struct reg_sequence adv7511_fixed_registers[] = {
+       { 0x98, 0x03 },
+       { 0x9a, 0xe0 },
+       { 0x9c, 0x30 },
+       { 0x9d, 0x61 },
+       { 0xa2, 0xa4 },
+       { 0xa3, 0xa4 },
+       { 0xe0, 0xd0 },
+       { 0xf9, 0x00 },
+       { 0x55, 0x02 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Register access
+ */
+
+static const uint8_t adv7511_register_defaults[] = {
+       0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
+       0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
+       0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
+       0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
+       0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
+       0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
+       0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
+       0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
+       0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
+       0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
+       0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
+       0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
+       0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
+       0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
+       0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
+       0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case ADV7511_REG_CHIP_REVISION:
+       case ADV7511_REG_SPDIF_FREQ:
+       case ADV7511_REG_CTS_AUTOMATIC1:
+       case ADV7511_REG_CTS_AUTOMATIC2:
+       case ADV7511_REG_VIC_DETECTED:
+       case ADV7511_REG_VIC_SEND:
+       case ADV7511_REG_AUX_VIC_DETECTED:
+       case ADV7511_REG_STATUS:
+       case ADV7511_REG_GC(1):
+       case ADV7511_REG_INT(0):
+       case ADV7511_REG_INT(1):
+       case ADV7511_REG_PLL_STATUS:
+       case ADV7511_REG_AN(0):
+       case ADV7511_REG_AN(1):
+       case ADV7511_REG_AN(2):
+       case ADV7511_REG_AN(3):
+       case ADV7511_REG_AN(4):
+       case ADV7511_REG_AN(5):
+       case ADV7511_REG_AN(6):
+       case ADV7511_REG_AN(7):
+       case ADV7511_REG_HDCP_STATUS:
+       case ADV7511_REG_BCAPS:
+       case ADV7511_REG_BKSV(0):
+       case ADV7511_REG_BKSV(1):
+       case ADV7511_REG_BKSV(2):
+       case ADV7511_REG_BKSV(3):
+       case ADV7511_REG_BKSV(4):
+       case ADV7511_REG_DDC_STATUS:
+       case ADV7511_REG_EDID_READ_CTRL:
+       case ADV7511_REG_BSTATUS(0):
+       case ADV7511_REG_BSTATUS(1):
+       case ADV7511_REG_CHIP_ID_HIGH:
+       case ADV7511_REG_CHIP_ID_LOW:
+               return true;
+       }
+
+       return false;
+}
+
+static const struct regmap_config adv7511_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = 0xff,
+       .cache_type = REGCACHE_RBTREE,
+       .reg_defaults_raw = adv7511_register_defaults,
+       .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),
+
+       .volatile_reg = adv7511_register_volatile,
+};
+
+/* -----------------------------------------------------------------------------
+ * Hardware configuration
+ */
+
+static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
+                                const uint16_t *coeff,
+                                unsigned int scaling_factor)
+{
+       unsigned int i;
+
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+                          ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);
+
+       if (enable) {
+               for (i = 0; i < 12; ++i) {
+                       regmap_update_bits(adv7511->regmap,
+                                          ADV7511_REG_CSC_UPPER(i),
+                                          0x1f, coeff[i] >> 8);
+                       regmap_write(adv7511->regmap,
+                                    ADV7511_REG_CSC_LOWER(i),
+                                    coeff[i] & 0xff);
+               }
+       }
+
+       if (enable)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+                                  0xe0, 0x80 | (scaling_factor << 5));
+       else
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
+                                  0x80, 0x00);
+
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
+                          ADV7511_CSC_UPDATE_MODE, 0);
+}
+
+static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+{
+       if (packet & 0xff)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+                                  packet, 0xff);
+
+       if (packet & 0xff00) {
+               packet >>= 8;
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+                                  packet, 0xff);
+       }
+
+       return 0;
+}
+
+static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+{
+       if (packet & 0xff)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
+                                  packet, 0x00);
+
+       if (packet & 0xff00) {
+               packet >>= 8;
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
+                                  packet, 0x00);
+       }
+
+       return 0;
+}
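
Not part of the patch, just an illustration of the convention above: packet bits travel in one 16-bit mask whose low byte maps to PACKET_ENABLE0 and whose high byte maps to PACKET_ENABLE1, matching the BIT(8 + n) definitions in adv7511.h. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define PKT_AVI_INFOFRAME	BIT(8 + 4)	/* handled via PACKET_ENABLE1 */
#define PKT_SPD			BIT(6)		/* handled via PACKET_ENABLE0 */

int main(void)
{
	unsigned int packet = PKT_AVI_INFOFRAME | PKT_SPD;
	uint8_t en0 = packet & 0xff;		/* low byte  -> ENABLE0 mask */
	uint8_t en1 = (packet >> 8) & 0xff;	/* high byte -> ENABLE1 mask */

	printf("ENABLE0 mask 0x%02x, ENABLE1 mask 0x%02x\n", en0, en1);
	return 0;
}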
+
+/* Coefficients for adv7511 color space conversion */
+static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
+       0x0734, 0x04ad, 0x0000, 0x1c1b,
+       0x1ddc, 0x04ad, 0x1f24, 0x0135,
+       0x0000, 0x04ad, 0x087c, 0x1b77,
+};
+
+static void adv7511_set_config_csc(struct adv7511 *adv7511,
+                                  struct drm_connector *connector,
+                                  bool rgb)
+{
+       struct adv7511_video_config config;
+       bool output_format_422, output_format_ycbcr;
+       unsigned int mode;
+       uint8_t infoframe[17];
+
+       if (adv7511->edid)
+               config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
+       else
+               config.hdmi_mode = false;
+
+       hdmi_avi_infoframe_init(&config.avi_infoframe);
+
+       config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
+
+       if (rgb) {
+               config.csc_enable = false;
+               config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+       } else {
+               config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
+               config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
+
+               if ((connector->display_info.color_formats &
+                    DRM_COLOR_FORMAT_YCRCB422) &&
+                   config.hdmi_mode) {
+                       config.csc_enable = false;
+                       config.avi_infoframe.colorspace =
+                               HDMI_COLORSPACE_YUV422;
+               } else {
+                       config.csc_enable = true;
+                       config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+               }
+       }
+
+       if (config.hdmi_mode) {
+               mode = ADV7511_HDMI_CFG_MODE_HDMI;
+
+               switch (config.avi_infoframe.colorspace) {
+               case HDMI_COLORSPACE_YUV444:
+                       output_format_422 = false;
+                       output_format_ycbcr = true;
+                       break;
+               case HDMI_COLORSPACE_YUV422:
+                       output_format_422 = true;
+                       output_format_ycbcr = true;
+                       break;
+               default:
+                       output_format_422 = false;
+                       output_format_ycbcr = false;
+                       break;
+               }
+       } else {
+               mode = ADV7511_HDMI_CFG_MODE_DVI;
+               output_format_422 = false;
+               output_format_ycbcr = false;
+       }
+
+       adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+
+       adv7511_set_colormap(adv7511, config.csc_enable,
+                            config.csc_coefficents,
+                            config.csc_scaling_factor);
+
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
+                          (output_format_422 << 7) | output_format_ycbcr);
+
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
+                          ADV7511_HDMI_CFG_MODE_MASK, mode);
+
+       hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
+                               sizeof(infoframe));
+
+       /* The AVI infoframe id is not configurable */
+       regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+                         infoframe + 1, sizeof(infoframe) - 1);
+
+       adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+}
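
A side note on the infoframe + 1 write above: a packed AVI infoframe starts with the fixed packet type byte, which the chip does not expose as a register, so the register stream begins at the version field. A toy layout check (standard AVI header values assumed, not from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* type 0x82, version 2, length 13, then checksum + payload */
	uint8_t infoframe[17] = { 0x82, 0x02, 0x0d };

	printf("register stream starts at 0x%02x (version byte)\n",
	       infoframe[1]);
	printf("bytes written: %zu\n", sizeof(infoframe) - 1);
	return 0;
}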
+
+static void adv7511_set_link_config(struct adv7511 *adv7511,
+                                   const struct adv7511_link_config *config)
+{
+       /*
+        * The input style values documented in the datasheet don't match the
+        * hardware register field values :-(
+        */
+       static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
+
+       unsigned int clock_delay;
+       unsigned int color_depth;
+       unsigned int input_id;
+
+       clock_delay = (config->clock_delay + 1200) / 400;
+       color_depth = config->input_color_depth == 8 ? 3
+                   : (config->input_color_depth == 10 ? 1 : 2);
+
+       /* TODO Support input ID 6 */
+       if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
+               input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
+                        ? 5 : 0;
+       else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
+               input_id = config->embedded_sync ? 8 : 7;
+       else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
+               input_id = config->embedded_sync ? 4 : 3;
+       else
+               input_id = config->embedded_sync ? 2 : 1;
+
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
+                          input_id);
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
+                          (color_depth << 4) |
+                          (input_styles[config->input_style] << 2));
+       regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
+                    config->input_justification << 3);
+       regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
+                    config->sync_pulse << 2);
+
+       regmap_write(adv7511->regmap, 0xba, clock_delay << 5);
+
+       adv7511->embedded_sync = config->embedded_sync;
+       adv7511->hsync_polarity = config->hsync_polarity;
+       adv7511->vsync_polarity = config->vsync_polarity;
+       adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
+}
+
+static void adv7511_power_on(struct adv7511 *adv7511)
+{
+       adv7511->current_edid_segment = -1;
+
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+                          ADV7511_POWER_POWER_DOWN, 0);
+       if (adv7511->i2c_main->irq) {
+               /*
+                * Documentation says the INT_ENABLE registers are reset in
+                * POWER_DOWN mode. My 7511w preserved the bits, however.
+                * Still, let's be safe and stick to the documentation.
+                */
+               regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+                            ADV7511_INT0_EDID_READY);
+               regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+                            ADV7511_INT1_DDC_ERROR);
+       }
+
+       /*
+        * Per spec it is allowed to pulse the HPD signal to indicate that the
+        * EDID information has changed. Some monitors do this when they wake
+        * up from standby or are enabled. When the HPD goes low the adv7511 is
+        * reset and the outputs are disabled which might cause the monitor to
+        * go to standby again. To avoid this we ignore the HPD pin for the
+        * first few seconds after enabling the output.
+        */
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+                          ADV7511_REG_POWER2_HPD_SRC_MASK,
+                          ADV7511_REG_POWER2_HPD_SRC_NONE);
+
+       /*
+        * Most of the registers are reset during power down or when HPD is low.
+        */
+       regcache_sync(adv7511->regmap);
+
+       if (adv7511->type == ADV7533)
+               adv7533_dsi_power_on(adv7511);
+
+       adv7511->powered = true;
+}
+
+static void adv7511_power_off(struct adv7511 *adv7511)
+{
+       /* TODO: setup additional power down modes */
+       regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+                          ADV7511_POWER_POWER_DOWN,
+                          ADV7511_POWER_POWER_DOWN);
+       regcache_mark_dirty(adv7511->regmap);
+
+       if (adv7511->type == ADV7533)
+               adv7533_dsi_power_off(adv7511);
+
+       adv7511->powered = false;
+}
+
+/* -----------------------------------------------------------------------------
+ * Interrupt and hotplug detection
+ */
+
+static bool adv7511_hpd(struct adv7511 *adv7511)
+{
+       unsigned int irq0;
+       int ret;
+
+       ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+       if (ret < 0)
+               return false;
+
+       if (irq0 & ADV7511_INT0_HPD) {
+               regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+                            ADV7511_INT0_HPD);
+               return true;
+       }
+
+       return false;
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
+{
+       unsigned int irq0, irq1;
+       int ret;
+
+       ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
+       if (ret < 0)
+               return ret;
+
+       regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
+       regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
+
+       if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
+               drm_helper_hpd_irq_event(adv7511->connector.dev);
+
+       if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
+               adv7511->edid_read = true;
+
+               if (adv7511->i2c_main->irq)
+                       wake_up_all(&adv7511->wq);
+       }
+
+       return 0;
+}
+
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+       struct adv7511 *adv7511 = devid;
+       int ret;
+
+       ret = adv7511_irq_process(adv7511, true);
+       return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
+}
+
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
+{
+       int ret;
+
+       if (adv7511->i2c_main->irq) {
+               ret = wait_event_interruptible_timeout(adv7511->wq,
+                               adv7511->edid_read, msecs_to_jiffies(timeout));
+       } else {
+               for (; timeout > 0; timeout -= 25) {
+                       ret = adv7511_irq_process(adv7511, false);
+                       if (ret < 0)
+                               break;
+
+                       if (adv7511->edid_read)
+                               break;
+
+                       msleep(25);
+               }
+       }
+
+       return adv7511->edid_read ? 0 : -EIO;
+}
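
The helper above reaches the same condition by either path: sleep on the wait queue when an interrupt line exists, otherwise poll in 25 ms steps until the budget is spent. A standalone model of the polling branch (the readiness source is faked):

#include <stdio.h>

static int ready_after = 3;	/* pretend hardware readiness */

static int edid_ready(void)
{
	return --ready_after <= 0;
}

int main(void)
{
	int timeout = 200;

	for (; timeout > 0; timeout -= 25) {	/* one step per "25 ms" */
		if (edid_ready())
			break;
	}
	printf("%s\n", timeout > 0 ? "EDID ready" : "timed out");
	return 0;
}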
+
+static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
+                                 size_t len)
+{
+       struct adv7511 *adv7511 = data;
+       struct i2c_msg xfer[2];
+       uint8_t offset;
+       unsigned int i;
+       int ret;
+
+       if (len > 128)
+               return -EINVAL;
+
+       if (adv7511->current_edid_segment != block / 2) {
+               unsigned int status;
+
+               ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
+                                 &status);
+               if (ret < 0)
+                       return ret;
+
+               if (status != 2) {
+                       adv7511->edid_read = false;
+                       regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
+                                    block);
+                       ret = adv7511_wait_for_edid(adv7511, 200);
+                       if (ret < 0)
+                               return ret;
+               }
+
+               /* Break the read into 64-byte chunks; more I2C controllers
+                * support 64-byte transfers than 256-byte transfers.
+                */
+
+               xfer[0].addr = adv7511->i2c_edid->addr;
+               xfer[0].flags = 0;
+               xfer[0].len = 1;
+               xfer[0].buf = &offset;
+               xfer[1].addr = adv7511->i2c_edid->addr;
+               xfer[1].flags = I2C_M_RD;
+               xfer[1].len = 64;
+               xfer[1].buf = adv7511->edid_buf;
+
+               offset = 0;
+
+               for (i = 0; i < 4; ++i) {
+                       ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
+                                          ARRAY_SIZE(xfer));
+                       if (ret < 0)
+                               return ret;
+                       else if (ret != 2)
+                               return -EIO;
+
+                       xfer[1].buf += 64;
+                       offset += 64;
+               }
+
+               adv7511->current_edid_segment = block / 2;
+       }
+
+       if (block % 2 == 0)
+               memcpy(buf, adv7511->edid_buf, len);
+       else
+               memcpy(buf, adv7511->edid_buf + 128, len);
+
+       return 0;
+}
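
The segment/block arithmetic above in one place: each 256-byte EDID segment holds two 128-byte blocks, the segment is fetched as four 64-byte I2C reads, and the requested block selects which half of the buffer to copy. A quick standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int block;

	for (block = 0; block < 4; block++)
		printf("block %u -> segment %u, buffer offset %u, 4 x 64-byte reads\n",
		       block, block / 2, (block % 2) * 128);
	return 0;
}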
+
+/* -----------------------------------------------------------------------------
+ * ADV75xx helpers
+ */
+
+static int adv7511_get_modes(struct adv7511 *adv7511,
+                            struct drm_connector *connector)
+{
+       struct edid *edid;
+       unsigned int count;
+
+       /* Reading the EDID only works if the device is powered */
+       if (!adv7511->powered) {
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+                                  ADV7511_POWER_POWER_DOWN, 0);
+               if (adv7511->i2c_main->irq) {
+                       regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+                                    ADV7511_INT0_EDID_READY);
+                       regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+                                    ADV7511_INT1_DDC_ERROR);
+               }
+               adv7511->current_edid_segment = -1;
+       }
+
+       edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
+
+       if (!adv7511->powered)
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+                                  ADV7511_POWER_POWER_DOWN,
+                                  ADV7511_POWER_POWER_DOWN);
+
+       kfree(adv7511->edid);
+       adv7511->edid = edid;
+       if (!edid)
+               return 0;
+
+       drm_mode_connector_update_edid_property(connector, edid);
+       count = drm_add_edid_modes(connector, edid);
+
+       adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
+
+       return count;
+}
+
+static enum drm_connector_status
+adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
+{
+       enum drm_connector_status status;
+       unsigned int val;
+       bool hpd;
+       int ret;
+
+       ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+       if (ret < 0)
+               return connector_status_disconnected;
+
+       if (val & ADV7511_STATUS_HPD)
+               status = connector_status_connected;
+       else
+               status = connector_status_disconnected;
+
+       hpd = adv7511_hpd(adv7511);
+
+       /* The chip resets itself when the cable is disconnected, so in case
+        * there is a pending HPD interrupt and the cable is connected there was
+        * at least one transition from disconnected to connected and the chip
+        * has to be reinitialized. */
+       if (status == connector_status_connected && hpd && adv7511->powered) {
+               regcache_mark_dirty(adv7511->regmap);
+               adv7511_power_on(adv7511);
+               adv7511_get_modes(adv7511, connector);
+               if (adv7511->status == connector_status_connected)
+                       status = connector_status_disconnected;
+       } else {
+               /* Re-enable HPD sensing */
+               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+                                  ADV7511_REG_POWER2_HPD_SRC_MASK,
+                                  ADV7511_REG_POWER2_HPD_SRC_BOTH);
+       }
+
+       adv7511->status = status;
+       return status;
+}
+
+static int adv7511_mode_valid(struct adv7511 *adv7511,
+                             struct drm_display_mode *mode)
+{
+       if (mode->clock > 165000)
+               return MODE_CLOCK_HIGH;
+
+       return MODE_OK;
+}
+
+static void adv7511_mode_set(struct adv7511 *adv7511,
+                            struct drm_display_mode *mode,
+                            struct drm_display_mode *adj_mode)
+{
+       unsigned int low_refresh_rate;
+       unsigned int hsync_polarity = 0;
+       unsigned int vsync_polarity = 0;
+
+       if (adv7511->embedded_sync) {
+               unsigned int hsync_offset, hsync_len;
+               unsigned int vsync_offset, vsync_len;
+
+               hsync_offset = adj_mode->crtc_hsync_start -
+                              adj_mode->crtc_hdisplay;
+               vsync_offset = adj_mode->crtc_vsync_start -
+                              adj_mode->crtc_vdisplay;
+               hsync_len = adj_mode->crtc_hsync_end -
+                           adj_mode->crtc_hsync_start;
+               vsync_len = adj_mode->crtc_vsync_end -
+                           adj_mode->crtc_vsync_start;
+
+               /* The hardware vsync generator has an off-by-one bug */
+               vsync_offset += 1;
+
+               regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
+                            ((hsync_offset >> 10) & 0x7) << 5);
+               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
+                            (hsync_offset >> 2) & 0xff);
+               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
+                            ((hsync_offset & 0x3) << 6) |
+                            ((hsync_len >> 4) & 0x3f));
+               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
+                            ((hsync_len & 0xf) << 4) |
+                            ((vsync_offset >> 6) & 0xf));
+               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
+                            ((vsync_offset & 0x3f) << 2) |
+                            ((vsync_len >> 8) & 0x3));
+               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
+                            vsync_len & 0xff);
+
+               hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
+               vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
+       } else {
+               enum adv7511_sync_polarity mode_hsync_polarity;
+               enum adv7511_sync_polarity mode_vsync_polarity;
+
+               /*
+                * If the input signal is always low or always high we want to
+                * invert it or let it pass through, depending on the polarity
+                * of the current mode.
+                */
+               if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
+                       mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+               else
+                       mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+               if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
+                       mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
+               else
+                       mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
+
+               if (adv7511->hsync_polarity != mode_hsync_polarity &&
+                   adv7511->hsync_polarity !=
+                   ADV7511_SYNC_POLARITY_PASSTHROUGH)
+                       hsync_polarity = 1;
+
+               if (adv7511->vsync_polarity != mode_vsync_polarity &&
+                   adv7511->vsync_polarity !=
+                   ADV7511_SYNC_POLARITY_PASSTHROUGH)
+                       vsync_polarity = 1;
+       }
+
+       if (mode->vrefresh <= 24000)
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
+       else if (mode->vrefresh <= 25000)
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
+       else if (mode->vrefresh <= 30000)
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
+       else
+               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
+
+       regmap_update_bits(adv7511->regmap, 0xfb,
+               0x6, low_refresh_rate << 1);
+       regmap_update_bits(adv7511->regmap, 0x17,
+               0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+
+       if (adv7511->type == ADV7533)
+               adv7533_mode_set(adv7511, adj_mode);
+
+       drm_mode_copy(&adv7511->curr_mode, adj_mode);
+
+       /*
+        * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
+        * supposed to give better results.
+        */
+
+       adv7511->f_tmds = mode->clock;
+}
+
+/* Connector funcs */
+static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
+{
+       return container_of(connector, struct adv7511, connector);
+}
+
+static int adv7511_connector_get_modes(struct drm_connector *connector)
+{
+       struct adv7511 *adv = connector_to_adv7511(connector);
+
+       return adv7511_get_modes(adv, connector);
+}
+
+static enum drm_mode_status
+adv7511_connector_mode_valid(struct drm_connector *connector,
+                            struct drm_display_mode *mode)
+{
+       struct adv7511 *adv = connector_to_adv7511(connector);
+
+       return adv7511_mode_valid(adv, mode);
+}
+
+static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
+       .get_modes = adv7511_connector_get_modes,
+       .mode_valid = adv7511_connector_mode_valid,
+};
+
+static enum drm_connector_status
+adv7511_connector_detect(struct drm_connector *connector, bool force)
+{
+       struct adv7511 *adv = connector_to_adv7511(connector);
+
+       return adv7511_detect(adv, connector);
+}
+
+static struct drm_connector_funcs adv7511_connector_funcs = {
+       .dpms = drm_atomic_helper_connector_dpms,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .detect = adv7511_connector_detect,
+       .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* Bridge funcs */
+static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+       return container_of(bridge, struct adv7511, bridge);
+}
+
+static void adv7511_bridge_enable(struct drm_bridge *bridge)
+{
+       struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+       adv7511_power_on(adv);
+}
+
+static void adv7511_bridge_disable(struct drm_bridge *bridge)
+{
+       struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+       adv7511_power_off(adv);
+}
+
+static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
+                                   struct drm_display_mode *mode,
+                                   struct drm_display_mode *adj_mode)
+{
+       struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+       adv7511_mode_set(adv, mode, adj_mode);
+}
+
+static int adv7511_bridge_attach(struct drm_bridge *bridge)
+{
+       struct adv7511 *adv = bridge_to_adv7511(bridge);
+       int ret;
+
+       if (!bridge->encoder) {
+               DRM_ERROR("Parent encoder object not found");
+               return -ENODEV;
+       }
+
+       adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+       ret = drm_connector_init(bridge->dev, &adv->connector,
+                                &adv7511_connector_funcs,
+                                DRM_MODE_CONNECTOR_HDMIA);
+       if (ret) {
+               DRM_ERROR("Failed to initialize connector with drm\n");
+               return ret;
+       }
+       drm_connector_helper_add(&adv->connector,
+                                &adv7511_connector_helper_funcs);
+       drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+
+       if (adv->type == ADV7533)
+               ret = adv7533_attach_dsi(adv);
+
+       return ret;
+}
+
+static struct drm_bridge_funcs adv7511_bridge_funcs = {
+       .enable = adv7511_bridge_enable,
+       .disable = adv7511_bridge_disable,
+       .mode_set = adv7511_bridge_mode_set,
+       .attach = adv7511_bridge_attach,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & remove
+ */
+
+static int adv7511_parse_dt(struct device_node *np,
+                           struct adv7511_link_config *config)
+{
+       const char *str;
+       int ret;
+
+       of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
+       if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
+           config->input_color_depth != 12)
+               return -EINVAL;
+
+       ret = of_property_read_string(np, "adi,input-colorspace", &str);
+       if (ret < 0)
+               return ret;
+
+       if (!strcmp(str, "rgb"))
+               config->input_colorspace = HDMI_COLORSPACE_RGB;
+       else if (!strcmp(str, "yuv422"))
+               config->input_colorspace = HDMI_COLORSPACE_YUV422;
+       else if (!strcmp(str, "yuv444"))
+               config->input_colorspace = HDMI_COLORSPACE_YUV444;
+       else
+               return -EINVAL;
+
+       ret = of_property_read_string(np, "adi,input-clock", &str);
+       if (ret < 0)
+               return ret;
+
+       if (!strcmp(str, "1x"))
+               config->input_clock = ADV7511_INPUT_CLOCK_1X;
+       else if (!strcmp(str, "2x"))
+               config->input_clock = ADV7511_INPUT_CLOCK_2X;
+       else if (!strcmp(str, "ddr"))
+               config->input_clock = ADV7511_INPUT_CLOCK_DDR;
+       else
+               return -EINVAL;
+
+       if (config->input_colorspace == HDMI_COLORSPACE_YUV422 ||
+           config->input_clock != ADV7511_INPUT_CLOCK_1X) {
+               ret = of_property_read_u32(np, "adi,input-style",
+                                          &config->input_style);
+               if (ret)
+                       return ret;
+
+               if (config->input_style < 1 || config->input_style > 3)
+                       return -EINVAL;
+
+               ret = of_property_read_string(np, "adi,input-justification",
+                                             &str);
+               if (ret < 0)
+                       return ret;
+
+               if (!strcmp(str, "left"))
+                       config->input_justification =
+                               ADV7511_INPUT_JUSTIFICATION_LEFT;
+               else if (!strcmp(str, "evenly"))
+                       config->input_justification =
+                               ADV7511_INPUT_JUSTIFICATION_EVENLY;
+               else if (!strcmp(str, "right"))
+                       config->input_justification =
+                               ADV7511_INPUT_JUSTIFICATION_RIGHT;
+               else
+                       return -EINVAL;
+
+       } else {
+               config->input_style = 1;
+               config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT;
+       }
+
+       of_property_read_u32(np, "adi,clock-delay", &config->clock_delay);
+       if (config->clock_delay < -1200 || config->clock_delay > 1600)
+               return -EINVAL;
+
+       config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync");
+
+       /* Hardcode the sync pulse configurations for now. */
+       config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE;
+       config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+       config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
+
+       return 0;
+}
+
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
+static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+       struct adv7511_link_config link_config;
+       struct adv7511 *adv7511;
+       struct device *dev = &i2c->dev;
+       unsigned int val;
+       int ret;
+
+       if (!dev->of_node)
+               return -EINVAL;
+
+       adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
+       if (!adv7511)
+               return -ENOMEM;
+
+       adv7511->powered = false;
+       adv7511->status = connector_status_disconnected;
+
+       if (dev->of_node)
+               adv7511->type = (enum adv7511_type)of_device_get_match_data(dev);
+       else
+               adv7511->type = id->driver_data;
+
+       memset(&link_config, 0, sizeof(link_config));
+
+       if (adv7511->type == ADV7511)
+               ret = adv7511_parse_dt(dev->of_node, &link_config);
+       else
+               ret = adv7533_parse_dt(dev->of_node, adv7511);
+       if (ret)
+               return ret;
+
+       /*
+        * The power down GPIO is optional. If present, toggle it from active to
+        * inactive to wake up the encoder.
+        */
+       adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
+       if (IS_ERR(adv7511->gpio_pd))
+               return PTR_ERR(adv7511->gpio_pd);
+
+       if (adv7511->gpio_pd) {
+               mdelay(5);
+               gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
+       }
+
+       adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
+       if (IS_ERR(adv7511->regmap))
+               return PTR_ERR(adv7511->regmap);
+
+       ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
+       if (ret)
+               return ret;
+       dev_dbg(dev, "Rev. %d\n", val);
+
+       if (adv7511->type == ADV7511)
+               ret = regmap_register_patch(adv7511->regmap,
+                                           adv7511_fixed_registers,
+                                           ARRAY_SIZE(adv7511_fixed_registers));
+       else
+               ret = adv7533_patch_registers(adv7511);
+       if (ret)
+               return ret;
+
+       regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
+       regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
+                    packet_i2c_addr);
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
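+       /* Start with every packet type disabled (0xffff covers all bits). */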
+       adv7511_packet_disable(adv7511, 0xffff);
+
+       adv7511->i2c_main = i2c;
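+       /* The datasheet addresses are 8-bit; the I2C core expects 7-bit. */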
+       adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
+       if (!adv7511->i2c_edid)
+               return -ENOMEM;
+
+       if (adv7511->type == ADV7533) {
+               ret = adv7533_init_cec(adv7511);
+               if (ret)
+                       goto err_i2c_unregister_edid;
+       }
+
+       if (i2c->irq) {
+               init_waitqueue_head(&adv7511->wq);
+
+               ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+                                               adv7511_irq_handler,
+                                               IRQF_ONESHOT, dev_name(dev),
+                                               adv7511);
+               if (ret)
+                       goto err_unregister_cec;
+       }
+
+       /* CEC is unused for now */
+       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+                    ADV7511_CEC_CTRL_POWER_DOWN);
+
+       adv7511_power_off(adv7511);
+
+       i2c_set_clientdata(i2c, adv7511);
+
+       if (adv7511->type == ADV7511)
+               adv7511_set_link_config(adv7511, &link_config);
+
+       adv7511->bridge.funcs = &adv7511_bridge_funcs;
+       adv7511->bridge.of_node = dev->of_node;
+
+       ret = drm_bridge_add(&adv7511->bridge);
+       if (ret) {
+               dev_err(dev, "failed to add adv7511 bridge\n");
+               goto err_unregister_cec;
+       }
+
+       return 0;
+
+err_unregister_cec:
+       adv7533_uninit_cec(adv7511);
+err_i2c_unregister_edid:
+       i2c_unregister_device(adv7511->i2c_edid);
+
+       return ret;
+}
+
+static int adv7511_remove(struct i2c_client *i2c)
+{
+       struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
+       if (adv7511->type == ADV7533) {
+               adv7533_detach_dsi(adv7511);
+               adv7533_uninit_cec(adv7511);
+       }
+
+       drm_bridge_remove(&adv7511->bridge);
+
+       i2c_unregister_device(adv7511->i2c_edid);
+
+       kfree(adv7511->edid);
+
+       return 0;
+}
+
+static const struct i2c_device_id adv7511_i2c_ids[] = {
+       { "adv7511", ADV7511 },
+       { "adv7511w", ADV7511 },
+       { "adv7513", ADV7511 },
+#ifdef CONFIG_DRM_I2C_ADV7533
+       { "adv7533", ADV7533 },
+#endif
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
+
+static const struct of_device_id adv7511_of_ids[] = {
+       { .compatible = "adi,adv7511", .data = (void *)ADV7511 },
+       { .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
+       { .compatible = "adi,adv7513", .data = (void *)ADV7511 },
+#ifdef CONFIG_DRM_I2C_ADV7533
+       { .compatible = "adi,adv7533", .data = (void *)ADV7533 },
+#endif
+       { }
+};
+MODULE_DEVICE_TABLE(of, adv7511_of_ids);
+
+static struct mipi_dsi_driver adv7533_dsi_driver = {
+       .driver.name = "adv7533",
+};
+
+static struct i2c_driver adv7511_driver = {
+       .driver = {
+               .name = "adv7511",
+               .of_match_table = adv7511_of_ids,
+       },
+       .id_table = adv7511_i2c_ids,
+       .probe = adv7511_probe,
+       .remove = adv7511_remove,
+};
+
+static int __init adv7511_init(void)
+{
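+       /*
+        * Register the stub DSI driver first so that the DSI device
+        * created by adv7533_attach_dsi() has a driver to bind to.
+        */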
+       if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+               mipi_dsi_driver_register(&adv7533_dsi_driver);
+
+       return i2c_add_driver(&adv7511_driver);
+}
+module_init(adv7511_init);
+
+static void __exit adv7511_exit(void)
+{
+       i2c_del_driver(&adv7511_driver);
+
+       if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
+               mipi_dsi_driver_unregister(&adv7533_dsi_driver);
+}
+module_exit(adv7511_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
new file mode 100644 (file)
index 0000000..5eebd15
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of_graph.h>
+
+#include "adv7511.h"
+
+static const struct reg_sequence adv7533_fixed_registers[] = {
+       { 0x16, 0x20 },
+       { 0x9a, 0xe0 },
+       { 0xba, 0x70 },
+       { 0xde, 0x82 },
+       { 0xe4, 0x40 },
+       { 0xe5, 0x80 },
+};
+
+static const struct reg_sequence adv7533_cec_fixed_registers[] = {
+       { 0x15, 0xd0 },
+       { 0x17, 0xd0 },
+       { 0x24, 0x20 },
+       { 0x57, 0x11 },
+};
+
+static const struct regmap_config adv7533_cec_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = 0xff,
+       .cache_type = REGCACHE_RBTREE,
+};
+
+static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
+{
+       struct mipi_dsi_device *dsi = adv->dsi;
+       struct drm_display_mode *mode = &adv->curr_mode;
+       unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
+       u8 clock_div_by_lanes[] = { 6, 4, 3 };  /* 2, 3, 4 lanes */
+
+       hsw = mode->hsync_end - mode->hsync_start;
+       hfp = mode->hsync_start - mode->hdisplay;
+       hbp = mode->htotal - mode->hsync_end;
+       vsw = mode->vsync_end - mode->vsync_start;
+       vfp = mode->vsync_start - mode->vdisplay;
+       vbp = mode->vtotal - mode->vsync_end;
+
+       /* set pixel clock divider mode */
+       regmap_write(adv->regmap_cec, 0x16,
+                    clock_div_by_lanes[dsi->lanes - 2] << 3);
+
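+       /*
+        * Timing values are 12 bits wide: the high byte goes into the
+        * first register, the low nibble left-aligned into the second.
+        */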
+       /* horizontal porch params */
+       regmap_write(adv->regmap_cec, 0x28, mode->htotal >> 4);
+       regmap_write(adv->regmap_cec, 0x29, (mode->htotal << 4) & 0xff);
+       regmap_write(adv->regmap_cec, 0x2a, hsw >> 4);
+       regmap_write(adv->regmap_cec, 0x2b, (hsw << 4) & 0xff);
+       regmap_write(adv->regmap_cec, 0x2c, hfp >> 4);
+       regmap_write(adv->regmap_cec, 0x2d, (hfp << 4) & 0xff);
+       regmap_write(adv->regmap_cec, 0x2e, hbp >> 4);
+       regmap_write(adv->regmap_cec, 0x2f, (hbp << 4) & 0xff);
+
+       /* vertical porch params */
+       regmap_write(adv->regmap_cec, 0x30, mode->vtotal >> 4);
+       regmap_write(adv->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff);
+       regmap_write(adv->regmap_cec, 0x32, vsw >> 4);
+       regmap_write(adv->regmap_cec, 0x33, (vsw << 4) & 0xff);
+       regmap_write(adv->regmap_cec, 0x34, vfp >> 4);
+       regmap_write(adv->regmap_cec, 0x35, (vfp << 4) & 0xff);
+       regmap_write(adv->regmap_cec, 0x36, vbp >> 4);
+       regmap_write(adv->regmap_cec, 0x37, (vbp << 4) & 0xff);
+}
+
+void adv7533_dsi_power_on(struct adv7511 *adv)
+{
+       struct mipi_dsi_device *dsi = adv->dsi;
+
+       if (adv->use_timing_gen)
+               adv7511_dsi_config_timing_gen(adv);
+
+       /* set number of dsi lanes */
+       regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
+
+       if (adv->use_timing_gen) {
+               /* reset internal timing generator */
+               regmap_write(adv->regmap_cec, 0x27, 0xcb);
+               regmap_write(adv->regmap_cec, 0x27, 0x8b);
+               regmap_write(adv->regmap_cec, 0x27, 0xcb);
+       } else {
+               /* disable internal timing generator */
+               regmap_write(adv->regmap_cec, 0x27, 0x0b);
+       }
+
+       /* enable hdmi */
+       regmap_write(adv->regmap_cec, 0x03, 0x89);
+       /* disable test mode */
+       regmap_write(adv->regmap_cec, 0x55, 0x00);
+
+       regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers,
+                             ARRAY_SIZE(adv7533_cec_fixed_registers));
+}
+
+void adv7533_dsi_power_off(struct adv7511 *adv)
+{
+       /* disable hdmi */
+       regmap_write(adv->regmap_cec, 0x03, 0x0b);
+       /* disable internal timing generator */
+       regmap_write(adv->regmap_cec, 0x27, 0x0b);
+}
+
+void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode)
+{
+       struct mipi_dsi_device *dsi = adv->dsi;
+       int lanes, ret;
+
+       if (adv->num_dsi_lanes != 4)
+               return;
+
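+       /* 4-lane hosts drop to 3 lanes for pixel clocks of 80 MHz or less. */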
+       if (mode->clock > 80000)
+               lanes = 4;
+       else
+               lanes = 3;
+
+       if (lanes != dsi->lanes) {
+               mipi_dsi_detach(dsi);
+               dsi->lanes = lanes;
+               ret = mipi_dsi_attach(dsi);
+               if (ret)
+                       dev_err(&dsi->dev, "failed to change host lanes\n");
+       }
+}
+
+int adv7533_patch_registers(struct adv7511 *adv)
+{
+       return regmap_register_patch(adv->regmap,
+                                    adv7533_fixed_registers,
+                                    ARRAY_SIZE(adv7533_fixed_registers));
+}
+
+void adv7533_uninit_cec(struct adv7511 *adv)
+{
+       /*
+        * The probe error path can get here for ADV7511 parts too, where
+        * the CEC dummy client was never created.
+        */
+       if (adv->i2c_cec)
+               i2c_unregister_device(adv->i2c_cec);
+}
+
+static const int cec_i2c_addr = 0x78;
+
+int adv7533_init_cec(struct adv7511 *adv)
+{
+       int ret;
+
+       adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1);
+       if (!adv->i2c_cec)
+               return -ENOMEM;
+
+       adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
+                                       &adv7533_cec_regmap_config);
+       if (IS_ERR(adv->regmap_cec)) {
+               ret = PTR_ERR(adv->regmap_cec);
+               goto err;
+       }
+
+       ret = regmap_register_patch(adv->regmap_cec,
+                                   adv7533_cec_fixed_registers,
+                                   ARRAY_SIZE(adv7533_cec_fixed_registers));
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       adv7533_uninit_cec(adv);
+       return ret;
+}
+
+int adv7533_attach_dsi(struct adv7511 *adv)
+{
+       struct device *dev = &adv->i2c_main->dev;
+       struct mipi_dsi_host *host;
+       struct mipi_dsi_device *dsi;
+       int ret = 0;
+       const struct mipi_dsi_device_info info = { .type = "adv7533",
+                                                  .channel = 0,
+                                                  .node = NULL,
+                                                };
+
+       host = of_find_mipi_dsi_host_by_node(adv->host_node);
+       if (!host) {
+               dev_err(dev, "failed to find dsi host\n");
+               return -EPROBE_DEFER;
+       }
+
+       dsi = mipi_dsi_device_register_full(host, &info);
+       if (IS_ERR(dsi)) {
+               dev_err(dev, "failed to create dsi device\n");
+               ret = PTR_ERR(dsi);
+               goto err_dsi_device;
+       }
+
+       adv->dsi = dsi;
+
+       dsi->lanes = adv->num_dsi_lanes;
+       dsi->format = MIPI_DSI_FMT_RGB888;
+       dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+                         MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+
+       ret = mipi_dsi_attach(dsi);
+       if (ret < 0) {
+               dev_err(dev, "failed to attach dsi to host\n");
+               goto err_dsi_attach;
+       }
+
+       return 0;
+
+err_dsi_attach:
+       mipi_dsi_device_unregister(dsi);
+err_dsi_device:
+       return ret;
+}
+
+void adv7533_detach_dsi(struct adv7511 *adv)
+{
+       mipi_dsi_detach(adv->dsi);
+       mipi_dsi_device_unregister(adv->dsi);
+}
+
+int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+{
+       u32 num_lanes = 0;
+       struct device_node *endpoint;
+
+       of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+
+       if (num_lanes < 1 || num_lanes > 4)
+               return -EINVAL;
+
+       adv->num_dsi_lanes = num_lanes;
+
+       endpoint = of_graph_get_next_endpoint(np, NULL);
+       if (!endpoint)
+               return -ENODEV;
+
+       adv->host_node = of_graph_get_remote_port_parent(endpoint);
+       if (!adv->host_node) {
+               of_node_put(endpoint);
+               return -ENODEV;
+       }
+
+       of_node_put(endpoint);
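+       /*
+        * host_node is only used as a lookup key in adv7533_attach_dsi(),
+        * so the reference can be dropped right away.
+        */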
+       of_node_put(adv->host_node);
+
+       adv->use_timing_gen = !of_property_read_bool(np,
+                                               "adi,disable-timing-generator");
+
+       /* TODO: Check if these need to be parsed by DT or not */
+       adv->rgb = true;
+       adv->embedded_sync = false;
+
+       return 0;
+}
index d087b054c36041424b04a8cea291a8c6670b162a..f9f03bcba0af56c8c81926ea322e0c52817fff33 100644 (file)
@@ -986,16 +986,8 @@ unlock:
        return num_modes;
 }
 
-static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector)
-{
-       struct anx78xx *anx78xx = connector_to_anx78xx(connector);
-
-       return anx78xx->bridge.encoder;
-}
-
 static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
        .get_modes = anx78xx_get_modes,
-       .best_encoder = anx78xx_best_encoder,
 };
 
 static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
index 7699597070a13d09909e64780faba937a2b13b8c..32715daf73cb70b491c808ef480be679d73db76b 100644 (file)
@@ -938,7 +938,7 @@ int analogix_dp_get_modes(struct drm_connector *connector)
                num_modes += drm_panel_get_modes(dp->plat_data->panel);
 
        if (dp->plat_data->get_modes)
-               num_modes += dp->plat_data->get_modes(dp->plat_data);
+               num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
 
        return num_modes;
 }
@@ -1208,6 +1208,7 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
 
        switch (dp->plat_data->dev_type) {
        case RK3288_DP:
+       case RK3399_EDP:
                /*
                 * Like Rk3288 DisplayPort TRM indicate that "Main link
                 * containing 4 physical lanes of 2.7/1.62 Gbps/lane".
index f09275d40f704b5a2aa505b7e7f51493cf80d150..b45638043ec41308681902d06907786a58456522 100644 (file)
@@ -127,10 +127,10 @@ enum analog_power_block {
 };
 
 enum dp_irq_type {
-       DP_IRQ_TYPE_HP_CABLE_IN,
-       DP_IRQ_TYPE_HP_CABLE_OUT,
-       DP_IRQ_TYPE_HP_CHANGE,
-       DP_IRQ_TYPE_UNKNOWN,
+       DP_IRQ_TYPE_HP_CABLE_IN  = BIT(0),
+       DP_IRQ_TYPE_HP_CABLE_OUT = BIT(1),
+       DP_IRQ_TYPE_HP_CHANGE    = BIT(2),
+       DP_IRQ_TYPE_UNKNOWN      = BIT(3),
 };
 
 struct video_info {
index 49205ef02be3afed044a2f58d4dce13f985278a2..48030f0cf4971672df899c9ca2cf2cac41f07fad 100644 (file)
@@ -74,8 +74,12 @@ void analogix_dp_init_analog_param(struct analogix_dp_device *dp)
        reg = SEL_24M | TX_DVDD_BIT_1_0625V;
        writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2);
 
-       if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) {
-               writel(REF_CLK_24M, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
+       if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
+               reg = REF_CLK_24M;
+               if (dp->plat_data->dev_type == RK3288_DP)
+                       reg ^= REF_CLK_MASK;
+
+               writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
                writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2);
                writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3);
                writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4);
@@ -244,7 +248,7 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
        u32 reg;
        u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
 
-       if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+       if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
                phy_pd_addr = ANALOGIX_DP_PD;
 
        switch (block) {
@@ -448,7 +452,7 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
        analogix_dp_reset_aux(dp);
 
        /* Disable AUX transaction H/W retry */
-       if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+       if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
                reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
                      AUX_HW_RETRY_COUNT_SEL(3) |
                      AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
index 337912b0aeabc9382f9ffdbd546ecddba1b691da..cdcc6c5add5e9c27befb644520784edaec9c6a4a 100644 (file)
 #define HSYNC_POLARITY_CFG                     (0x1 << 0)
 
 /* ANALOGIX_DP_PLL_REG_1 */
-#define REF_CLK_24M                            (0x1 << 1)
-#define REF_CLK_27M                            (0x0 << 1)
+#define REF_CLK_24M                            (0x1 << 0)
+#define REF_CLK_27M                            (0x0 << 0)
+#define REF_CLK_MASK                           (0x1 << 0)
 
 /* ANALOGIX_DP_LANE_MAP */
 #define LANE3_MAP_LOGIC_LANE_0                 (0x0 << 6)
index c9d941283d309a847d9a1f4a4708c91f5e7063f0..77ab47341658a2e109b323ee9393fc5456bdda12 100644 (file)
@@ -1476,15 +1476,6 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector,
        return mode_status;
 }
 
-static struct drm_encoder *dw_hdmi_connector_best_encoder(struct drm_connector
-                                                          *connector)
-{
-       struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
-                                            connector);
-
-       return hdmi->encoder;
-}
-
 static void dw_hdmi_connector_destroy(struct drm_connector *connector)
 {
        drm_connector_unregister(connector);
@@ -1504,14 +1495,6 @@ static void dw_hdmi_connector_force(struct drm_connector *connector)
 }
 
 static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
-       .fill_modes = drm_helper_probe_single_connector_modes,
-       .detect = dw_hdmi_connector_detect,
-       .destroy = dw_hdmi_connector_destroy,
-       .force = dw_hdmi_connector_force,
-};
-
-static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = dw_hdmi_connector_detect,
@@ -1525,7 +1508,7 @@ static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
 static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
        .get_modes = dw_hdmi_connector_get_modes,
        .mode_valid = dw_hdmi_connector_mode_valid,
-       .best_encoder = dw_hdmi_connector_best_encoder,
+       .best_encoder = drm_atomic_helper_best_encoder,
 };
 
 static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
@@ -1643,14 +1626,9 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi)
        drm_connector_helper_add(&hdmi->connector,
                                 &dw_hdmi_connector_helper_funcs);
 
-       if (drm_core_check_feature(drm, DRIVER_ATOMIC))
-               drm_connector_init(drm, &hdmi->connector,
-                                  &dw_hdmi_atomic_connector_funcs,
-                                  DRM_MODE_CONNECTOR_HDMIA);
-       else
-               drm_connector_init(drm, &hdmi->connector,
-                                  &dw_hdmi_connector_funcs,
-                                  DRM_MODE_CONNECTOR_HDMIA);
+       drm_connector_init(drm, &hdmi->connector,
+                          &dw_hdmi_connector_funcs,
+                          DRM_MODE_CONNECTOR_HDMIA);
 
        drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
 
index 7ecd59f70b8ea72c27bf8bd12c7fb8a9d97fba55..93f3dacf9e27170d11ef17d66d4e6f994a91b2cc 100644 (file)
@@ -235,16 +235,8 @@ out:
        return num_modes;
 }
 
-static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
-{
-       struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
-
-       return ptn_bridge->bridge.encoder;
-}
-
 static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
        .get_modes = ptn3460_get_modes,
-       .best_encoder = ptn3460_best_encoder,
 };
 
 static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
index be881e9fef8f0bd5f5f65f285d03cf4c064241a1..5cd8dd7e5904e701ade77f2f652238f0bcdfbfd1 100644 (file)
@@ -474,18 +474,8 @@ static int ps8622_get_modes(struct drm_connector *connector)
        return drm_panel_get_modes(ps8622->panel);
 }
 
-static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector)
-{
-       struct ps8622_bridge *ps8622;
-
-       ps8622 = connector_to_ps8622(connector);
-
-       return ps8622->bridge.encoder;
-}
-
 static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
        .get_modes = ps8622_get_modes,
-       .best_encoder = ps8622_best_encoder,
 };
 
 static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
new file mode 100644 (file)
index 0000000..9126d03
--- /dev/null
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) 2016 Atmel
+ *                   Bo Shen <voice.shen@atmel.com>
+ *
+ * Authors:          Bo Shen <voice.shen@atmel.com>
+ *                   Boris Brezillon <boris.brezillon@free-electrons.com>
+ *                   Wu, Songjun <Songjun.Wu@atmel.com>
+ *
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+#define SII902X_TPI_VIDEO_DATA                 0x0
+
+#define SII902X_TPI_PIXEL_REPETITION           0x8
+#define SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT     BIT(5)
+#define SII902X_TPI_AVI_PIXEL_REP_RISING_EDGE   BIT(4)
+#define SII902X_TPI_AVI_PIXEL_REP_4X           3
+#define SII902X_TPI_AVI_PIXEL_REP_2X           1
+#define SII902X_TPI_AVI_PIXEL_REP_NONE         0
+#define SII902X_TPI_CLK_RATIO_HALF             (0 << 6)
+#define SII902X_TPI_CLK_RATIO_1X               (1 << 6)
+#define SII902X_TPI_CLK_RATIO_2X               (2 << 6)
+#define SII902X_TPI_CLK_RATIO_4X               (3 << 6)
+
+#define SII902X_TPI_AVI_IN_FORMAT              0x9
+#define SII902X_TPI_AVI_INPUT_BITMODE_12BIT    BIT(7)
+#define SII902X_TPI_AVI_INPUT_DITHER           BIT(6)
+#define SII902X_TPI_AVI_INPUT_RANGE_LIMITED    (2 << 2)
+#define SII902X_TPI_AVI_INPUT_RANGE_FULL       (1 << 2)
+#define SII902X_TPI_AVI_INPUT_RANGE_AUTO       (0 << 2)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_BLACK (3 << 0)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV422        (2 << 0)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV444        (1 << 0)
+#define SII902X_TPI_AVI_INPUT_COLORSPACE_RGB   (0 << 0)
+
+#define SII902X_TPI_AVI_INFOFRAME              0x0c
+
+#define SII902X_SYS_CTRL_DATA                  0x1a
+#define SII902X_SYS_CTRL_PWR_DWN               BIT(4)
+#define SII902X_SYS_CTRL_AV_MUTE               BIT(3)
+#define SII902X_SYS_CTRL_DDC_BUS_REQ           BIT(2)
+#define SII902X_SYS_CTRL_DDC_BUS_GRTD          BIT(1)
+#define SII902X_SYS_CTRL_OUTPUT_MODE           BIT(0)
+#define SII902X_SYS_CTRL_OUTPUT_HDMI           1
+#define SII902X_SYS_CTRL_OUTPUT_DVI            0
+
+#define SII902X_REG_CHIPID(n)                  (0x1b + (n))
+
+#define SII902X_PWR_STATE_CTRL                 0x1e
+#define SII902X_AVI_POWER_STATE_MSK            GENMASK(1, 0)
+#define SII902X_AVI_POWER_STATE_D(l)           ((l) & SII902X_AVI_POWER_STATE_MSK)
+
+#define SII902X_INT_ENABLE                     0x3c
+#define SII902X_INT_STATUS                     0x3d
+#define SII902X_HOTPLUG_EVENT                  BIT(0)
+#define SII902X_PLUGGED_STATUS                 BIT(2)
+
+#define SII902X_REG_TPI_RQB                    0xc7
+
+#define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500
+
+struct sii902x {
+       struct i2c_client *i2c;
+       struct regmap *regmap;
+       struct drm_bridge bridge;
+       struct drm_connector connector;
+       struct gpio_desc *reset_gpio;
+};
+
+static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge)
+{
+       return container_of(bridge, struct sii902x, bridge);
+}
+
+static inline struct sii902x *connector_to_sii902x(struct drm_connector *con)
+{
+       return container_of(con, struct sii902x, connector);
+}
+
+static void sii902x_reset(struct sii902x *sii902x)
+{
+       if (!sii902x->reset_gpio)
+               return;
+
+       gpiod_set_value(sii902x->reset_gpio, 1);
+
+       /* The datasheet says treset-min = 100us. Make it 150us to be sure. */
+       usleep_range(150, 200);
+
+       gpiod_set_value(sii902x->reset_gpio, 0);
+}
+
+static enum drm_connector_status
+sii902x_connector_detect(struct drm_connector *connector, bool force)
+{
+       struct sii902x *sii902x = connector_to_sii902x(connector);
+       unsigned int status;
+
+       regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+
+       return (status & SII902X_PLUGGED_STATUS) ?
+              connector_status_connected : connector_status_disconnected;
+}
+
+static const struct drm_connector_funcs sii902x_connector_funcs = {
+       .dpms = drm_atomic_helper_connector_dpms,
+       .detect = sii902x_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int sii902x_get_modes(struct drm_connector *connector)
+{
+       struct sii902x *sii902x = connector_to_sii902x(connector);
+       struct regmap *regmap = sii902x->regmap;
+       u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+       unsigned long timeout;
+       unsigned int status;
+       struct edid *edid;
+       int num = 0;
+       int ret;
+
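+       /*
+        * Request the DDC bus from the transmitter and poll until it is
+        * granted; while granted, the chip passes DDC through so the EDID
+        * can be read over the same adapter.
+        */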
+       ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
+                                SII902X_SYS_CTRL_DDC_BUS_REQ,
+                                SII902X_SYS_CTRL_DDC_BUS_REQ);
+       if (ret)
+               return ret;
+
+       timeout = jiffies +
+                 msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+       do {
+               ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
+               if (ret)
+                       return ret;
+       } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+                time_before(jiffies, timeout));
+
+       if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+               dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus\n");
+               return -ETIMEDOUT;
+       }
+
+       ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
+       if (ret)
+               return ret;
+
+       edid = drm_get_edid(connector, sii902x->i2c->adapter);
+       drm_mode_connector_update_edid_property(connector, edid);
+       if (edid) {
+               num = drm_add_edid_modes(connector, edid);
+               kfree(edid);
+       }
+
+       ret = drm_display_info_set_bus_formats(&connector->display_info,
+                                              &bus_format, 1);
+       if (ret)
+               return ret;
+
+       ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
+       if (ret)
+               return ret;
+
+       ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
+                                SII902X_SYS_CTRL_DDC_BUS_REQ |
+                                SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
+       if (ret)
+               return ret;
+
+       timeout = jiffies +
+                 msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+       do {
+               ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
+               if (ret)
+                       return ret;
+       } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+                          SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+                time_before(jiffies, timeout));
+
+       if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+                     SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+               dev_err(&sii902x->i2c->dev, "failed to release the i2c bus\n");
+               return -ETIMEDOUT;
+       }
+
+       return num;
+}
+
+static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector,
+                                              struct drm_display_mode *mode)
+{
+       /* TODO: check mode */
+
+       return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs = {
+       .get_modes = sii902x_get_modes,
+       .mode_valid = sii902x_mode_valid,
+};
+
+static void sii902x_bridge_disable(struct drm_bridge *bridge)
+{
+       struct sii902x *sii902x = bridge_to_sii902x(bridge);
+
+       regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+                          SII902X_SYS_CTRL_PWR_DWN,
+                          SII902X_SYS_CTRL_PWR_DWN);
+}
+
+static void sii902x_bridge_enable(struct drm_bridge *bridge)
+{
+       struct sii902x *sii902x = bridge_to_sii902x(bridge);
+
+       regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL,
+                          SII902X_AVI_POWER_STATE_MSK,
+                          SII902X_AVI_POWER_STATE_D(0));
+       regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+                          SII902X_SYS_CTRL_PWR_DWN, 0);
+}
+
+static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
+                                   struct drm_display_mode *mode,
+                                   struct drm_display_mode *adj)
+{
+       struct sii902x *sii902x = bridge_to_sii902x(bridge);
+       struct regmap *regmap = sii902x->regmap;
+       u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
+       struct hdmi_avi_infoframe frame;
+       int ret;
+
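+       /*
+        * TPI video mode data: pixel clock, vertical refresh rate, then
+        * the active width and height, each packed little-endian.
+        */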
+       buf[0] = adj->clock;
+       buf[1] = adj->clock >> 8;
+       buf[2] = adj->vrefresh;
+       buf[3] = 0x00;
+       buf[4] = adj->hdisplay;
+       buf[5] = adj->hdisplay >> 8;
+       buf[6] = adj->vdisplay;
+       buf[7] = adj->vdisplay >> 8;
+       buf[8] = SII902X_TPI_CLK_RATIO_1X | SII902X_TPI_AVI_PIXEL_REP_NONE |
+                SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT;
+       buf[9] = SII902X_TPI_AVI_INPUT_RANGE_AUTO |
+                SII902X_TPI_AVI_INPUT_COLORSPACE_RGB;
+
+       ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10);
+       if (ret)
+               return;
+
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj);
+       if (ret < 0) {
+               DRM_ERROR("couldn't fill AVI infoframe\n");
+               return;
+       }
+
+       ret = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
+       if (ret < 0) {
+               DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
+               return;
+       }
+
+       /* Do not send the infoframe header, but keep the CRC field. */
+       regmap_bulk_write(regmap, SII902X_TPI_AVI_INFOFRAME,
+                         buf + HDMI_INFOFRAME_HEADER_SIZE - 1,
+                         HDMI_AVI_INFOFRAME_SIZE + 1);
+}
+
+static int sii902x_bridge_attach(struct drm_bridge *bridge)
+{
+       struct sii902x *sii902x = bridge_to_sii902x(bridge);
+       struct drm_device *drm = bridge->dev;
+       int ret;
+
+       drm_connector_helper_add(&sii902x->connector,
+                                &sii902x_connector_helper_funcs);
+
+       if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) {
+               dev_err(&sii902x->i2c->dev,
+                       "sii902x driver is only compatible with DRM devices supporting atomic updates\n");
+               return -ENOTSUPP;
+       }
+
+       ret = drm_connector_init(drm, &sii902x->connector,
+                                &sii902x_connector_funcs,
+                                DRM_MODE_CONNECTOR_HDMIA);
+       if (ret)
+               return ret;
+
+       if (sii902x->i2c->irq > 0)
+               sii902x->connector.polled = DRM_CONNECTOR_POLL_HPD;
+       else
+               sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
+
+       drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder);
+
+       return 0;
+}
+
+static const struct drm_bridge_funcs sii902x_bridge_funcs = {
+       .attach = sii902x_bridge_attach,
+       .mode_set = sii902x_bridge_mode_set,
+       .disable = sii902x_bridge_disable,
+       .enable = sii902x_bridge_enable,
+};
+
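+/*
+ * Status and DDC handshake bits change behind the driver's back, so the
+ * whole register range is marked volatile and nothing is cached.
+ */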
+static const struct regmap_range sii902x_volatile_ranges[] = {
+       { .range_min = 0, .range_max = 0xff },
+};
+
+static const struct regmap_access_table sii902x_volatile_table = {
+       .yes_ranges = sii902x_volatile_ranges,
+       .n_yes_ranges = ARRAY_SIZE(sii902x_volatile_ranges),
+};
+
+static const struct regmap_config sii902x_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .volatile_table = &sii902x_volatile_table,
+       .cache_type = REGCACHE_NONE,
+};
+
+static irqreturn_t sii902x_interrupt(int irq, void *data)
+{
+       struct sii902x *sii902x = data;
+       unsigned int status = 0;
+
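+       /* Read the status and write it back to acknowledge the interrupt. */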
+       regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+       regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
+
+       if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev)
+               drm_helper_hpd_irq_event(sii902x->bridge.dev);
+
+       return IRQ_HANDLED;
+}
+
+static int sii902x_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       unsigned int status = 0;
+       struct sii902x *sii902x;
+       u8 chipid[4];
+       int ret;
+
+       sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
+       if (!sii902x)
+               return -ENOMEM;
+
+       sii902x->i2c = client;
+       sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config);
+       if (IS_ERR(sii902x->regmap))
+               return PTR_ERR(sii902x->regmap);
+
+       sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+                                                     GPIOD_OUT_LOW);
+       if (IS_ERR(sii902x->reset_gpio)) {
+               dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n",
+                       PTR_ERR(sii902x->reset_gpio));
+               return PTR_ERR(sii902x->reset_gpio);
+       }
+
+       sii902x_reset(sii902x);
+
+       ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
+       if (ret)
+               return ret;
+
+       ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0),
+                              &chipid, 4);
+       if (ret) {
+               dev_err(dev, "regmap_read failed %d\n", ret);
+               return ret;
+       }
+
+       if (chipid[0] != 0xb0) {
+               dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n",
+                       chipid[0]);
+               return -EINVAL;
+       }
+
+       /* Clear all pending interrupts */
+       regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
+       regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
+
+       if (client->irq > 0) {
+               regmap_write(sii902x->regmap, SII902X_INT_ENABLE,
+                            SII902X_HOTPLUG_EVENT);
+
+               ret = devm_request_threaded_irq(dev, client->irq, NULL,
+                                               sii902x_interrupt,
+                                               IRQF_ONESHOT, dev_name(dev),
+                                               sii902x);
+               if (ret)
+                       return ret;
+       }
+
+       sii902x->bridge.funcs = &sii902x_bridge_funcs;
+       sii902x->bridge.of_node = dev->of_node;
+       ret = drm_bridge_add(&sii902x->bridge);
+       if (ret) {
+               dev_err(dev, "Failed to add drm_bridge\n");
+               return ret;
+       }
+
+       i2c_set_clientdata(client, sii902x);
+
+       return 0;
+}
+
+static int sii902x_remove(struct i2c_client *client)
+{
+       struct sii902x *sii902x = i2c_get_clientdata(client);
+
+       drm_bridge_remove(&sii902x->bridge);
+
+       return 0;
+}
+
+static const struct of_device_id sii902x_dt_ids[] = {
+       { .compatible = "sil,sii9022", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sii902x_dt_ids);
+
+static const struct i2c_device_id sii902x_i2c_ids[] = {
+       { "sii9022", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
+
+static struct i2c_driver sii902x_driver = {
+       .probe = sii902x_probe,
+       .remove = sii902x_remove,
+       .driver = {
+               .name = "sii902x",
+               .of_match_table = sii902x_dt_ids,
+       },
+       .id_table = sii902x_i2c_ids,
+};
+module_i2c_driver(sii902x_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("SII902x RGB -> HDMI bridges");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
new file mode 100644 (file)
index 0000000..a09825d
--- /dev/null
@@ -0,0 +1,1413 @@
+/*
+ * tc358767 eDP bridge driver
+ *
+ * Copyright (C) 2016 CogentEmbedded Inc
+ * Author: Andrey Gusakov <andrey.gusakov@cogentembedded.com>
+ *
+ * Copyright (C) 2016 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
+ *
+ * Initially based on: drivers/gpu/drm/i2c/tda998x_drv.c
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+/* Registers */
+
+/* Display Parallel Interface */
+#define DPIPXLFMT              0x0440
+#define VS_POL_ACTIVE_LOW              (1 << 10)
+#define HS_POL_ACTIVE_LOW              (1 << 9)
+#define DE_POL_ACTIVE_HIGH             (0 << 8)
+#define SUB_CFG_TYPE_CONFIG1           (0 << 2) /* LSB aligned */
+#define SUB_CFG_TYPE_CONFIG2           (1 << 2) /* Loosely Packed */
+#define SUB_CFG_TYPE_CONFIG3           (2 << 2) /* LSB aligned 8-bit */
+#define DPI_BPP_RGB888                 (0 << 0)
+#define DPI_BPP_RGB666                 (1 << 0)
+#define DPI_BPP_RGB565                 (2 << 0)
+
+/* Video Path */
+#define VPCTRL0                        0x0450
+#define OPXLFMT_RGB666                 (0 << 8)
+#define OPXLFMT_RGB888                 (1 << 8)
+#define FRMSYNC_DISABLED               (0 << 4) /* Video Timing Gen Disabled */
+#define FRMSYNC_ENABLED                        (1 << 4) /* Video Timing Gen Enabled */
+#define MSF_DISABLED                   (0 << 0) /* Magic Square FRC disabled */
+#define MSF_ENABLED                    (1 << 0) /* Magic Square FRC enabled */
+#define HTIM01                 0x0454
+#define HTIM02                 0x0458
+#define VTIM01                 0x045c
+#define VTIM02                 0x0460
+#define VFUEN0                 0x0464
+#define VFUEN                          BIT(0)   /* Video Frame Timing Upload */
+
+/* System */
+#define TC_IDREG               0x0500
+#define SYSCTRL                        0x0510
+#define DP0_AUDSRC_NO_INPUT            (0 << 3)
+#define DP0_AUDSRC_I2S_RX              (1 << 3)
+#define DP0_VIDSRC_NO_INPUT            (0 << 0)
+#define DP0_VIDSRC_DSI_RX              (1 << 0)
+#define DP0_VIDSRC_DPI_RX              (2 << 0)
+#define DP0_VIDSRC_COLOR_BAR           (3 << 0)
+
+/* Control */
+#define DP0CTL                 0x0600
+#define VID_MN_GEN                     BIT(6)   /* Auto-generate M/N values */
+#define EF_EN                          BIT(5)   /* Enable Enhanced Framing */
+#define VID_EN                         BIT(1)   /* Video transmission enable */
+#define DP_EN                          BIT(0)   /* Enable DPTX function */
+
+/* Clocks */
+#define DP0_VIDMNGEN0          0x0610
+#define DP0_VIDMNGEN1          0x0614
+#define DP0_VMNGENSTATUS       0x0618
+
+/* Main Channel */
+#define DP0_SECSAMPLE          0x0640
+#define DP0_VIDSYNCDELAY       0x0644
+#define DP0_TOTALVAL           0x0648
+#define DP0_STARTVAL           0x064c
+#define DP0_ACTIVEVAL          0x0650
+#define DP0_SYNCVAL            0x0654
+#define DP0_MISC               0x0658
+#define TU_SIZE_RECOMMENDED            (0x3f << 16) /* LSCLK cycles per TU */
+#define BPC_6                          (0 << 5)
+#define BPC_8                          (1 << 5)
+
+/* AUX channel */
+#define DP0_AUXCFG0            0x0660
+#define DP0_AUXCFG1            0x0664
+#define AUX_RX_FILTER_EN               BIT(16)
+
+#define DP0_AUXADDR            0x0668
+#define DP0_AUXWDATA(i)                (0x066c + (i) * 4)
+#define DP0_AUXRDATA(i)                (0x067c + (i) * 4)
+#define DP0_AUXSTATUS          0x068c
+#define AUX_STATUS_MASK                        0xf0
+#define AUX_STATUS_SHIFT               4
+#define AUX_TIMEOUT                    BIT(1)
+#define AUX_BUSY                       BIT(0)
+#define DP0_AUXI2CADR          0x0698
+
+/* Link Training */
+#define DP0_SRCCTRL            0x06a0
+#define DP0_SRCCTRL_SCRMBLDIS          BIT(13)
+#define DP0_SRCCTRL_EN810B             BIT(12)
+#define DP0_SRCCTRL_NOTP               (0 << 8)
+#define DP0_SRCCTRL_TP1                        (1 << 8)
+#define DP0_SRCCTRL_TP2                        (2 << 8)
+#define DP0_SRCCTRL_LANESKEW           BIT(7)
+#define DP0_SRCCTRL_SSCG               BIT(3)
+#define DP0_SRCCTRL_LANES_1            (0 << 2)
+#define DP0_SRCCTRL_LANES_2            (1 << 2)
+#define DP0_SRCCTRL_BW27               (1 << 1)
+#define DP0_SRCCTRL_BW162              (0 << 1)
+#define DP0_SRCCTRL_AUTOCORRECT                BIT(0)
+#define DP0_LTSTAT             0x06d0
+#define LT_LOOPDONE                    BIT(13)
+#define LT_STATUS_MASK                 (0x1f << 8)
+#define LT_CHANNEL1_EQ_BITS            (DP_CHANNEL_EQ_BITS << 4)
+#define LT_INTERLANE_ALIGN_DONE                BIT(3)
+#define LT_CHANNEL0_EQ_BITS            (DP_CHANNEL_EQ_BITS)
+#define DP0_SNKLTCHGREQ                0x06d4
+#define DP0_LTLOOPCTRL         0x06d8
+#define DP0_SNKLTCTRL          0x06e4
+
+/* PHY */
+#define DP_PHY_CTRL            0x0800
+#define DP_PHY_RST                     BIT(28)  /* DP PHY Global Soft Reset */
+#define BGREN                          BIT(25)  /* AUX PHY BGR Enable */
+#define PWR_SW_EN                      BIT(24)  /* PHY Power Switch Enable */
+#define PHY_M1_RST                     BIT(12)  /* Reset PHY1 Main Channel */
+#define PHY_RDY                                BIT(16)  /* PHY Main Channels Ready */
+#define PHY_M0_RST                     BIT(8)   /* Reset PHY0 Main Channel */
+#define PHY_A0_EN                      BIT(1)   /* PHY Aux Channel0 Enable */
+#define PHY_M0_EN                      BIT(0)   /* PHY Main Channel0 Enable */
+
+/* PLL */
+#define DP0_PLLCTRL            0x0900
+#define DP1_PLLCTRL            0x0904  /* not defined in DS */
+#define PXL_PLLCTRL            0x0908
+#define PLLUPDATE                      BIT(2)
+#define PLLBYP                         BIT(1)
+#define PLLEN                          BIT(0)
+#define PXL_PLLPARAM           0x0914
+#define IN_SEL_REFCLK                  (0 << 14)
+#define SYS_PLLPARAM           0x0918
+#define REF_FREQ_38M4                  (0 << 8) /* 38.4 MHz */
+#define REF_FREQ_19M2                  (1 << 8) /* 19.2 MHz */
+#define REF_FREQ_26M                   (2 << 8) /* 26 MHz */
+#define REF_FREQ_13M                   (3 << 8) /* 13 MHz */
+#define SYSCLK_SEL_LSCLK               (0 << 4)
+#define LSCLK_DIV_1                    (0 << 0)
+#define LSCLK_DIV_2                    (1 << 0)
+
+/* Test & Debug */
+#define TSTCTL                 0x0a00
+#define PLL_DBG                        0x0a04
+
+static bool tc_test_pattern;
+module_param_named(test, tc_test_pattern, bool, 0644);
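+/* Assumption: the flag selects the color-bar test source (DP0_VIDSRC_COLOR_BAR). */
+MODULE_PARM_DESC(test, "Output a test pattern instead of the incoming video");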
+
+struct tc_edp_link {
+       struct drm_dp_link      base;
+       u8                      assr;
+       int                     scrambler_dis;
+       int                     spread;
+       int                     coding8b10b;
+       u8                      swing;
+       u8                      preemp;
+};
+
+struct tc_data {
+       struct device           *dev;
+       struct regmap           *regmap;
+       struct drm_dp_aux       aux;
+
+       struct drm_bridge       bridge;
+       struct drm_connector    connector;
+       struct drm_panel        *panel;
+
+       /* link settings */
+       struct tc_edp_link      link;
+
+       /* display edid */
+       struct edid             *edid;
+       /* current mode */
+       struct drm_display_mode *mode;
+
+       u32                     rev;
+       u8                      assr;
+
+       struct gpio_desc        *sd_gpio;
+       struct gpio_desc        *reset_gpio;
+       struct clk              *refclk;
+};
+
+static inline struct tc_data *aux_to_tc(struct drm_dp_aux *a)
+{
+       return container_of(a, struct tc_data, aux);
+}
+
+static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
+{
+       return container_of(b, struct tc_data, bridge);
+}
+
+static inline struct tc_data *connector_to_tc(struct drm_connector *c)
+{
+       return container_of(c, struct tc_data, connector);
+}
+
+/* Simple macros to avoid repeated error checks */
+#define tc_write(reg, var)                                     \
+       do {                                                    \
+               ret = regmap_write(tc->regmap, reg, var);       \
+               if (ret)                                        \
+                       goto err;                               \
+       } while (0)
+#define tc_read(reg, var)                                      \
+       do {                                                    \
+               ret = regmap_read(tc->regmap, reg, var);        \
+               if (ret)                                        \
+                       goto err;                               \
+       } while (0)
+
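+/* Poll @addr until (value & cond_mask) == cond_value, or time out. */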
+static inline int tc_poll_timeout(struct regmap *map, unsigned int addr,
+                                 unsigned int cond_mask,
+                                 unsigned int cond_value,
+                                 unsigned long sleep_us, u64 timeout_us)
+{
+       ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+       unsigned int val;
+       int ret;
+
+       for (;;) {
+               ret = regmap_read(map, addr, &val);
+               if (ret)
+                       break;
+               if ((val & cond_mask) == cond_value)
+                       break;
+               if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) {
+                       ret = regmap_read(map, addr, &val);
+                       break;
+               }
+               if (sleep_us)
+                       usleep_range((sleep_us >> 2) + 1, sleep_us);
+       }
+       return ret ?: (((val & cond_mask) == cond_value) ? 0 : -ETIMEDOUT);
+}
+
+static int tc_aux_wait_busy(struct tc_data *tc, unsigned int timeout_ms)
+{
+       return tc_poll_timeout(tc->regmap, DP0_AUXSTATUS, AUX_BUSY, 0,
+                              1000, 1000 * timeout_ms);
+}
+
+static int tc_aux_get_status(struct tc_data *tc, u8 *reply)
+{
+       int ret;
+       u32 value;
+
+       ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &value);
+       if (ret < 0)
+               return ret;
+       if (value & AUX_BUSY) {
+               if (value & AUX_TIMEOUT) {
+                       dev_err(tc->dev, "i2c access timeout!\n");
+                       return -ETIMEDOUT;
+               }
+               return -EBUSY;
+       }
+
+       *reply = (value & AUX_STATUS_MASK) >> AUX_STATUS_SHIFT;
+       return 0;
+}
+
+static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
+                              struct drm_dp_aux_msg *msg)
+{
+       struct tc_data *tc = aux_to_tc(aux);
+       size_t size = min_t(size_t, 8, msg->size);
+       u8 request = msg->request & ~DP_AUX_I2C_MOT;
+       u8 *buf = msg->buffer;
+       u32 tmp = 0;
+       int i = 0;
+       int ret;
+
+       if (size == 0)
+               return 0;
+
+       ret = tc_aux_wait_busy(tc, 100);
+       if (ret)
+               goto err;
+
+       if (request == DP_AUX_I2C_WRITE || request == DP_AUX_NATIVE_WRITE) {
+               /* Store data */
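+               /*
+                * Native AUX bytes fill each 32-bit WDATA word LSB-first,
+                * I2C-over-AUX bytes MSB-first.
+                */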
+               while (i < size) {
+                       if (request == DP_AUX_NATIVE_WRITE)
+                               tmp = tmp | (buf[i] << (8 * (i & 0x3)));
+                       else
+                               tmp = (tmp << 8) | buf[i];
+                       i++;
+                       if (((i % 4) == 0) || (i == size)) {
+                               /* i already points past the word just
+                                * filled, so index via its last byte */
+                               tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
+                               tmp = 0;
+                       }
+               }
+       } else if (request != DP_AUX_I2C_READ &&
+                  request != DP_AUX_NATIVE_READ) {
+               return -EINVAL;
+       }
+
+       /* Store address */
+       tc_write(DP0_AUXADDR, msg->address);
+       /* Start transfer */
+       tc_write(DP0_AUXCFG0, ((size - 1) << 8) | request);
+
+       ret = tc_aux_wait_busy(tc, 100);
+       if (ret)
+               goto err;
+
+       ret = tc_aux_get_status(tc, &msg->reply);
+       if (ret)
+               goto err;
+
+       if (request == DP_AUX_I2C_READ || request == DP_AUX_NATIVE_READ) {
+               /* Read data */
+               while (i < size) {
+                       if ((i % 4) == 0)
+                               tc_read(DP0_AUXRDATA(i >> 2), &tmp);
+                       buf[i] = tmp & 0xff;
+                       tmp = tmp >> 8;
+                       i++;
+               }
+       }
+
+       return size;
+err:
+       return ret;
+}
+
+static const char * const training_pattern1_errors[] = {
+       "No errors",
+       "Aux write error",
+       "Aux read error",
+       "Max voltage reached error",
+       "Loop counter expired error",
+       "res", "res", "res"
+};
+
+static const char * const training_pattern2_errors[] = {
+       "No errors",
+       "Aux write error",
+       "Aux read error",
+       "Clock recovery failed error",
+       "Loop counter expired error",
+       "res", "res", "res"
+};
+
+static u32 tc_srcctrl(struct tc_data *tc)
+{
+       /*
+        * No training pattern, skew lane 1 data by two LSCLK cycles with
+        * respect to lane 0 data, AutoCorrect Mode = 0
+        */
+       u32 reg = DP0_SRCCTRL_NOTP | DP0_SRCCTRL_LANESKEW;
+
+       if (tc->link.scrambler_dis)
+               reg |= DP0_SRCCTRL_SCRMBLDIS;   /* Scrambler Disabled */
+       if (tc->link.coding8b10b)
+               /* Enable 8/10B Encoder (TxData[19:16] not used) */
+               reg |= DP0_SRCCTRL_EN810B;
+       if (tc->link.spread)
+               reg |= DP0_SRCCTRL_SSCG;        /* Spread Spectrum Enable */
+       if (tc->link.base.num_lanes == 2)
+               reg |= DP0_SRCCTRL_LANES_2;     /* Two Main Channel Lanes */
+       if (tc->link.base.rate != 162000)
+               reg |= DP0_SRCCTRL_BW27;        /* 2.7 Gbps link */
+       return reg;
+}
+
+static void tc_wait_pll_lock(struct tc_data *tc)
+{
+       /* Wait for PLL to lock: up to 2.09 ms, depending on refclk */
+       usleep_range(3000, 6000);
+}
+
+static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
+{
+       int ret;
+       int i_pre, best_pre = 1;
+       int i_post, best_post = 1;
+       int div, best_div = 1;
+       int mul, best_mul = 1;
+       int delta, best_delta;
+       int ext_div[] = {1, 2, 3, 5, 7};
+       int best_pixelclock = 0;
+       int vco_hi = 0;
+
+       dev_dbg(tc->dev, "PLL: requested %d pixelclock, ref %d\n", pixelclock,
+               refclk);
+       best_delta = pixelclock;
+       /* Loop over all possible ext_divs, skipping invalid configurations */
+       for (i_pre = 0; i_pre < ARRAY_SIZE(ext_div); i_pre++) {
+               /*
+                * refclk / ext_pre_div should be in the 1 to 200 MHz range.
+                * We don't allow any refclk > 200 MHz, only check lower bounds.
+                */
+               if (refclk / ext_div[i_pre] < 1000000)
+                       continue;
+               for (i_post = 0; i_post < ARRAY_SIZE(ext_div); i_post++) {
+                       for (div = 1; div <= 16; div++) {
+                               u32 clk;
+                               u64 tmp;
+
+                               tmp = pixelclock * ext_div[i_pre] *
+                                     ext_div[i_post] * div;
+                               do_div(tmp, refclk);
+                               mul = tmp;
+
+                               /* Check limits */
+                               if ((mul < 1) || (mul > 128))
+                                       continue;
+
+                               clk = (refclk / ext_div[i_pre] / div) * mul;
+                               /*
+                                * refclk * mul / (ext_pre_div * pre_div)
+                                * should be in the 150 to 650 MHz range
+                                */
+                               if ((clk > 650000000) || (clk < 150000000))
+                                       continue;
+
+                               clk = clk / ext_div[i_post];
+                               delta = clk - pixelclock;
+
+                               if (abs(delta) < abs(best_delta)) {
+                                       best_pre = i_pre;
+                                       best_post = i_post;
+                                       best_div = div;
+                                       best_mul = mul;
+                                       best_delta = delta;
+                                       best_pixelclock = clk;
+                               }
+                       }
+               }
+       }
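+       /*
+        * Illustrative example (assumed values, not from the datasheet):
+        * refclk = 19.2 MHz, requested pixelclock = 144 MHz. The search can
+        * settle on ext_pre = 1, div = 1, mul = 15, ext_post = 2: the VCO
+        * then runs at 19.2 MHz * 15 = 288 MHz (inside the 150..650 MHz
+        * window) and the output is 288 MHz / 2 = 144 MHz, i.e. delta = 0.
+        */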
+       if (best_pixelclock == 0) {
+               dev_err(tc->dev, "Failed to calc clock for %d pixelclock\n",
+                       pixelclock);
+               return -EINVAL;
+       }
+
+       dev_dbg(tc->dev, "PLL: got %d, delta %d\n", best_pixelclock,
+               best_delta);
+       dev_dbg(tc->dev, "PLL: %d / %d / %d * %d / %d\n", refclk,
+               ext_div[best_pre], best_div, best_mul, ext_div[best_post]);
+
+       /* if VCO >= 300 MHz */
+       if (refclk / ext_div[best_pre] / best_div * best_mul >= 300000000)
+               vco_hi = 1;
+       /* Per the datasheet, a divider of 16 and a multiplier of 128 are encoded as 0 */
+       if (best_div == 16)
+               best_div = 0;
+       if (best_mul == 128)
+               best_mul = 0;
+
+       /* Power up PLL and switch to bypass */
+       tc_write(PXL_PLLCTRL, PLLBYP | PLLEN);
+
+       tc_write(PXL_PLLPARAM,
+                (vco_hi << 24) |               /* For PLL VCO >= 300 MHz = 1 */
+                (ext_div[best_pre] << 20) |    /* External Pre-divider */
+                (ext_div[best_post] << 16) |   /* External Post-divider */
+                IN_SEL_REFCLK |                /* Use RefClk as PLL input */
+                (best_div << 8) |              /* Divider for PLL RefClk */
+                (best_mul << 0));              /* Multiplier for PLL */
+
+       /* Force PLL parameter update and disable bypass */
+       tc_write(PXL_PLLCTRL, PLLUPDATE | PLLEN);
+
+       tc_wait_pll_lock(tc);
+
+       return 0;
+err:
+       return ret;
+}
+
+static int tc_pxl_pll_dis(struct tc_data *tc)
+{
+       /* Enable PLL bypass, power down PLL */
+       return regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP);
+}
+
+static int tc_stream_clock_calc(struct tc_data *tc)
+{
+       int ret;
+       /*
+        * If the Stream clock and Link Symbol clock are
+        * asynchronous with each other, the value of M changes over
+        * time. This way of generating link clock and stream
+        * clock is called Asynchronous Clock mode. The value M
+        * must change while the value N stays constant. The
+        * value of N in this Asynchronous Clock mode must be set
+        * to 2^15 or 32,768.
+        *
+        * LSCLK = 1/10 of high speed link clock
+        *
+        * f_STRMCLK = M/N * f_LSCLK
+        * M/N = f_STRMCLK / f_LSCLK
+        *
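+        * As an illustrative example (assumed values): on a 2.7 Gbps link
+        * f_LSCLK = 270 MHz, so a 148.5 MHz stream clock corresponds to
+        * M = N * f_STRMCLK / f_LSCLK = 32768 * 148.5 / 270 ~= 18022; the
+        * hardware keeps regenerating M itself once VID_MN_GEN is enabled.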
+        */
+       tc_write(DP0_VIDMNGEN1, 32768);
+
+       return 0;
+err:
+       return ret;
+}
+
+static int tc_aux_link_setup(struct tc_data *tc)
+{
+       unsigned long rate;
+       u32 value;
+       int ret;
+
+       rate = clk_get_rate(tc->refclk);
+       switch (rate) {
+       case 38400000:
+               value = REF_FREQ_38M4;
+               break;
+       case 26000000:
+               value = REF_FREQ_26M;
+               break;
+       case 19200000:
+               value = REF_FREQ_19M2;
+               break;
+       case 13000000:
+               value = REF_FREQ_13M;
+               break;
+       default:
+               dev_err(tc->dev, "Invalid refclk rate: %lu Hz\n", rate);
+               return -EINVAL;
+       }
+
+       /* Setup DP-PHY / PLL */
+       value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+       tc_write(SYS_PLLPARAM, value);
+
+       tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
+
+       /*
+        * Initially PLLs are in bypass. Force PLL parameter update,
+        * disable PLL bypass, enable PLL
+        */
+       tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
+       tc_wait_pll_lock(tc);
+
+       tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
+       tc_wait_pll_lock(tc);
+
+       ret = tc_poll_timeout(tc->regmap, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1,
+                             1000);
+       if (ret == -ETIMEDOUT) {
+               dev_err(tc->dev, "Timeout waiting for PHY to become ready\n");
+               return ret;
+       } else if (ret) {
+               goto err;
+       }
+
+       /* Setup AUX link */
+       tc_write(DP0_AUXCFG1, AUX_RX_FILTER_EN |
+                (0x06 << 8) |  /* Aux Bit Period Calculator Threshold */
+                (0x3f << 0));  /* Aux Response Timeout Timer */
+
+       return 0;
+err:
+       dev_err(tc->dev, "tc_aux_link_setup failed: %d\n", ret);
+       return ret;
+}
+
+static int tc_get_display_props(struct tc_data *tc)
+{
+       int ret;
+       /* temp buffer */
+       u8 tmp[8];
+
+       /* Read DP Rx Link Capability */
+       ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
+       if (ret < 0)
+               goto err_dpcd_read;
+       if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
+               goto err_dpcd_inval;
+
+       ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
+       if (ret < 0)
+               goto err_dpcd_read;
+       tc->link.spread = tmp[0] & BIT(0); /* 0.5% down spread */
+
+       ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, tmp);
+       if (ret < 0)
+               goto err_dpcd_read;
+       tc->link.coding8b10b = tmp[0] & BIT(0);
+       tc->link.scrambler_dis = 0;
+       /* read assr */
+       ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, tmp);
+       if (ret < 0)
+               goto err_dpcd_read;
+       tc->link.assr = tmp[0] & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
+
+       dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
+               tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
+               (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
+               tc->link.base.num_lanes,
+               (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
+               "enhanced" : "non-enhanced");
+       dev_dbg(tc->dev, "ANSI 8B/10B: %d\n", tc->link.coding8b10b);
+       dev_dbg(tc->dev, "Display ASSR: %d, TC358767 ASSR: %d\n",
+               tc->link.assr, tc->assr);
+
+       return 0;
+
+err_dpcd_read:
+       dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
+       return ret;
+err_dpcd_inval:
+       dev_err(tc->dev, "invalid DPCD\n");
+       return -EINVAL;
+}
+
+static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
+{
+       int ret;
+       int vid_sync_dly;
+       int max_tu_symbol;
+
+       int left_margin = mode->htotal - mode->hsync_end;
+       int right_margin = mode->hsync_start - mode->hdisplay;
+       int hsync_len = mode->hsync_end - mode->hsync_start;
+       int upper_margin = mode->vtotal - mode->vsync_end;
+       int lower_margin = mode->vsync_start - mode->vdisplay;
+       int vsync_len = mode->vsync_end - mode->vsync_start;
+
+       dev_dbg(tc->dev, "set mode %dx%d\n",
+               mode->hdisplay, mode->vdisplay);
+       dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
+               left_margin, right_margin, hsync_len);
+       dev_dbg(tc->dev, "V margin %d,%d sync %d\n",
+               upper_margin, lower_margin, vsync_len);
+       dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
+
+       /* LCD Ctl Frame Size */
+       tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+                OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
+       tc_write(HTIM01, (left_margin << 16) |          /* H back porch */
+                        (hsync_len << 0));             /* Hsync */
+       tc_write(HTIM02, (right_margin << 16) |         /* H front porch */
+                        (mode->hdisplay << 0));        /* width */
+       tc_write(VTIM01, (upper_margin << 16) |         /* V back porch */
+                        (vsync_len << 0));             /* Vsync */
+       tc_write(VTIM02, (lower_margin << 16) |         /* V front porch */
+                        (mode->vdisplay << 0));        /* height */
+       tc_write(VFUEN0, VFUEN);                /* update settings */
+
+       /* Test pattern settings */
+       tc_write(TSTCTL,
+                (120 << 24) |  /* Red Color component value */
+                (20 << 16) |   /* Green Color component value */
+                (99 << 8) |    /* Blue Color component value */
+                (1 << 4) |     /* Enable I2C Filter */
+                (2 << 0) |     /* Color bar Mode */
+                0);
+
+       /* DP Main Stream Attributes */
+       vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
+       tc_write(DP0_VIDSYNCDELAY,
+                (0x003e << 16) |       /* thresh_dly */
+                (vid_sync_dly << 0));
+
+       tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
+
+       tc_write(DP0_STARTVAL,
+                ((upper_margin + vsync_len) << 16) |
+                ((left_margin + hsync_len) << 0));
+
+       tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
+
+       tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
+
+       tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+                DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+
+       /*
+        * Recommended maximum number of symbols transferred in a transfer unit:
+        * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+        *              (output active video bandwidth in bytes))
+        * Must be less than tu_size.
+        */
+       max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+       tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+
+       return 0;
+err:
+       return ret;
+}
+
+static int tc_link_training(struct tc_data *tc, int pattern)
+{
+       const char * const *errors;
+       u32 srcctrl = tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
+                     DP0_SRCCTRL_AUTOCORRECT;
+       int timeout;
+       int retry;
+       u32 value;
+       int ret;
+
+       if (pattern == DP_TRAINING_PATTERN_1) {
+               srcctrl |= DP0_SRCCTRL_TP1;
+               errors = training_pattern1_errors;
+       } else {
+               srcctrl |= DP0_SRCCTRL_TP2;
+               errors = training_pattern2_errors;
+       }
+
+       /* Set DPCD 0x102 for Training Part 1 or 2 */
+       tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE | pattern);
+
+       tc_write(DP0_LTLOOPCTRL,
+                (0x0f << 28) | /* Defer Iteration Count */
+                (0x0f << 24) | /* Loop Iteration Count */
+                (0x0d << 0));  /* Loop Timer Delay */
+
+       retry = 5;
+       do {
+               /* Set DP0 Training Pattern */
+               tc_write(DP0_SRCCTRL, srcctrl);
+
+               /* Enable DP0 to start Link Training */
+               tc_write(DP0CTL, DP_EN);
+
+               /* wait */
+               timeout = 1000;
+               do {
+                       tc_read(DP0_LTSTAT, &value);
+                       udelay(1);
+               } while ((!(value & LT_LOOPDONE)) && (--timeout));
+               if (timeout == 0) {
+                       dev_err(tc->dev, "Link training timeout!\n");
+               } else {
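+                       /* DP0_LTSTAT[12:11]: finished pattern, [10:8]: error code */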
+                       int pattern = (value >> 11) & 0x3;
+                       int error = (value >> 8) & 0x7;
+
+                       dev_dbg(tc->dev,
+                               "Link training phase %d done after %d uS: %s\n",
+                               pattern, 1000 - timeout, errors[error]);
+                       if (pattern == DP_TRAINING_PATTERN_1 && error == 0)
+                               break;
+                       if (pattern == DP_TRAINING_PATTERN_2) {
+                               value &= LT_CHANNEL1_EQ_BITS |
+                                        LT_INTERLANE_ALIGN_DONE |
+                                        LT_CHANNEL0_EQ_BITS;
+                               /* in case of two lanes */
+                               if ((tc->link.base.num_lanes == 2) &&
+                                   (value == (LT_CHANNEL1_EQ_BITS |
+                                              LT_INTERLANE_ALIGN_DONE |
+                                              LT_CHANNEL0_EQ_BITS)))
+                                       break;
+                               /* in case of one lane */
+                               if ((tc->link.base.num_lanes == 1) &&
+                                   (value == (LT_INTERLANE_ALIGN_DONE |
+                                              LT_CHANNEL0_EQ_BITS)))
+                                       break;
+                       }
+               }
+               /* restart */
+               tc_write(DP0CTL, 0);
+               usleep_range(10, 20);
+       } while (--retry);
+       if (retry == 0) {
+               dev_err(tc->dev, "Failed to finish training phase %d\n",
+                       pattern);
+       }
+
+       return 0;
+err:
+       return ret;
+}
+
+static int tc_main_link_setup(struct tc_data *tc)
+{
+       struct drm_dp_aux *aux = &tc->aux;
+       struct device *dev = tc->dev;
+       unsigned int rate;
+       u32 dp_phy_ctrl;
+       int timeout;
+       bool aligned;
+       bool ready;
+       u32 value;
+       int ret;
+       u8 tmp[8];
+
+       /* display mode should be set at this point */
+       if (!tc->mode)
+               return -EINVAL;
+
+       /* from excel file - DP0_SrcCtrl */
+       tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
+                DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
+                DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
+       /* from excel file - DP1_SrcCtrl */
+       tc_write(0x07a0, 0x00003083);
+
+       rate = clk_get_rate(tc->refclk);
+       switch (rate) {
+       case 38400000:
+               value = REF_FREQ_38M4;
+               break;
+       case 26000000:
+               value = REF_FREQ_26M;
+               break;
+       case 19200000:
+               value = REF_FREQ_19M2;
+               break;
+       case 13000000:
+               value = REF_FREQ_13M;
+               break;
+       default:
+               return -EINVAL;
+       }
+       value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
+       tc_write(SYS_PLLPARAM, value);
+       /* Setup Main Link */
+       dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN |  PHY_M0_EN;
+       tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+       msleep(100);
+
+       /* PLL setup */
+       tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
+       tc_wait_pll_lock(tc);
+
+       tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
+       tc_wait_pll_lock(tc);
+
+       /* PXL PLL setup */
+       if (tc_test_pattern) {
+               ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
+                                   1000 * tc->mode->clock);
+               if (ret)
+                       goto err;
+       }
+
+       /* Reset/Enable Main Links */
+       dp_phy_ctrl |= DP_PHY_RST | PHY_M1_RST | PHY_M0_RST;
+       tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+       usleep_range(100, 200);
+       dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
+       tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+
+       timeout = 1000;
+       do {
+               tc_read(DP_PHY_CTRL, &value);
+               udelay(1);
+       } while ((!(value & PHY_RDY)) && (--timeout));
+
+       if (timeout == 0) {
+               dev_err(dev, "timeout waiting for phy become ready");
+               return -ETIMEDOUT;
+       }
+
+       /* Set misc: 8 bits per color */
+       ret = regmap_update_bits(tc->regmap, DP0_MISC, BPC_8, BPC_8);
+       if (ret)
+               goto err;
+
+       /*
+        * ASSR mode:
+        * On the TC358767 side ASSR is configured through a strap pin and
+        * apparently cannot be changed from software.
+        *
+        * Check whether the TC358767 and the display are configured for
+        * the same mode.
+        */
+       if (tc->assr != tc->link.assr) {
+               dev_dbg(dev, "Trying to set display to ASSR: %d\n",
+                       tc->assr);
+               /* try to set ASSR on display side */
+               tmp[0] = tc->assr;
+               ret = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, tmp[0]);
+               if (ret < 0)
+                       goto err_dpcd_read;
+               /* read back */
+               ret = drm_dp_dpcd_readb(aux, DP_EDP_CONFIGURATION_SET, tmp);
+               if (ret < 0)
+                       goto err_dpcd_read;
+
+               if (tmp[0] != tc->assr) {
+                       dev_warn(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n",
+                                tc->assr);
+                       /* trying with disabled scrambler */
+                       tc->link.scrambler_dis = 1;
+               }
+       }
+
+       /* Setup Link & DPRx Config for Training */
+       ret = drm_dp_link_configure(aux, &tc->link.base);
+       if (ret < 0)
+               goto err_dpcd_write;
+
+       /* DOWNSPREAD_CTRL */
+       tmp[0] = tc->link.spread ? DP_SPREAD_AMP_0_5 : 0x00;
+       /* MAIN_LINK_CHANNEL_CODING_SET */
+       tmp[1] =  tc->link.coding8b10b ? DP_SET_ANSI_8B10B : 0x00;
+       ret = drm_dp_dpcd_write(aux, DP_DOWNSPREAD_CTRL, tmp, 2);
+       if (ret < 0)
+               goto err_dpcd_write;
+
+       ret = tc_link_training(tc, DP_TRAINING_PATTERN_1);
+       if (ret)
+               goto err;
+
+       ret = tc_link_training(tc, DP_TRAINING_PATTERN_2);
+       if (ret)
+               goto err;
+
+       /* Clear DPCD 0x102 */
+       /* Note: cannot use the DP0_SNKLTCTRL (0x06E4) shortcut */
+       tmp[0] = tc->link.scrambler_dis ? DP_LINK_SCRAMBLING_DISABLE : 0x00;
+       ret = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, tmp[0]);
+       if (ret < 0)
+               goto err_dpcd_write;
+
+       /* Clear Training Pattern, set AutoCorrect Mode = 1 */
+       tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT);
+
+       /* Wait */
+       timeout = 100;
+       do {
+               udelay(1);
+               /* Read DPCD 0x202-0x207 */
+               ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
+               if (ret < 0)
+                       goto err_dpcd_read;
+               ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
+                                    DP_CHANNEL_EQ_BITS));      /* Lane0 */
+               aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
+       } while ((--timeout) && !(ready && aligned));
+
+       if (timeout == 0) {
+               /* Read DPCD 0x200-0x201 */
+               ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
+               if (ret < 0)
+                       goto err_dpcd_read;
+               dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
+               dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
+                        tmp[1]);
+               dev_info(dev, "0x0202 LANE0_1_STATUS: 0x%02x\n", tmp[2]);
+               dev_info(dev, "0x0204 LANE_ALIGN_STATUS_UPDATED: 0x%02x\n",
+                        tmp[4]);
+               dev_info(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[5]);
+               dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
+                        tmp[6]);
+
+               if (!ready)
+                       dev_err(dev, "Lane0/1 not ready\n");
+               if (!aligned)
+                       dev_err(dev, "Lane0/1 not aligned\n");
+               return -EAGAIN;
+       }
+
+       ret = tc_set_video_mode(tc, tc->mode);
+       if (ret)
+               goto err;
+
+       /* Set M/N */
+       ret = tc_stream_clock_calc(tc);
+       if (ret)
+               goto err;
+
+       return 0;
+err_dpcd_read:
+       dev_err(tc->dev, "Failed to read DPCD: %d\n", ret);
+       return ret;
+err_dpcd_write:
+       dev_err(tc->dev, "Failed to write DPCD: %d\n", ret);
+err:
+       return ret;
+}
+
+static int tc_main_link_stream(struct tc_data *tc, int state)
+{
+       int ret;
+       u32 value;
+
+       dev_dbg(tc->dev, "stream: %d\n", state);
+
+       if (state) {
+               value = VID_MN_GEN | DP_EN;
+               if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+                       value |= EF_EN;
+               tc_write(DP0CTL, value);
+               /*
+                * VID_EN assertion should be delayed by at least N * LSCLK
+                * cycles from the time VID_MN_GEN is enabled in order to
+                * generate stable values for VID_M. LSCLK is 270 MHz or
+                * 162 MHz, VID_N is set to 32768 in tc_stream_clock_calc(),
+                * so in the worst case (32768 / 162 MHz ~= 202 us) a delay
+                * of at least 203 us should suffice.
+                */
+               usleep_range(500, 1000);
+               value |= VID_EN;
+               tc_write(DP0CTL, value);
+               /* Set input interface */
+               value = DP0_AUDSRC_NO_INPUT;
+               if (tc_test_pattern)
+                       value |= DP0_VIDSRC_COLOR_BAR;
+               else
+                       value |= DP0_VIDSRC_DPI_RX;
+               tc_write(SYSCTRL, value);
+       } else {
+               tc_write(DP0CTL, 0);
+       }
+
+       return 0;
+err:
+       return ret;
+}
+
+static enum drm_connector_status
+tc_connector_detect(struct drm_connector *connector, bool force)
+{
+       return connector_status_connected;
+}
+
+static void tc_bridge_pre_enable(struct drm_bridge *bridge)
+{
+       struct tc_data *tc = bridge_to_tc(bridge);
+
+       drm_panel_prepare(tc->panel);
+}
+
+static void tc_bridge_enable(struct drm_bridge *bridge)
+{
+       struct tc_data *tc = bridge_to_tc(bridge);
+       int ret;
+
+       ret = tc_main_link_setup(tc);
+       if (ret < 0) {
+               dev_err(tc->dev, "main link setup error: %d\n", ret);
+               return;
+       }
+
+       ret = tc_main_link_stream(tc, 1);
+       if (ret < 0) {
+               dev_err(tc->dev, "main link stream start error: %d\n", ret);
+               return;
+       }
+
+       drm_panel_enable(tc->panel);
+}
+
+static void tc_bridge_disable(struct drm_bridge *bridge)
+{
+       struct tc_data *tc = bridge_to_tc(bridge);
+       int ret;
+
+       drm_panel_disable(tc->panel);
+
+       ret = tc_main_link_stream(tc, 0);
+       if (ret < 0)
+               dev_err(tc->dev, "main link stream stop error: %d\n", ret);
+}
+
+static void tc_bridge_post_disable(struct drm_bridge *bridge)
+{
+       struct tc_data *tc = bridge_to_tc(bridge);
+
+       drm_panel_unprepare(tc->panel);
+}
+
+static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
+                                const struct drm_display_mode *mode,
+                                struct drm_display_mode *adj)
+{
+       /* Fixup sync polarities, both hsync and vsync are active low */
+       adj->flags = mode->flags;
+       adj->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+       adj->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+
+       return true;
+}
+
+static int tc_connector_mode_valid(struct drm_connector *connector,
+                                  struct drm_display_mode *mode)
+{
+       /* Accept any mode */
+       return MODE_OK;
+}
+
+static void tc_bridge_mode_set(struct drm_bridge *bridge,
+                              struct drm_display_mode *mode,
+                              struct drm_display_mode *adj)
+{
+       struct tc_data *tc = bridge_to_tc(bridge);
+
+       tc->mode = mode;
+}
+
+static int tc_connector_get_modes(struct drm_connector *connector)
+{
+       struct tc_data *tc = connector_to_tc(connector);
+       struct edid *edid;
+       unsigned int count;
+
+       if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
+               count = tc->panel->funcs->get_modes(tc->panel);
+               if (count > 0)
+                       return count;
+       }
+
+       edid = drm_get_edid(connector, &tc->aux.ddc);
+
+       kfree(tc->edid);
+       tc->edid = edid;
+       if (!edid)
+               return 0;
+
+       drm_mode_connector_update_edid_property(connector, edid);
+       count = drm_add_edid_modes(connector, edid);
+
+       return count;
+}
+
+static void tc_connector_set_polling(struct tc_data *tc,
+                                    struct drm_connector *connector)
+{
+       /* TODO: add support for HPD */
+       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                           DRM_CONNECTOR_POLL_DISCONNECT;
+}
+
+static struct drm_encoder *
+tc_connector_best_encoder(struct drm_connector *connector)
+{
+       struct tc_data *tc = connector_to_tc(connector);
+
+       return tc->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
+       .get_modes = tc_connector_get_modes,
+       .mode_valid = tc_connector_mode_valid,
+       .best_encoder = tc_connector_best_encoder,
+};
+
+static void tc_connector_destroy(struct drm_connector *connector)
+{
+       drm_connector_unregister(connector);
+       drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tc_connector_funcs = {
+       .dpms = drm_atomic_helper_connector_dpms,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .detect = tc_connector_detect,
+       .destroy = tc_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int tc_bridge_attach(struct drm_bridge *bridge)
+{
+       u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+       struct tc_data *tc = bridge_to_tc(bridge);
+       struct drm_device *drm = bridge->dev;
+       int ret;
+
+       /* Create eDP connector */
+       drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
+       ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
+                                DRM_MODE_CONNECTOR_eDP);
+       if (ret)
+               return ret;
+
+       if (tc->panel)
+               drm_panel_attach(tc->panel, &tc->connector);
+
+       drm_display_info_set_bus_formats(&tc->connector.display_info,
+                                        &bus_format, 1);
+       drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
+
+       return 0;
+}
+
+static const struct drm_bridge_funcs tc_bridge_funcs = {
+       .attach = tc_bridge_attach,
+       .mode_set = tc_bridge_mode_set,
+       .pre_enable = tc_bridge_pre_enable,
+       .enable = tc_bridge_enable,
+       .disable = tc_bridge_disable,
+       .post_disable = tc_bridge_post_disable,
+       .mode_fixup = tc_bridge_mode_fixup,
+};
+
+static bool tc_readable_reg(struct device *dev, unsigned int reg)
+{
+       return reg != SYSCTRL;
+}
+
+static const struct regmap_range tc_volatile_ranges[] = {
+       regmap_reg_range(DP0_AUXWDATA(0), DP0_AUXSTATUS),
+       regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ),
+       regmap_reg_range(DP_PHY_CTRL, DP_PHY_CTRL),
+       regmap_reg_range(DP0_PLLCTRL, PXL_PLLCTRL),
+       regmap_reg_range(VFUEN0, VFUEN0),
+};
+
+static const struct regmap_access_table tc_volatile_table = {
+       .yes_ranges = tc_volatile_ranges,
+       .n_yes_ranges = ARRAY_SIZE(tc_volatile_ranges),
+};
+
+static bool tc_writeable_reg(struct device *dev, unsigned int reg)
+{
+       return (reg != TC_IDREG) &&
+              (reg != DP0_LTSTAT) &&
+              (reg != DP0_SNKLTCHGREQ);
+}
+
+static const struct regmap_config tc_regmap_config = {
+       .name = "tc358767",
+       .reg_bits = 16,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = PLL_DBG,
+       .cache_type = REGCACHE_RBTREE,
+       .readable_reg = tc_readable_reg,
+       .volatile_table = &tc_volatile_table,
+       .writeable_reg = tc_writeable_reg,
+       .reg_format_endian = REGMAP_ENDIAN_BIG,
+       .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct device_node *ep;
+       struct tc_data *tc;
+       int ret;
+
+       tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+       if (!tc)
+               return -ENOMEM;
+
+       tc->dev = dev;
+
+       /* port@2 is the output port */
+       ep = of_graph_get_endpoint_by_regs(dev->of_node, 2, -1);
+       if (ep) {
+               struct device_node *remote;
+
+               remote = of_graph_get_remote_port_parent(ep);
+               if (!remote) {
+                       dev_warn(dev, "endpoint %s not connected\n",
+                                ep->full_name);
+                       of_node_put(ep);
+                       return -ENODEV;
+               }
+               of_node_put(ep);
+               tc->panel = of_drm_find_panel(remote);
+               if (tc->panel) {
+                       dev_dbg(dev, "found panel %s\n", remote->full_name);
+               } else {
+                       dev_dbg(dev, "waiting for panel %s\n",
+                               remote->full_name);
+                       of_node_put(remote);
+                       return -EPROBE_DEFER;
+               }
+               of_node_put(remote);
+       }
+
+       /* The shutdown GPIO is optional */
+       tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
+       if (IS_ERR(tc->sd_gpio))
+               return PTR_ERR(tc->sd_gpio);
+
+       if (tc->sd_gpio) {
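+               /* Take the bridge out of shutdown */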
+               gpiod_set_value_cansleep(tc->sd_gpio, 0);
+               usleep_range(5000, 10000);
+       }
+
+       /* Reset GPIO is optional */
+       tc->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(tc->reset_gpio))
+               return PTR_ERR(tc->reset_gpio);
+
+       if (tc->reset_gpio) {
+               gpiod_set_value_cansleep(tc->reset_gpio, 1);
+               usleep_range(5000, 10000);
+       }
+
+       tc->refclk = devm_clk_get(dev, "ref");
+       if (IS_ERR(tc->refclk)) {
+               ret = PTR_ERR(tc->refclk);
+               dev_err(dev, "Failed to get refclk: %d\n", ret);
+               return ret;
+       }
+
+       tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config);
+       if (IS_ERR(tc->regmap)) {
+               ret = PTR_ERR(tc->regmap);
+               dev_err(dev, "Failed to initialize regmap: %d\n", ret);
+               return ret;
+       }
+
+       ret = regmap_read(tc->regmap, TC_IDREG, &tc->rev);
+       if (ret) {
+               dev_err(tc->dev, "can not read device ID: %d\n", ret);
+               return ret;
+       }
+
+       if ((tc->rev != 0x6601) && (tc->rev != 0x6603)) {
+               dev_err(tc->dev, "invalid device ID: 0x%08x\n", tc->rev);
+               return -EINVAL;
+       }
+
+       tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */
+
+       ret = tc_aux_link_setup(tc);
+       if (ret)
+               return ret;
+
+       /* Register DP AUX channel */
+       tc->aux.name = "TC358767 AUX i2c adapter";
+       tc->aux.dev = tc->dev;
+       tc->aux.transfer = tc_aux_transfer;
+       ret = drm_dp_aux_register(&tc->aux);
+       if (ret)
+               return ret;
+
+       ret = tc_get_display_props(tc);
+       if (ret)
+               goto err_unregister_aux;
+
+       tc_connector_set_polling(tc, &tc->connector);
+
+       tc->bridge.funcs = &tc_bridge_funcs;
+       tc->bridge.of_node = dev->of_node;
+       ret = drm_bridge_add(&tc->bridge);
+       if (ret) {
+               dev_err(dev, "Failed to add drm_bridge: %d\n", ret);
+               goto err_unregister_aux;
+       }
+
+       i2c_set_clientdata(client, tc);
+
+       return 0;
+err_unregister_aux:
+       drm_dp_aux_unregister(&tc->aux);
+       return ret;
+}
+
+static int tc_remove(struct i2c_client *client)
+{
+       struct tc_data *tc = i2c_get_clientdata(client);
+
+       drm_bridge_remove(&tc->bridge);
+       drm_dp_aux_unregister(&tc->aux);
+
+       tc_pxl_pll_dis(tc);
+
+       return 0;
+}
+
+static const struct i2c_device_id tc358767_i2c_ids[] = {
+       { "tc358767", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids);
+
+static const struct of_device_id tc358767_of_ids[] = {
+       { .compatible = "toshiba,tc358767", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, tc358767_of_ids);
+
+static struct i2c_driver tc358767_driver = {
+       .driver = {
+               .name = "tc358767",
+               .of_match_table = tc358767_of_ids,
+       },
+       .id_table = tc358767_i2c_ids,
+       .probe = tc_probe,
+       .remove = tc_remove,
+};
+module_i2c_driver(tc358767_driver);
+
+MODULE_AUTHOR("Andrey Gusakov <andrey.gusakov@cogentembedded.com>");
+MODULE_DESCRIPTION("tc358767 eDP encoder driver");
+MODULE_LICENSE("GPL");
index 9864559e5fb994ec47e9b5ceb13246c9682491fe..04b3c161dfae6fc9c643229f2a312eb44abca62a 100644 (file)
@@ -1,11 +1,7 @@
 config DRM_CIRRUS_QEMU
        tristate "Cirrus driver for QEMU emulated device"
        depends on DRM && PCI
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
        help
         This is a KMS driver for emulated cirrus device in qemu.
index dc83f69da6f1c6513cc797ac0776ce24e3d0ba9a..b05f7eae32ce756e39f656adcbadc2ddfb55893c 100644 (file)
@@ -142,7 +142,7 @@ static struct drm_driver driver = {
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
-       .gem_free_object = cirrus_gem_free_object,
+       .gem_free_object_unlocked = cirrus_gem_free_object,
        .dumb_create = cirrus_dumb_create,
        .dumb_map_offset = cirrus_dumb_mmap_offset,
        .dumb_destroy = drm_gem_dumb_destroy,
index 32d32c5b7b17448bdb252301d5cea65f3e3cacbf..80446e2d3ab6efd00f3d3e777e4178b26ee6ae5d 100644 (file)
@@ -17,8 +17,8 @@
 static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
        struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
-       if (cirrus_fb->obj)
-               drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+
+       drm_gem_object_unreference_unlocked(cirrus_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(fb);
 }
index d3d8d7bfcc5765e812bd85ded2e0d14d39c3cfb4..17c915d9a03e66731f9ec3451d261b08f9743d44 100644 (file)
@@ -325,21 +325,20 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc)
  * use this for 8-bit mode so can't perform smooth fades on deeper modes,
  * but it's a requirement that we provide the function
  */
-static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-                                 u16 *blue, uint32_t start, uint32_t size)
+static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                u16 *blue, uint32_t size)
 {
        struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
        int i;
 
-       if (size != CIRRUS_LUT_SIZE)
-               return;
-
-       for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+       for (i = 0; i < size; i++) {
                cirrus_crtc->lut_r[i] = red[i];
                cirrus_crtc->lut_g[i] = green[i];
                cirrus_crtc->lut_b[i] = blue[i];
        }
        cirrus_crtc_load_lut(crtc);
+
+       return 0;
 }
 
 /* Simple cleanup function */
index 6768b7b1af32d98bc26c259870ac471983e4a62d..1cc9ee607128772295bf6f49c8ba9ad93f73abd1 100644 (file)
@@ -186,17 +186,6 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 {
 }
 
-static int cirrus_bo_move(struct ttm_buffer_object *bo,
-                      bool evict, bool interruptible,
-                      bool no_wait_gpu,
-                      struct ttm_mem_reg *new_mem)
-{
-       int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-       return r;
-}
-
-
 static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
 {
        ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
        .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
        .init_mem_type = cirrus_bo_init_mem_type,
        .evict_flags = cirrus_bo_evict_flags,
-       .move = cirrus_bo_move,
+       .move = NULL,
        .verify_access = cirrus_bo_verify_access,
        .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
        .io_mem_free = &cirrus_ttm_io_mem_free,
index 3ff1ed7b33dbeb74aa036204f90931958eb5ee23..8d2f111fa113363e83218f641f677435b7da832b 100644 (file)
 
 #include "drm_crtc_internal.h"
 
+static void crtc_commit_free(struct kref *kref)
+{
+       struct drm_crtc_commit *commit =
+               container_of(kref, struct drm_crtc_commit, ref);
+
+       kfree(commit);
+}
+
+void drm_crtc_commit_put(struct drm_crtc_commit *commit)
+{
+       kref_put(&commit->ref, crtc_commit_free);
+}
+EXPORT_SYMBOL(drm_crtc_commit_put);
+
 /**
  * drm_atomic_state_default_release -
  * release memory initialized by drm_atomic_state_init
 void drm_atomic_state_default_release(struct drm_atomic_state *state)
 {
        kfree(state->connectors);
-       kfree(state->connector_states);
        kfree(state->crtcs);
-       kfree(state->crtc_states);
        kfree(state->planes);
-       kfree(state->plane_states);
 }
 EXPORT_SYMBOL(drm_atomic_state_default_release);
 
@@ -72,18 +83,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
                               sizeof(*state->crtcs), GFP_KERNEL);
        if (!state->crtcs)
                goto fail;
-       state->crtc_states = kcalloc(dev->mode_config.num_crtc,
-                                    sizeof(*state->crtc_states), GFP_KERNEL);
-       if (!state->crtc_states)
-               goto fail;
        state->planes = kcalloc(dev->mode_config.num_total_plane,
                                sizeof(*state->planes), GFP_KERNEL);
        if (!state->planes)
                goto fail;
-       state->plane_states = kcalloc(dev->mode_config.num_total_plane,
-                                     sizeof(*state->plane_states), GFP_KERNEL);
-       if (!state->plane_states)
-               goto fail;
 
        state->dev = dev;
 
@@ -139,40 +142,48 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
        DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
 
        for (i = 0; i < state->num_connector; i++) {
-               struct drm_connector *connector = state->connectors[i];
+               struct drm_connector *connector = state->connectors[i].ptr;
 
                if (!connector)
                        continue;
 
                connector->funcs->atomic_destroy_state(connector,
-                                                      state->connector_states[i]);
-               state->connectors[i] = NULL;
-               state->connector_states[i] = NULL;
+                                                      state->connectors[i].state);
+               state->connectors[i].ptr = NULL;
+               state->connectors[i].state = NULL;
                drm_connector_unreference(connector);
        }
 
        for (i = 0; i < config->num_crtc; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
+               struct drm_crtc *crtc = state->crtcs[i].ptr;
 
                if (!crtc)
                        continue;
 
                crtc->funcs->atomic_destroy_state(crtc,
-                                                 state->crtc_states[i]);
-               state->crtcs[i] = NULL;
-               state->crtc_states[i] = NULL;
+                                                 state->crtcs[i].state);
+
+               if (state->crtcs[i].commit) {
+                       kfree(state->crtcs[i].commit->event);
+                       state->crtcs[i].commit->event = NULL;
+                       drm_crtc_commit_put(state->crtcs[i].commit);
+               }
+
+               state->crtcs[i].commit = NULL;
+               state->crtcs[i].ptr = NULL;
+               state->crtcs[i].state = NULL;
        }
 
        for (i = 0; i < config->num_total_plane; i++) {
-               struct drm_plane *plane = state->planes[i];
+               struct drm_plane *plane = state->planes[i].ptr;
 
                if (!plane)
                        continue;
 
                plane->funcs->atomic_destroy_state(plane,
-                                                  state->plane_states[i]);
-               state->planes[i] = NULL;
-               state->plane_states[i] = NULL;
+                                                  state->planes[i].state);
+               state->planes[i].ptr = NULL;
+               state->planes[i].state = NULL;
        }
 }
 EXPORT_SYMBOL(drm_atomic_state_default_clear);
@@ -270,8 +281,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
        if (!crtc_state)
                return ERR_PTR(-ENOMEM);
 
-       state->crtc_states[index] = crtc_state;
-       state->crtcs[index] = crtc;
+       state->crtcs[index].state = crtc_state;
+       state->crtcs[index].ptr = crtc;
        crtc_state->state = state;
 
        DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
@@ -351,6 +362,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
        drm_property_unreference_blob(state->mode_blob);
        state->mode_blob = NULL;
 
+       memset(&state->mode, 0, sizeof(state->mode));
+
        if (blob) {
                if (blob->length != sizeof(struct drm_mode_modeinfo) ||
                    drm_mode_convert_umode(&state->mode,
@@ -363,7 +376,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
                DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
                                 state->mode.name, state);
        } else {
-               memset(&state->mode, 0, sizeof(state->mode));
                state->enable = false;
                DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
                                 state);
@@ -392,8 +404,7 @@ drm_atomic_replace_property_blob(struct drm_property_blob **blob,
        if (old_blob == new_blob)
                return;
 
-       if (old_blob)
-               drm_property_unreference_blob(old_blob);
+       drm_property_unreference_blob(old_blob);
        if (new_blob)
                drm_property_reference_blob(new_blob);
        *blob = new_blob;
@@ -631,8 +642,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
        if (!plane_state)
                return ERR_PTR(-ENOMEM);
 
-       state->plane_states[index] = plane_state;
-       state->planes[index] = plane;
+       state->planes[index].state = plane_state;
+       state->planes[index].ptr = plane;
        plane_state->state = state;
 
        DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
@@ -896,8 +907,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
        index = drm_connector_index(connector);
 
        if (index >= state->num_connector) {
-               struct drm_connector **c;
-               struct drm_connector_state **cs;
+               struct __drm_connnectors_state *c;
                int alloc = max(index + 1, config->num_connector);
 
                c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
@@ -908,26 +918,19 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
                memset(&state->connectors[state->num_connector], 0,
                       sizeof(*state->connectors) * (alloc - state->num_connector));
 
-               cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
-               if (!cs)
-                       return ERR_PTR(-ENOMEM);
-
-               state->connector_states = cs;
-               memset(&state->connector_states[state->num_connector], 0,
-                      sizeof(*state->connector_states) * (alloc - state->num_connector));
                state->num_connector = alloc;
        }
 
-       if (state->connector_states[index])
-               return state->connector_states[index];
+       if (state->connectors[index].state)
+               return state->connectors[index].state;
 
        connector_state = connector->funcs->atomic_duplicate_state(connector);
        if (!connector_state)
                return ERR_PTR(-ENOMEM);
 
        drm_connector_reference(connector);
-       state->connector_states[index] = connector_state;
-       state->connectors[index] = connector;
+       state->connectors[index].state = connector_state;
+       state->connectors[index].ptr = connector;
        connector_state->state = state;
 
        DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
@@ -1295,14 +1298,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes);
  */
 void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
 {
+       struct drm_device *dev = state->dev;
+       unsigned crtc_mask = 0;
+       struct drm_crtc *crtc;
        int ret;
+       bool global = false;
+
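+       /*
+        * Record which CRTC locks (and possibly the global lock) are held
+        * by this acquire context, so they can be re-pointed at it after
+        * the backoff/relock cycle below.
+        */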
+       drm_for_each_crtc(crtc, dev) {
+               if (crtc->acquire_ctx != state->acquire_ctx)
+                       continue;
+
+               crtc_mask |= drm_crtc_mask(crtc);
+               crtc->acquire_ctx = NULL;
+       }
+
+       if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
+               global = true;
+
+               dev->mode_config.acquire_ctx = NULL;
+       }
 
 retry:
        drm_modeset_backoff(state->acquire_ctx);
 
-       ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
+       ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
        if (ret)
                goto retry;
+
+       drm_for_each_crtc(crtc, dev)
+               if (drm_crtc_mask(crtc) & crtc_mask)
+                       crtc->acquire_ctx = state->acquire_ctx;
+
+       if (global)
+               dev->mode_config.acquire_ctx = state->acquire_ctx;
 }
 EXPORT_SYMBOL(drm_atomic_legacy_backoff);
 
@@ -1431,7 +1459,8 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
  */
 
 static struct drm_pending_vblank_event *create_vblank_event(
-               struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
+               struct drm_device *dev, struct drm_file *file_priv,
+               struct fence *fence, uint64_t user_data)
 {
        struct drm_pending_vblank_event *e = NULL;
        int ret;
@@ -1444,12 +1473,17 @@ static struct drm_pending_vblank_event *create_vblank_event(
        e->event.base.length = sizeof(e->event);
        e->event.user_data = user_data;
 
-       ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
-       if (ret) {
-               kfree(e);
-               return NULL;
+       if (file_priv) {
+               ret = drm_event_reserve_init(dev, file_priv, &e->base,
+                                            &e->event.base);
+               if (ret) {
+                       kfree(e);
+                       return NULL;
+               }
        }
 
+       e->base.fence = fence;
+
        return e;
 }
 
@@ -1689,7 +1723,8 @@ retry:
                for_each_crtc_in_state(state, crtc, crtc_state, i) {
                        struct drm_pending_vblank_event *e;
 
-                       e = create_vblank_event(dev, file_priv, arg->user_data);
+                       e = create_vblank_event(dev, file_priv, NULL,
+                                               arg->user_data);
                        if (!e) {
                                ret = -ENOMEM;
                                goto out;
index ddfa0d120e398a63f755c677dcdb1b5e58f371e5..de7fddce3cefac5a00de3fb95b94578585b9ce8d 100644 (file)
@@ -110,8 +110,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
 
                if (funcs->atomic_best_encoder)
                        new_encoder = funcs->atomic_best_encoder(connector, conn_state);
-               else
+               else if (funcs->best_encoder)
                        new_encoder = funcs->best_encoder(connector);
+               else
+                       new_encoder = drm_atomic_helper_best_encoder(connector);
 
                if (new_encoder) {
                        if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
@@ -298,8 +300,10 @@ update_connector_routing(struct drm_atomic_state *state,
        if (funcs->atomic_best_encoder)
                new_encoder = funcs->atomic_best_encoder(connector,
                                                         connector_state);
-       else
+       else if (funcs->best_encoder)
                new_encoder = funcs->best_encoder(connector);
+       else
+               new_encoder = drm_atomic_helper_best_encoder(connector);
 
        if (!new_encoder) {
                DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -414,6 +418,9 @@ mode_fixup(struct drm_atomic_state *state)
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                const struct drm_crtc_helper_funcs *funcs;
 
+               if (!crtc_state->enable)
+                       continue;
+
                if (!crtc_state->mode_changed &&
                    !crtc_state->connectors_changed)
                        continue;
@@ -458,7 +465,7 @@ mode_fixup(struct drm_atomic_state *state)
  * times for the same update, e.g. when the ->atomic_check functions depend upon
  * the adjusted dotclock for fifo space allocation and watermark computation.
  *
- * RETURNS
+ * RETURNS:
  * Zero for success or -errno
  */
 int
@@ -572,7 +579,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
  * It also sets crtc_state->planes_changed to indicate that a crtc has
  * updated planes.
  *
- * RETURNS
+ * RETURNS:
  * Zero for success or -errno
  */
 int
@@ -611,7 +618,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
                if (!funcs || !funcs->atomic_check)
                        continue;
 
-               ret = funcs->atomic_check(crtc, state->crtc_states[i]);
+               ret = funcs->atomic_check(crtc, crtc_state);
                if (ret) {
                        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
                                         crtc->base.id, crtc->name);
@@ -640,7 +647,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
  * ->atomic_check functions depend upon an updated adjusted_mode.clock to
  * e.g. properly compute watermarks.
  *
- * RETURNS
+ * RETURNS:
  * Zero for success or -errno
  */
 int drm_atomic_helper_check(struct drm_device *dev,
@@ -1113,22 +1120,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
 EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
 
 /**
- * drm_atomic_helper_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblocking: whether nonblocking behavior is requested.
+ * drm_atomic_helper_commit_tail - commit atomic update to hardware
+ * @state: new modeset state to be committed
  *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement nonblocking commits.
+ * This is the default implementation for the ->atomic_commit_tail() hook of the
+ * &drm_mode_config_helper_funcs vtable.
  *
- * Note that right now this function does not support nonblocking commits, hence
- * driver writers must implement their own version for now. Also note that the
- * default ordering of how the various stages are called is to match the legacy
- * modeset helper library closest. One peculiarity of that is that it doesn't
- * mesh well with runtime PM at all.
+ * Note that the default ordering of how the various stages are called is to
+ * match the legacy modeset helper library closest. One peculiarity of that is
+ * that it doesn't mesh well with runtime PM at all.
  *
- * For drivers supporting runtime PM the recommended sequence is
+ * For drivers supporting runtime PM the recommended sequence is instead ::
  *
  *     drm_atomic_helper_commit_modeset_disables(dev, state);
  *
@@ -1136,9 +1138,75 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
  *
  *     drm_atomic_helper_commit_planes(dev, state, true);
  *
- * See the kerneldoc entries for these three functions for more details.
+ * for committing the atomic update to hardware.  See the kerneldoc entries for
+ * these three functions for more details.
+ */
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+
+       drm_atomic_helper_commit_modeset_disables(dev, state);
+
+       drm_atomic_helper_commit_planes(dev, state, false);
+
+       drm_atomic_helper_commit_modeset_enables(dev, state);
+
+       drm_atomic_helper_commit_hw_done(state);
+
+       drm_atomic_helper_wait_for_vblanks(dev, state);
+
+       drm_atomic_helper_cleanup_planes(dev, state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
+
+static void commit_tail(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+       struct drm_mode_config_helper_funcs *funcs;
+
+       funcs = dev->mode_config.helper_private;
+
+       drm_atomic_helper_wait_for_fences(dev, state);
+
+       drm_atomic_helper_wait_for_dependencies(state);
+
+       if (funcs && funcs->atomic_commit_tail)
+               funcs->atomic_commit_tail(state);
+       else
+               drm_atomic_helper_commit_tail(state);
+
+       drm_atomic_helper_commit_cleanup_done(state);
+
+       drm_atomic_state_free(state);
+}
+
+static void commit_work(struct work_struct *work)
+{
+       struct drm_atomic_state *state = container_of(work,
+                                                     struct drm_atomic_state,
+                                                     commit_work);
+       commit_tail(state);
+}
+
+/**
+ * drm_atomic_helper_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @nonblock: whether nonblocking behavior is requested.
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
+ * reservation fails. This function implements nonblocking commits, using
+ * drm_atomic_helper_setup_commit() and related functions.
+ *
+ * Committing the actual hardware state is done through the
+ * ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable,
+ * or its default implementation, drm_atomic_helper_commit_tail().
  *
- * RETURNS
+ * RETURNS:
  * Zero for success or -errno.
  */
 int drm_atomic_helper_commit(struct drm_device *dev,
@@ -1147,8 +1215,11 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 {
        int ret;
 
-       if (nonblock)
-               return -EBUSY;
+       ret = drm_atomic_helper_setup_commit(state, nonblock);
+       if (ret)
+               return ret;
+
+       INIT_WORK(&state->commit_work, commit_work);
 
        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
@@ -1160,7 +1231,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
         * the software side now.
         */
 
-       drm_atomic_helper_swap_state(dev, state);
+       drm_atomic_helper_swap_state(state, true);
 
        /*
         * Everything below can be run asynchronously without the need to grab
@@ -1176,21 +1247,16 @@ int drm_atomic_helper_commit(struct drm_device *dev,
         * update. Which is important since compositors need to figure out the
         * composition of the next frame right after having submitted the
         * current layout.
+        *
+        * NOTE: Commit work has multiple phases, first hardware commit, then
+        * cleanup. We want them to overlap, hence need system_unbound_wq to
+        * make sure work items don't artificially stall on each other.
         */
 
-       drm_atomic_helper_wait_for_fences(dev, state);
-
-       drm_atomic_helper_commit_modeset_disables(dev, state);
-
-       drm_atomic_helper_commit_planes(dev, state, false);
-
-       drm_atomic_helper_commit_modeset_enables(dev, state);
-
-       drm_atomic_helper_wait_for_vblanks(dev, state);
-
-       drm_atomic_helper_cleanup_planes(dev, state);
-
-       drm_atomic_state_free(state);
+       if (nonblock)
+               queue_work(system_unbound_wq, &state->commit_work);
+       else
+               commit_tail(state);
 
        return 0;
 }
@@ -1199,12 +1265,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
 /**
  * DOC: implementing nonblocking commit
  *
- * For now the atomic helpers don't support nonblocking commit directly. If
- * there is real need it could be added though, using the dma-buf fence
- * infrastructure for generic synchronization with outstanding rendering.
- *
- * For now drivers have to implement nonblocking commit themselves, with the
- * following sequence being the recommended one:
+ * Nonblocking atomic commits have to be implemented in the following sequence:
  *
  * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
  * which commit needs to call which can fail, so we want to run it first and
@@ -1216,10 +1277,14 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
  * cancelled updates. Note that it is important to ensure that the framebuffer
  * cleanup is still done when cancelling.
  *
- * For sufficient parallelism it is recommended to have a work item per crtc
- * (for updates which don't touch global state) and a global one. Then we only
- * need to synchronize with the crtc work items for changed crtcs and the global
- * work item, which allows nice concurrent updates on disjoint sets of crtcs.
+ * Asynchronous workers need to have sufficient parallelism to be able to run
+ * different atomic commits on different CRTCs in parallel. The simplest way to
+ * achieve this is by running them on the &system_unbound_wq work queue. Note
+ * that drivers are not required to split up atomic commits and run an
+ * individual commit in parallel - userspace is supposed to do that if it cares.
+ * But it might be beneficial to do that for modesets, since those necessarily
+ * must be done as one global operation, and enabling or disabling a CRTC can
+ * take a long time. But even that is not required.
  *
  * 3. The software state is updated synchronously with
  * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
@@ -1232,8 +1297,310 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
  * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
  * then cleaning up the framebuffers after the old framebuffer is no longer
  * being displayed.
+ *
+ * The above scheme is implemented in the atomic helper libraries in
+ * drm_atomic_helper_commit() using a bunch of helper functions. See
+ * drm_atomic_helper_setup_commit() for a starting point.
  */
 
+static int stall_checks(struct drm_crtc *crtc, bool nonblock)
+{
+       struct drm_crtc_commit *commit, *stall_commit = NULL;
+       bool completed = true;
+       int i;
+       long ret = 0;
+
+       spin_lock(&crtc->commit_lock);
+       i = 0;
+       list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
+               if (i == 0) {
+                       completed = try_wait_for_completion(&commit->flip_done);
+                       /* Userspace is not allowed to get ahead of the previous
+                        * commit with nonblocking ones. */
+                       if (!completed && nonblock) {
+                               spin_unlock(&crtc->commit_lock);
+                               return -EBUSY;
+                       }
+               } else if (i == 1) {
+                       stall_commit = commit;
+                       drm_crtc_commit_get(stall_commit);
+                       break;
+               }
+
+               i++;
+       }
+       spin_unlock(&crtc->commit_lock);
+
+       if (!stall_commit)
+               return 0;
+
+       /* We don't want to let commits get ahead of cleanup work too much;
+        * stalling on the 2nd previous commit means triple-buffering won't
+        * ever stall.
+        */
+       ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
+                                                       10*HZ);
+       if (ret == 0)
+               DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
+                         crtc->base.id, crtc->name);
+
+       drm_crtc_commit_put(stall_commit);
+
+       return ret < 0 ? ret : 0;
+}
+
+/**
+ * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
+ * @state: new modeset state to be committed
+ * @nonblock: whether nonblocking behavior is requested.
+ *
+ * This function prepares @state to be used by the atomic helper's support for
+ * nonblocking commits. Drivers using the nonblocking commit infrastructure
+ * should always call this function from their ->atomic_commit hook.
+ *
+ * To be able to use this support drivers need to use a few more helper
+ * functions. drm_atomic_helper_wait_for_dependencies() must be called before
+ * actually committing the hardware state, and for nonblocking commits this call
+ * must be placed in the async worker. See also drm_atomic_helper_swap_state()
+ * and its stall parameter, for when a driver's commit hooks look at the
+ * ->state pointers of struct &drm_crtc, &drm_plane or &drm_connector directly.
+ *
+ * Completion of the hardware commit step must be signalled using
+ * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
+ * to read or change any permanent software or hardware modeset state. The only
+ * exception is state protected by other means than &drm_modeset_lock locks.
+ * Only the free-standing @state with pointers to the old state structures can
+ * be inspected, e.g. to clean up old buffers using
+ * drm_atomic_helper_cleanup_planes().
+ *
+ * At the very end, before cleaning up @state drivers must call
+ * drm_atomic_helper_commit_cleanup_done().
+ *
+ * This is all implemented in drm_atomic_helper_commit(), giving drivers a
+ * complete and easy-to-use default implementation of the atomic_commit() hook.
+ *
+ * The tracking of asynchronously executed and still pending commits is done
+ * using the core structure &drm_crtc_commit.
+ *
+ * By default there's no need to clean up resources allocated by this function
+ * explicitly: drm_atomic_state_default_clear() will take care of that
+ * automatically.
+ *
+ * Returns:
+ *
+ * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
+ * -ENOMEM on allocation failures and -EINTR when a signal is pending.
+ */
+int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
+                                  bool nonblock)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc_commit *commit;
+       int i, ret;
+
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+               if (!commit)
+                       return -ENOMEM;
+
+               init_completion(&commit->flip_done);
+               init_completion(&commit->hw_done);
+               init_completion(&commit->cleanup_done);
+               INIT_LIST_HEAD(&commit->commit_entry);
+               kref_init(&commit->ref);
+               commit->crtc = crtc;
+
+               state->crtcs[i].commit = commit;
+
+               ret = stall_checks(crtc, nonblock);
+               if (ret)
+                       return ret;
+
+               /* Drivers only send out events when either the current or the
+                * new CRTC state is active. Complete right away if everything
+                * stays off. */
+               if (!crtc->state->active && !crtc_state->active) {
+                       complete_all(&commit->flip_done);
+                       continue;
+               }
+
+               /* Legacy cursor updates are fully unsynced. */
+               if (state->legacy_cursor_update) {
+                       complete_all(&commit->flip_done);
+                       continue;
+               }
+
+               if (!crtc_state->event) {
+                       commit->event = kzalloc(sizeof(*commit->event),
+                                               GFP_KERNEL);
+                       if (!commit->event)
+                               return -ENOMEM;
+
+                       crtc_state->event = commit->event;
+               }
+
+               crtc_state->event->base.completion = &commit->flip_done;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
+
+static struct drm_crtc_commit *preceding_commit(struct drm_crtc *crtc)
+{
+       struct drm_crtc_commit *commit;
+       int i = 0;
+
+       list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
+               /* skip the first entry, that's the current commit */
+               if (i == 1)
+                       return commit;
+               i++;
+       }
+
+       return NULL;
+}
+
+/**
+ * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
+ * @state: new modeset state to be committed
+ *
+ * This function waits for all preceding commits that touch the same CRTC as
+ * @state to both be committed to the hardware (as signalled by
+ * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
+ * by calling drm_crtc_send_vblank_event() on the event member of
+ * &drm_crtc_state).
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc_commit *commit;
+       int i;
+       long ret;
+
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               spin_lock(&crtc->commit_lock);
+               commit = preceding_commit(crtc);
+               if (commit)
+                       drm_crtc_commit_get(commit);
+               spin_unlock(&crtc->commit_lock);
+
+               if (!commit)
+                       continue;
+
+               ret = wait_for_completion_timeout(&commit->hw_done,
+                                                 10*HZ);
+               if (ret == 0)
+                       DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
+                                 crtc->base.id, crtc->name);
+
+               /* Currently no support for overwriting flips, hence
+                * stall for the previous one to execute completely. */
+               ret = wait_for_completion_timeout(&commit->flip_done,
+                                                 10*HZ);
+               if (ret == 0)
+                       DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
+                                 crtc->base.id, crtc->name);
+
+               drm_crtc_commit_put(commit);
+       }
+}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
+
+/**
+ * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
+ * @state: new modeset state to be committed
+ *
+ * This function is used to signal completion of the hardware commit step. After
+ * this step the driver is not allowed to read or change any permanent software
+ * or hardware modeset state. The only exception is state protected by other
+ * means than &drm_modeset_lock locks.
+ *
+ * Drivers should try to postpone any expensive or delayed cleanup work until
+ * after this function is called.
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc_commit *commit;
+       int i;
+
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               commit = state->crtcs[i].commit;
+               if (!commit)
+                       continue;
+
+               /* backend must have consumed any event by now */
+               WARN_ON(crtc->state->event);
+               spin_lock(&crtc->commit_lock);
+               complete_all(&commit->hw_done);
+               spin_unlock(&crtc->commit_lock);
+       }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
+
+/**
+ * drm_atomic_helper_commit_cleanup_done - signal completion of commit
+ * @state: new modeset state to be committed
+ *
+ * This signals completion of the atomic update @state, including any cleanup
+ * work. If used, it must be called right before calling
+ * drm_atomic_state_free().
+ *
+ * This is part of the atomic helper support for nonblocking commits, see
+ * drm_atomic_helper_setup_commit() for an overview.
+ */
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc_commit *commit;
+       int i;
+       long ret;
+
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               commit = state->crtcs[i].commit;
+               if (WARN_ON(!commit))
+                       continue;
+
+               spin_lock(&crtc->commit_lock);
+               complete_all(&commit->cleanup_done);
+               WARN_ON(!try_wait_for_completion(&commit->hw_done));
+
+               /* commit_list borrows our reference, so we need to remove it
+                * before we clean up our drm_atomic_state. But only after the
+                * commit actually completed, otherwise subsequent commits won't
+                * stall properly. */
+               if (try_wait_for_completion(&commit->flip_done))
+                       goto del_commit;
+
+               spin_unlock(&crtc->commit_lock);
+
+               /* We must wait for the vblank event to signal our completion
+                * before releasing our reference, since the vblank work does
+                * not hold a reference of its own. */
+               ret = wait_for_completion_timeout(&commit->flip_done,
+                                                 10*HZ);
+               if (ret == 0)
+                       DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
+                                 crtc->base.id, crtc->name);
+
+               spin_lock(&crtc->commit_lock);
+del_commit:
+               list_del(&commit->commit_entry);
+               spin_unlock(&crtc->commit_lock);
+       }
+}
+EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
+
 /**
  * drm_atomic_helper_prepare_planes - prepare plane resources before commit
  * @dev: DRM device
@@ -1249,16 +1616,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
 int drm_atomic_helper_prepare_planes(struct drm_device *dev,
                                     struct drm_atomic_state *state)
 {
-       int nplanes = dev->mode_config.num_total_plane;
-       int ret, i;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
+       int ret, i, j;
 
-       for (i = 0; i < nplanes; i++) {
+       for_each_plane_in_state(state, plane, plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
-               struct drm_plane *plane = state->planes[i];
-               struct drm_plane_state *plane_state = state->plane_states[i];
-
-               if (!plane)
-                       continue;
 
                funcs = plane->helper_private;
 
@@ -1272,12 +1635,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
        return 0;
 
 fail:
-       for (i--; i >= 0; i--) {
+       for_each_plane_in_state(state, plane, plane_state, j) {
                const struct drm_plane_helper_funcs *funcs;
-               struct drm_plane *plane = state->planes[i];
-               struct drm_plane_state *plane_state = state->plane_states[i];
 
-               if (!plane)
+               if (j >= i)
                        continue;
 
                funcs = plane->helper_private;
@@ -1537,8 +1898,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
 
 /**
  * drm_atomic_helper_swap_state - store atomic state into current sw state
- * @dev: DRM device
  * @state: atomic state
+ * @stall: stall for preceding commits
  *
  * This function stores the atomic state into the current state pointers in all
  * driver objects. It should be called after all failing steps have been done
@@ -1559,42 +1920,70 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
  *
  * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
  * contains the old state. Also do any other cleanup required with that state.
+ *
+ * @stall must be set when nonblocking commits for this driver directly access
+ * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the
+ * current atomic helpers this is almost always the case, since the helpers
+ * don't pass the right state structures to the callbacks.
  */
-void drm_atomic_helper_swap_state(struct drm_device *dev,
-                                 struct drm_atomic_state *state)
+void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
+                                 bool stall)
 {
        int i;
+       long ret;
+       struct drm_connector *connector;
+       struct drm_connector_state *conn_state;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
+       struct drm_crtc_commit *commit;
+
+       if (stall) {
+               for_each_crtc_in_state(state, crtc, crtc_state, i) {
+                       spin_lock(&crtc->commit_lock);
+                       commit = list_first_entry_or_null(&crtc->commit_list,
+                                       struct drm_crtc_commit, commit_entry);
+                       if (commit)
+                               drm_crtc_commit_get(commit);
+                       spin_unlock(&crtc->commit_lock);
+
+                       if (!commit)
+                               continue;
 
-       for (i = 0; i < state->num_connector; i++) {
-               struct drm_connector *connector = state->connectors[i];
-
-               if (!connector)
-                       continue;
+                       ret = wait_for_completion_timeout(&commit->hw_done,
+                                                         10*HZ);
+                       if (ret == 0)
+                               DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
+                                         crtc->base.id, crtc->name);
+                       drm_crtc_commit_put(commit);
+               }
+       }
 
+       for_each_connector_in_state(state, connector, conn_state, i) {
                connector->state->state = state;
-               swap(state->connector_states[i], connector->state);
+               swap(state->connectors[i].state, connector->state);
                connector->state->state = NULL;
        }
 
-       for (i = 0; i < dev->mode_config.num_crtc; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-
-               if (!crtc)
-                       continue;
-
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
                crtc->state->state = state;
-               swap(state->crtc_states[i], crtc->state);
+               swap(state->crtcs[i].state, crtc->state);
                crtc->state->state = NULL;
-       }
 
-       for (i = 0; i < dev->mode_config.num_total_plane; i++) {
-               struct drm_plane *plane = state->planes[i];
+               if (state->crtcs[i].commit) {
+                       spin_lock(&crtc->commit_lock);
+                       list_add(&state->crtcs[i].commit->commit_entry,
+                                &crtc->commit_list);
+                       spin_unlock(&crtc->commit_lock);
 
-               if (!plane)
-                       continue;
+                       state->crtcs[i].commit->event = NULL;
+               }
+       }
 
+       for_each_plane_in_state(state, plane, plane_state, i) {
                plane->state->state = state;
-               swap(state->plane_states[i], plane->state);
+               swap(state->planes[i].state, plane->state);
                plane->state->state = NULL;
        }
 }
@@ -2409,7 +2798,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
  * This is the main helper function provided by the atomic helper framework for
  * implementing the legacy DPMS connector interface. It computes the new desired
  * ->active state for the corresponding CRTC (if the connector is enabled) and
- *  updates it.
+ * updates it.
  *
  * Returns:
  * Returns 0 on success, negative errno numbers on failure.
@@ -2930,16 +3319,15 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
  * @red: red correction table
  * @green: green correction table
  * @blue: blue correction table
- * @start:
  * @size: size of the tables
  *
  * Implements support for legacy gamma correction table for drivers
  * that support color management through the DEGAMMA_LUT/GAMMA_LUT
  * properties.
  */
-void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
-                                       u16 *red, u16 *green, u16 *blue,
-                                       uint32_t start, uint32_t size)
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+                                      u16 *red, u16 *green, u16 *blue,
+                                      uint32_t size)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_mode_config *config = &dev->mode_config;
@@ -2951,7 +3339,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
 
        state = drm_atomic_state_alloc(crtc->dev);
        if (!state)
-               return;
+               return -ENOMEM;
 
        blob = drm_property_create_blob(dev,
                                        sizeof(struct drm_color_lut) * size,
@@ -3002,7 +3390,7 @@ retry:
 
        drm_property_unreference_blob(blob);
 
-       return;
+       return 0;
 fail:
        if (ret == -EDEADLK)
                goto backoff;
@@ -3010,7 +3398,7 @@ fail:
        drm_atomic_state_free(state);
        drm_property_unreference_blob(blob);
 
-       return;
+       return ret;
 backoff:
        drm_atomic_state_clear(state);
        drm_atomic_legacy_backoff(state);
index 50d0baa06db057057341dce5dfab930782785510..4153e8a193af39c01d6ff20326818d052cef4287 100644
 
 #include <drm/drmP.h>
 #include "drm_internal.h"
+#include "drm_legacy.h"
 
 /**
- * drm_getmagic - Get unique magic of a client
- * @dev: DRM device to operate on
- * @data: ioctl data containing the drm_auth object
- * @file_priv: DRM file that performs the operation
+ * DOC: master and authentication
  *
- * This looks up the unique magic of the passed client and returns it. If the
- * client did not have a magic assigned, yet, a new one is registered. The magic
- * is stored in the passed drm_auth object.
+ * struct &drm_master is used to track groups of clients with open
+ * primary/legacy device nodes. For every struct &drm_file which has at least
+ * once successfully become the device master (either through the SET_MASTER
+ * IOCTL, or implicitly through opening the primary device node when no one
+ * else is the current master at that time) there exists one &drm_master.
+ * This is noted in the is_master member of &drm_file. All other clients have
+ * just a pointer to the &drm_master they are associated with.
  *
- * Returns: 0 on success, negative error code on failure.
+ * In addition only one &drm_master can be the current master for a &drm_device.
+ * It can be switched through the DROP_MASTER and SET_MASTER IOCTL, or
+ * implicitly through closing/opening the primary device node. See also
+ * drm_is_current_master().
+ *
+ * Clients can authenticate against the current master (if it matches their own)
+ * using the GETMAGIC and AUTHMAGIC IOCTLs. Together with exchanging masters,
+ * this allows controlled access to the device for an entire group of mutually
+ * trusted clients.
  */
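
To illustrate the handshake from the userspace side, a minimal sketch using
the libdrm wrappers for these IOCTLs; how the magic travels from client to
master (e.g. over a socket) is left out and purely up to the applications
involved:

        #include <xf86drm.h>

        /* in the unauthenticated client: fetch a magic for our open fd */
        int client_get_magic(int fd, drm_magic_t *magic)
        {
                return drmGetMagic(fd, magic);          /* GETMAGIC */
        }

        /* in the current master, after receiving the magic from the client:
         * authenticate the client that owns it */
        int master_auth_client(int master_fd, drm_magic_t magic)
        {
                return drmAuthMagic(master_fd, magic);  /* AUTHMAGIC */
        }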
+
 int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
        struct drm_auth *auth = data;
        int ret = 0;
 
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&dev->master_mutex);
        if (!file_priv->magic) {
                ret = idr_alloc(&file_priv->master->magic_map, file_priv,
                                1, 0, GFP_KERNEL);
@@ -56,23 +67,13 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
                        file_priv->magic = ret;
        }
        auth->magic = file_priv->magic;
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->master_mutex);
 
        DRM_DEBUG("%u\n", auth->magic);
 
        return ret < 0 ? ret : 0;
 }
 
-/**
- * drm_authmagic - Authenticate client with a magic
- * @dev: DRM device to operate on
- * @data: ioctl data containing the drm_auth object
- * @file_priv: DRM file that performs the operation
- *
- * This looks up a DRM client by the passed magic and authenticates it.
- *
- * Returns: 0 on success, negative error code on failure.
- */
 int drm_authmagic(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
 {
@@ -81,13 +82,253 @@ int drm_authmagic(struct drm_device *dev, void *data,
 
        DRM_DEBUG("%u\n", auth->magic);
 
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&dev->master_mutex);
        file = idr_find(&file_priv->master->magic_map, auth->magic);
        if (file) {
                file->authenticated = 1;
                idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
        }
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->master_mutex);
 
        return file ? 0 : -EINVAL;
 }
+
+static struct drm_master *drm_master_create(struct drm_device *dev)
+{
+       struct drm_master *master;
+
+       master = kzalloc(sizeof(*master), GFP_KERNEL);
+       if (!master)
+               return NULL;
+
+       kref_init(&master->refcount);
+       spin_lock_init(&master->lock.spinlock);
+       init_waitqueue_head(&master->lock.lock_queue);
+       idr_init(&master->magic_map);
+       master->dev = dev;
+
+       return master;
+}
+
+static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
+                         bool new_master)
+{
+       int ret = 0;
+
+       dev->master = drm_master_get(fpriv->master);
+       if (dev->driver->master_set) {
+               ret = dev->driver->master_set(dev, fpriv, new_master);
+               if (unlikely(ret != 0)) {
+                       drm_master_put(&dev->master);
+               }
+       }
+
+       return ret;
+}
+
+static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
+{
+       struct drm_master *old_master;
+       int ret;
+
+       lockdep_assert_held_once(&dev->master_mutex);
+
+       old_master = fpriv->master;
+       fpriv->master = drm_master_create(dev);
+       if (!fpriv->master) {
+               fpriv->master = old_master;
+               return -ENOMEM;
+       }
+
+       if (dev->driver->master_create) {
+               ret = dev->driver->master_create(dev, fpriv->master);
+               if (ret)
+                       goto out_err;
+       }
+       fpriv->is_master = 1;
+       fpriv->authenticated = 1;
+
+       ret = drm_set_master(dev, fpriv, true);
+       if (ret)
+               goto out_err;
+
+       if (old_master)
+               drm_master_put(&old_master);
+
+       return 0;
+
+out_err:
+       /* drop references and restore old master on failure */
+       drm_master_put(&fpriv->master);
+       fpriv->master = old_master;
+
+       return ret;
+}
+
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
+{
+       int ret = 0;
+
+       mutex_lock(&dev->master_mutex);
+       if (drm_is_current_master(file_priv))
+               goto out_unlock;
+
+       if (dev->master) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       if (!file_priv->master) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       if (!file_priv->is_master) {
+               ret = drm_new_set_master(dev, file_priv);
+               goto out_unlock;
+       }
+
+       ret = drm_set_master(dev, file_priv, false);
+out_unlock:
+       mutex_unlock(&dev->master_mutex);
+       return ret;
+}
+
+static void drm_drop_master(struct drm_device *dev,
+                           struct drm_file *fpriv)
+{
+       if (dev->driver->master_drop)
+               dev->driver->master_drop(dev, fpriv);
+       drm_master_put(&dev->master);
+}
+
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       int ret = -EINVAL;
+
+       mutex_lock(&dev->master_mutex);
+       if (!drm_is_current_master(file_priv))
+               goto out_unlock;
+
+       if (!dev->master)
+               goto out_unlock;
+
+       ret = 0;
+       drm_drop_master(dev, file_priv);
+out_unlock:
+       mutex_unlock(&dev->master_mutex);
+       return ret;
+}
+
+int drm_master_open(struct drm_file *file_priv)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       int ret = 0;
+
+       /* if there is no current master make this fd it, but do not create
+        * any master object for render clients */
+       mutex_lock(&dev->master_mutex);
+       if (!dev->master)
+               ret = drm_new_set_master(dev, file_priv);
+       else
+               file_priv->master = drm_master_get(dev->master);
+       mutex_unlock(&dev->master_mutex);
+
+       return ret;
+}
+
+void drm_master_release(struct drm_file *file_priv)
+{
+       struct drm_device *dev = file_priv->minor->dev;
+       struct drm_master *master = file_priv->master;
+
+       mutex_lock(&dev->master_mutex);
+       if (file_priv->magic)
+               idr_remove(&file_priv->master->magic_map, file_priv->magic);
+
+       if (!drm_is_current_master(file_priv))
+               goto out;
+
+       if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+               /*
+                * Since the master is disappearing, so is the
+                * possibility to lock.
+                */
+               mutex_lock(&dev->struct_mutex);
+               if (master->lock.hw_lock) {
+                       if (dev->sigdata.lock == master->lock.hw_lock)
+                               dev->sigdata.lock = NULL;
+                       master->lock.hw_lock = NULL;
+                       master->lock.file_priv = NULL;
+                       wake_up_interruptible_all(&master->lock.lock_queue);
+               }
+               mutex_unlock(&dev->struct_mutex);
+       }
+
+       if (dev->master == file_priv->master)
+               drm_drop_master(dev, file_priv);
+out:
+       /* drop the master reference held by the file priv */
+       if (file_priv->master)
+               drm_master_put(&file_priv->master);
+       mutex_unlock(&dev->master_mutex);
+}
+
+/**
+ * drm_is_current_master - checks whether @fpriv is the current master
+ * @fpriv: DRM file private
+ *
+ * Checks whether @fpriv is the current master on its device. This decides
+ * whether a client is allowed to run DRM_MASTER IOCTLs.
+ *
+ * Most of the modern IOCTLs which require DRM_MASTER are for kernel modesetting
+ * - the current master is assumed to own the non-shareable display hardware.
+ */
+bool drm_is_current_master(struct drm_file *fpriv)
+{
+       return fpriv->is_master && fpriv->master == fpriv->minor->dev->master;
+}
+EXPORT_SYMBOL(drm_is_current_master);
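
Driver-private IOCTLs that bypass the core DRM_MASTER flag handling can use
this check directly; a minimal sketch with a hypothetical foo_* ioctl:

        static int foo_modeset_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
        {
                if (!drm_is_current_master(file_priv))
                        return -EACCES;

                /* ... touch master-only display state ... */

                return 0;
        }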
+
+/**
+ * drm_master_get - reference a master pointer
+ * @master: struct &drm_master
+ *
+ * Increments the reference count of @master and returns a pointer to @master.
+ */
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+       kref_get(&master->refcount);
+       return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct kref *kref)
+{
+       struct drm_master *master = container_of(kref, struct drm_master, refcount);
+       struct drm_device *dev = master->dev;
+
+       if (dev->driver->master_destroy)
+               dev->driver->master_destroy(dev, master);
+
+       drm_legacy_master_rmmaps(dev, master);
+
+       idr_destroy(&master->magic_map);
+       kfree(master->unique);
+       kfree(master);
+}
+
+/**
+ * drm_master_put - unreference and clear a master pointer
+ * @master: pointer to a pointer of struct &drm_master
+ *
+ * This decrements the &drm_master behind @master and sets it to NULL.
+ */
+void drm_master_put(struct drm_master **master)
+{
+       kref_put(&(*master)->refcount, drm_master_destroy);
+       *master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
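
Since dev->master can change as soon as dev->master_mutex is dropped, code
that wants to use the current master outside of that lock should grab its own
reference first; a sketch of the pattern:

        struct drm_master *master;

        mutex_lock(&dev->master_mutex);
        master = dev->master ? drm_master_get(dev->master) : NULL;
        mutex_unlock(&dev->master_mutex);

        if (master) {
                /* ... use the master, e.g. master->magic_map ... */
                drm_master_put(&master); /* drops the ref, NULLs the pointer */
        }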
index b3654404abd02d70f1db264ab1db4e59d372daaf..255543086590610d9c80b7f90ba27a6088858756 100644
@@ -36,7 +36,7 @@
  * encoder chain.
  *
  * A bridge is always attached to a single &drm_encoder at a time, but can be
- * either connected to it directly, or through an intermediate bridge:
+ * either connected to it directly, or through an intermediate bridge::
  *
  *     encoder ---> bridge B ---> bridge A
  *
index 9b34158c0f77b71a5d27051c281a57fff5895888..c3a12cd8bd0de963e410d921c25e0bb7eeba46c9 100644
@@ -51,7 +51,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
                 */
                if (!entry->map ||
                    map->type != entry->map->type ||
-                   entry->master != dev->primary->master)
+                   entry->master != dev->master)
                        continue;
                switch (map->type) {
                case _DRM_SHM:
@@ -245,12 +245,12 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
-                       if (dev->primary->master->lock.hw_lock != NULL) {
+                       if (dev->master->lock.hw_lock != NULL) {
                                vfree(map->handle);
                                kfree(map);
                                return -EBUSY;
                        }
-                       dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;   /* Pointer to lock */
+                       dev->sigdata.lock = dev->master->lock.hw_lock = map->handle;    /* Pointer to lock */
                }
                break;
        case _DRM_AGP: {
@@ -356,7 +356,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
        mutex_unlock(&dev->struct_mutex);
 
        if (!(map->flags & _DRM_DRIVER))
-               list->master = dev->primary->master;
+               list->master = dev->master;
        *maplist = list;
        return 0;
 }
index 059f7c39c582827c1ad923c3f2fb6a203a235b52..a7916e5f8864f554417e30d434643b9e8e129ae1 100644
@@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
                mb();
                for (; addr < end; addr += size)
                        clflushopt(addr);
+               clflushopt(end - 1); /* force serialisation */
                mb();
                return;
        }
index d2a6d958ca76068bf45b59e98dbe2a709ecf8817..f1d9f0569d7f86514773e517bafba9326de82a45 100644
@@ -39,6 +39,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_modeset_lock.h>
 #include <drm/drm_atomic.h>
+#include <drm/drm_auth.h>
 
 #include "drm_crtc_internal.h"
 #include "drm_internal.h"
@@ -239,37 +240,6 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order)
 }
 EXPORT_SYMBOL(drm_get_subpixel_order_name);
 
-static char printable_char(int c)
-{
-       return isascii(c) && isprint(c) ? c : '?';
-}
-
-/**
- * drm_get_format_name - return a string for drm fourcc format
- * @format: format to compute name of
- *
- * Note that the buffer used by this function is globally shared and owned by
- * the function itself.
- *
- * FIXME: This isn't really multithreading safe.
- */
-const char *drm_get_format_name(uint32_t format)
-{
-       static char buf[32];
-
-       snprintf(buf, sizeof(buf),
-                "%c%c%c%c %s-endian (0x%08x)",
-                printable_char(format & 0xff),
-                printable_char((format >> 8) & 0xff),
-                printable_char((format >> 16) & 0xff),
-                printable_char((format >> 24) & 0x7f),
-                format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
-                format);
-
-       return buf;
-}
-EXPORT_SYMBOL(drm_get_format_name);
-
 /*
  * Internal function to assign a slot in the object idr and optionally
  * register the object into the idr.
@@ -426,6 +396,51 @@ void drm_mode_object_reference(struct drm_mode_object *obj)
 }
 EXPORT_SYMBOL(drm_mode_object_reference);
 
+/**
+ * drm_crtc_force_disable - Forcibly turn off a CRTC
+ * @crtc: CRTC to turn off
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable(struct drm_crtc *crtc)
+{
+       struct drm_mode_set set = {
+               .crtc = crtc,
+       };
+
+       return drm_mode_set_config_internal(&set);
+}
+EXPORT_SYMBOL(drm_crtc_force_disable);
+
+/**
+ * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs
+ * @dev: DRM device whose CRTCs to turn off
+ *
+ * Drivers may want to call this on unload to ensure that all displays are
+ * unlit and the GPU is in a consistent, low power state. Takes modeset locks.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_crtc_force_disable_all(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       drm_modeset_lock_all(dev);
+       drm_for_each_crtc(crtc, dev)
+               if (crtc->enabled) {
+                       ret = drm_crtc_force_disable(crtc);
+                       if (ret)
+                               goto out;
+               }
+out:
+       drm_modeset_unlock_all(dev);
+       return ret;
+}
+EXPORT_SYMBOL(drm_crtc_force_disable_all);
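
As the kerneldoc above suggests, the typical caller is a driver's unload path;
a minimal sketch with a hypothetical foo_* driver:

        static void foo_driver_unload(struct drm_device *dev)
        {
                /* make sure no CRTC keeps scanning out during teardown */
                drm_crtc_force_disable_all(dev);

                /* ... release driver-private resources ... */
        }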
+
 static void drm_framebuffer_free(struct kref *kref)
 {
        struct drm_framebuffer *fb =
@@ -535,7 +550,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
  *
  * Cleanup framebuffer. This function is intended to be used from the driver's
  * ->destroy callback. It can also be used to clean up driver private
- *  framebuffers embedded into a larger structure.
+ * framebuffers embedded into a larger structure.
  *
  * Note that this function does not remove the fb from active usage - if it is
  * still used anywhere, hilarity can ensue since userspace could call getfb on
@@ -574,8 +589,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
        struct drm_device *dev;
        struct drm_crtc *crtc;
        struct drm_plane *plane;
-       struct drm_mode_set set;
-       int ret;
 
        if (!fb)
                return;
@@ -605,11 +618,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
                drm_for_each_crtc(crtc, dev) {
                        if (crtc->primary->fb == fb) {
                                /* should turn off the crtc */
-                               memset(&set, 0, sizeof(struct drm_mode_set));
-                               set.crtc = crtc;
-                               set.fb = NULL;
-                               ret = drm_mode_set_config_internal(&set);
-                               if (ret)
+                               if (drm_crtc_force_disable(crtc))
                                        DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
                        }
                }
@@ -639,6 +648,31 @@ static unsigned int drm_num_crtcs(struct drm_device *dev)
        return num;
 }
 
+static int drm_crtc_register_all(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+       int ret = 0;
+
+       drm_for_each_crtc(crtc, dev) {
+               if (crtc->funcs->late_register)
+                       ret = crtc->funcs->late_register(crtc);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void drm_crtc_unregister_all(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+
+       drm_for_each_crtc(crtc, dev) {
+               if (crtc->funcs->early_unregister)
+                       crtc->funcs->early_unregister(crtc);
+       }
+}
+
 /**
  * drm_crtc_init_with_planes - Initialise a new CRTC object with
  *    specified primary and cursor planes.
@@ -669,6 +703,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
        crtc->dev = dev;
        crtc->funcs = funcs;
 
+       INIT_LIST_HEAD(&crtc->commit_list);
+       spin_lock_init(&crtc->commit_lock);
+
        drm_modeset_lock_init(&crtc->mutex);
        ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
        if (ret)
@@ -692,7 +729,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
        crtc->base.properties = &crtc->properties;
 
        list_add_tail(&crtc->head, &config->crtc_list);
-       config->num_crtc++;
+       crtc->index = config->num_crtc++;
 
        crtc->primary = primary;
        crtc->cursor = cursor;
@@ -722,6 +759,11 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
 
+       /* Note that the crtc_list is considered to be static; should we
+        * remove the drm_crtc at runtime we would have to decrement all
+        * the indices on the drm_crtc after us in the crtc_list.
+        */
+
        kfree(crtc->gamma_store);
        crtc->gamma_store = NULL;
 
@@ -741,29 +783,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_cleanup);
 
-/**
- * drm_crtc_index - find the index of a registered CRTC
- * @crtc: CRTC to find index for
- *
- * Given a registered CRTC, return the index of that CRTC within a DRM
- * device's list of CRTCs.
- */
-unsigned int drm_crtc_index(struct drm_crtc *crtc)
-{
-       unsigned int index = 0;
-       struct drm_crtc *tmp;
-
-       drm_for_each_crtc(tmp, crtc->dev) {
-               if (tmp == crtc)
-                       return index;
-
-               index++;
-       }
-
-       BUG();
-}
-EXPORT_SYMBOL(drm_crtc_index);
-
 /*
  * drm_mode_remove - remove and free a mode
  * @connector: connector list to modify
@@ -909,11 +928,11 @@ int drm_connector_init(struct drm_device *dev,
        connector->dev = dev;
        connector->funcs = funcs;
 
-       connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
-       if (connector->connector_id < 0) {
-               ret = connector->connector_id;
+       ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+       if (ret < 0)
                goto out_put;
-       }
+       connector->index = ret;
+       ret = 0;
 
        connector->connector_type = connector_type;
        connector->connector_type_id =
@@ -961,7 +980,7 @@ out_put_type_id:
                ida_remove(connector_ida, connector->connector_type_id);
 out_put_id:
        if (ret)
-               ida_remove(&config->connector_ida, connector->connector_id);
+               ida_remove(&config->connector_ida, connector->index);
 out_put:
        if (ret)
                drm_mode_object_unregister(dev, &connector->base);
@@ -984,6 +1003,12 @@ void drm_connector_cleanup(struct drm_connector *connector)
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode, *t;
 
+       /* The connector should have been removed from userspace long before
+        * it is finally destroyed.
+        */
+       if (WARN_ON(connector->registered))
+               drm_connector_unregister(connector);
+
        if (connector->tile_group) {
                drm_mode_put_tile_group(dev, connector->tile_group);
                connector->tile_group = NULL;
@@ -999,7 +1024,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
                   connector->connector_type_id);
 
        ida_remove(&dev->mode_config.connector_ida,
-                  connector->connector_id);
+                  connector->index);
 
        kfree(connector->display_info.bus_formats);
        drm_mode_object_unregister(dev, &connector->base);
@@ -1030,19 +1055,34 @@ int drm_connector_register(struct drm_connector *connector)
 {
        int ret;
 
+       if (connector->registered)
+               return 0;
+
        ret = drm_sysfs_connector_add(connector);
        if (ret)
                return ret;
 
        ret = drm_debugfs_connector_add(connector);
        if (ret) {
-               drm_sysfs_connector_remove(connector);
-               return ret;
+               goto err_sysfs;
+       }
+
+       if (connector->funcs->late_register) {
+               ret = connector->funcs->late_register(connector);
+               if (ret)
+                       goto err_debugfs;
        }
 
        drm_mode_object_register(connector->dev, &connector->base);
 
+       connector->registered = true;
        return 0;
+
+err_debugfs:
+       drm_debugfs_connector_remove(connector);
+err_sysfs:
+       drm_sysfs_connector_remove(connector);
+       return ret;
 }
 EXPORT_SYMBOL(drm_connector_register);
 
@@ -1054,28 +1094,29 @@ EXPORT_SYMBOL(drm_connector_register);
  */
 void drm_connector_unregister(struct drm_connector *connector)
 {
+       if (!connector->registered)
+               return;
+
+       if (connector->funcs->early_unregister)
+               connector->funcs->early_unregister(connector);
+
        drm_sysfs_connector_remove(connector);
        drm_debugfs_connector_remove(connector);
+
+       connector->registered = false;
 }
 EXPORT_SYMBOL(drm_connector_unregister);
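
The paired late_register/early_unregister hooks used by this flow give drivers
a place to hang userspace-visible extras; a sketch of how a driver might wire
them up (foo_* names are hypothetical):

        static int foo_connector_late_register(struct drm_connector *connector)
        {
                /* create driver-private debugfs/sysfs entries; only called
                 * once the connector is visible to userspace */
                return 0;
        }

        static void foo_connector_early_unregister(struct drm_connector *connector)
        {
                /* tear those entries down again before the connector
                 * disappears from userspace */
        }

        static const struct drm_connector_funcs foo_connector_funcs = {
                /* ... the usual detect/fill_modes/destroy callbacks ... */
                .late_register = foo_connector_late_register,
                .early_unregister = foo_connector_early_unregister,
        };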
 
-/**
- * drm_connector_register_all - register all connectors
- * @dev: drm device
- *
- * This function registers all connectors in sysfs and other places so that
- * userspace can start to access them. Drivers can call it after calling
- * drm_dev_register() to complete the device registration, if they don't call
- * drm_connector_register() on each connector individually.
- *
- * When a device is unplugged and should be removed from userspace access,
- * call drm_connector_unregister_all(), which is the inverse of this
- * function.
- *
- * Returns:
- * Zero on success, error code on failure.
- */
-int drm_connector_register_all(struct drm_device *dev)
+static void drm_connector_unregister_all(struct drm_device *dev)
+{
+       struct drm_connector *connector;
+
+       /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               drm_connector_unregister(connector);
+}
+
+static int drm_connector_register_all(struct drm_device *dev)
 {
        struct drm_connector *connector;
        int ret;
@@ -1097,27 +1138,31 @@ err:
        drm_connector_unregister_all(dev);
        return ret;
 }
-EXPORT_SYMBOL(drm_connector_register_all);
 
-/**
- * drm_connector_unregister_all - unregister connector userspace interfaces
- * @dev: drm device
- *
- * This functions unregisters all connectors from sysfs and other places so
- * that userspace can no longer access them. Drivers should call this as the
- * first step tearing down the device instace, or when the underlying
- * physical device disappeared (e.g. USB unplug), right before calling
- * drm_dev_unregister().
- */
-void drm_connector_unregister_all(struct drm_device *dev)
+static int drm_encoder_register_all(struct drm_device *dev)
 {
-       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       int ret = 0;
 
-       /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
-               drm_connector_unregister(connector);
+       drm_for_each_encoder(encoder, dev) {
+               if (encoder->funcs->late_register)
+                       ret = encoder->funcs->late_register(encoder);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void drm_encoder_unregister_all(struct drm_device *dev)
+{
+       struct drm_encoder *encoder;
+
+       drm_for_each_encoder(encoder, dev) {
+               if (encoder->funcs->early_unregister)
+                       encoder->funcs->early_unregister(encoder);
+       }
 }
-EXPORT_SYMBOL(drm_connector_unregister_all);
 
 /**
  * drm_encoder_init - Init a preallocated encoder
@@ -1166,7 +1211,7 @@ int drm_encoder_init(struct drm_device *dev,
        }
 
        list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
-       dev->mode_config.num_encoder++;
+       encoder->index = dev->mode_config.num_encoder++;
 
 out_put:
        if (ret)
@@ -1179,29 +1224,6 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_encoder_init);
 
-/**
- * drm_encoder_index - find the index of a registered encoder
- * @encoder: encoder to find index for
- *
- * Given a registered encoder, return the index of that encoder within a DRM
- * device's list of encoders.
- */
-unsigned int drm_encoder_index(struct drm_encoder *encoder)
-{
-       unsigned int index = 0;
-       struct drm_encoder *tmp;
-
-       drm_for_each_encoder(tmp, encoder->dev) {
-               if (tmp == encoder)
-                       return index;
-
-               index++;
-       }
-
-       BUG();
-}
-EXPORT_SYMBOL(drm_encoder_index);
-
 /**
  * drm_encoder_cleanup - cleans up an initialised encoder
  * @encoder: encoder to cleanup
@@ -1212,6 +1234,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
 
+       /* Note that the encoder_list is considered to be static; should we
+        * remove the drm_encoder at runtime we would have to decrement all
+        * the indices on the drm_encoder after us in the encoder_list.
+        */
+
        drm_modeset_lock_all(dev);
        drm_mode_object_unregister(dev, &encoder->base);
        kfree(encoder->name);
@@ -1300,7 +1327,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
        plane->type = type;
 
        list_add_tail(&plane->head, &config->plane_list);
-       config->num_total_plane++;
+       plane->index = config->num_total_plane++;
        if (plane->type == DRM_PLANE_TYPE_OVERLAY)
                config->num_overlay_plane++;
 
@@ -1325,6 +1352,31 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
 }
 EXPORT_SYMBOL(drm_universal_plane_init);
 
+static int drm_plane_register_all(struct drm_device *dev)
+{
+       struct drm_plane *plane;
+       int ret = 0;
+
+       drm_for_each_plane(plane, dev) {
+               if (plane->funcs->late_register)
+                       ret = plane->funcs->late_register(plane);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void drm_plane_unregister_all(struct drm_device *dev)
+{
+       struct drm_plane *plane;
+
+       drm_for_each_plane(plane, dev) {
+               if (plane->funcs->early_unregister)
+                       plane->funcs->early_unregister(plane);
+       }
+}
+
 /**
  * drm_plane_init - Initialize a legacy plane
  * @dev: DRM device
@@ -1374,6 +1426,11 @@ void drm_plane_cleanup(struct drm_plane *plane)
 
        BUG_ON(list_empty(&plane->head));
 
+       /* Note that the plane_list is considered to be static; should we
+        * remove the drm_plane at runtime we would have to decrement all
+        * the indices on the drm_plane after us in the plane_list.
+        */
+
        list_del(&plane->head);
        dev->mode_config.num_total_plane--;
        if (plane->type == DRM_PLANE_TYPE_OVERLAY)
@@ -1390,29 +1447,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
 }
 EXPORT_SYMBOL(drm_plane_cleanup);
 
-/**
- * drm_plane_index - find the index of a registered plane
- * @plane: plane to find index for
- *
- * Given a registered plane, return the index of that CRTC within a DRM
- * device's list of planes.
- */
-unsigned int drm_plane_index(struct drm_plane *plane)
-{
-       unsigned int index = 0;
-       struct drm_plane *tmp;
-
-       drm_for_each_plane(tmp, plane->dev) {
-               if (tmp == plane)
-                       return index;
-
-               index++;
-       }
-
-       BUG();
-}
-EXPORT_SYMBOL(drm_plane_index);
-
 /**
  * drm_plane_from_index - find the registered plane at an index
  * @dev: DRM device
@@ -1425,13 +1459,11 @@ struct drm_plane *
 drm_plane_from_index(struct drm_device *dev, int idx)
 {
        struct drm_plane *plane;
-       unsigned int i = 0;
 
-       drm_for_each_plane(plane, dev) {
-               if (i == idx)
+       drm_for_each_plane(plane, dev)
+               if (idx == plane->index)
                        return plane;
-               i++;
-       }
+
        return NULL;
 }
 EXPORT_SYMBOL(drm_plane_from_index);
@@ -1467,6 +1499,46 @@ void drm_plane_force_disable(struct drm_plane *plane)
 }
 EXPORT_SYMBOL(drm_plane_force_disable);
 
+int drm_modeset_register_all(struct drm_device *dev)
+{
+       int ret;
+
+       ret = drm_plane_register_all(dev);
+       if (ret)
+               goto err_plane;
+
+       ret = drm_crtc_register_all(dev);
+       if  (ret)
+               goto err_crtc;
+
+       ret = drm_encoder_register_all(dev);
+       if (ret)
+               goto err_encoder;
+
+       ret = drm_connector_register_all(dev);
+       if (ret)
+               goto err_connector;
+
+       return 0;
+
+err_connector:
+       drm_encoder_unregister_all(dev);
+err_encoder:
+       drm_crtc_unregister_all(dev);
+err_crtc:
+       drm_plane_unregister_all(dev);
+err_plane:
+       return ret;
+}
+
+void drm_modeset_unregister_all(struct drm_device *dev)
+{
+       drm_connector_unregister_all(dev);
+       drm_encoder_unregister_all(dev);
+       drm_crtc_unregister_all(dev);
+       drm_plane_unregister_all(dev);
+}
+
 static int drm_mode_create_standard_properties(struct drm_device *dev)
 {
        struct drm_property *prop;
@@ -2821,8 +2893,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
                        goto out;
                }
 
-               drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-
                /*
                 * Check whether the primary plane supports the fb pixel format.
                 * Drivers not implementing the universal planes API use a
@@ -2977,6 +3047,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
                                DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
                                return PTR_ERR(fb);
                        }
+                       fb->hot_x = req->hot_x;
+                       fb->hot_y = req->hot_y;
                } else {
                        fb = NULL;
                }
@@ -3583,7 +3655,7 @@ int drm_mode_getfb(struct drm_device *dev,
        r->bpp = fb->bits_per_pixel;
        r->pitch = fb->pitches[0];
        if (fb->funcs->create_handle) {
-               if (file_priv->is_master || capable(CAP_SYS_ADMIN) ||
+               if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
                    drm_is_control_client(file_priv)) {
                        ret = fb->funcs->create_handle(fb, file_priv,
                                                       &r->handle);
@@ -3740,6 +3812,13 @@ void drm_fb_release(struct drm_file *priv)
        }
 }
 
+static bool drm_property_type_valid(struct drm_property *property)
+{
+       if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+               return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+       return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
+}
+
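For context, a quick sketch of the two flag families this helper tells apart; the property names are made up, the constructors are the existing property API:

        /* legacy property: the type lives directly in the flag bits */
        prop = drm_property_create_range(dev, 0, "hypothetical-range", 0, 100);

        /* extended property: the type is packed into the EXTENDED_TYPE field */
        prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC,
                                          "hypothetical-object",
                                          DRM_MODE_OBJECT_FB);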
 /**
  * drm_property_create - create a new property type
  * @dev: drm device
@@ -4841,7 +4920,8 @@ bool drm_property_change_valid_get(struct drm_property *property,
                if (value == 0)
                        return true;
 
-               return _object_find(property->dev, value, property->values[0]) != NULL;
+               *ref = _object_find(property->dev, value, property->values[0]);
+               return *ref != NULL;
        }
 
        for (i = 0; i < property->num_values; i++)
@@ -5139,6 +5219,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
 int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
                                 int gamma_size)
 {
+       uint16_t *r_base, *g_base, *b_base;
+       int i;
+
        crtc->gamma_size = gamma_size;
 
        crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
@@ -5148,6 +5231,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
                return -ENOMEM;
        }
 
+       r_base = crtc->gamma_store;
+       g_base = r_base + gamma_size;
+       b_base = g_base + gamma_size;
+       for (i = 0; i < gamma_size; i++) {
+               r_base[i] = i << 8;
+               g_base[i] = i << 8;
+               b_base[i] = i << 8;
+       }
+
        return 0;
 }
 EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
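The loop added above seeds gamma_store with a linear ramp in 8.8 fixed point (i << 8), so a CRTC whose gamma table is never touched by userspace now starts out close to identity instead of all black. The call itself is unchanged; a sketch with the common 256-entry table:

        /* typically done once, from the driver's CRTC init path */
        ret = drm_mode_crtc_set_gamma_size(crtc, 256);
        if (ret)
                return ret;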
@@ -5215,7 +5308,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
                goto out;
        }
 
-       crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+       ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
 
 out:
        drm_modeset_unlock_all(dev);
@@ -5544,264 +5637,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
        return dev->driver->dumb_destroy(file_priv, dev, args->handle);
 }
 
-/**
- * drm_fb_get_bpp_depth - get the bpp/depth values for format
- * @format: pixel format (DRM_FORMAT_*)
- * @depth: storage for the depth value
- * @bpp: storage for the bpp value
- *
- * This only supports RGB formats here for compat with code that doesn't use
- * pixel formats directly yet.
- */
-void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
-                         int *bpp)
-{
-       switch (format) {
-       case DRM_FORMAT_C8:
-       case DRM_FORMAT_RGB332:
-       case DRM_FORMAT_BGR233:
-               *depth = 8;
-               *bpp = 8;
-               break;
-       case DRM_FORMAT_XRGB1555:
-       case DRM_FORMAT_XBGR1555:
-       case DRM_FORMAT_RGBX5551:
-       case DRM_FORMAT_BGRX5551:
-       case DRM_FORMAT_ARGB1555:
-       case DRM_FORMAT_ABGR1555:
-       case DRM_FORMAT_RGBA5551:
-       case DRM_FORMAT_BGRA5551:
-               *depth = 15;
-               *bpp = 16;
-               break;
-       case DRM_FORMAT_RGB565:
-       case DRM_FORMAT_BGR565:
-               *depth = 16;
-               *bpp = 16;
-               break;
-       case DRM_FORMAT_RGB888:
-       case DRM_FORMAT_BGR888:
-               *depth = 24;
-               *bpp = 24;
-               break;
-       case DRM_FORMAT_XRGB8888:
-       case DRM_FORMAT_XBGR8888:
-       case DRM_FORMAT_RGBX8888:
-       case DRM_FORMAT_BGRX8888:
-               *depth = 24;
-               *bpp = 32;
-               break;
-       case DRM_FORMAT_XRGB2101010:
-       case DRM_FORMAT_XBGR2101010:
-       case DRM_FORMAT_RGBX1010102:
-       case DRM_FORMAT_BGRX1010102:
-       case DRM_FORMAT_ARGB2101010:
-       case DRM_FORMAT_ABGR2101010:
-       case DRM_FORMAT_RGBA1010102:
-       case DRM_FORMAT_BGRA1010102:
-               *depth = 30;
-               *bpp = 32;
-               break;
-       case DRM_FORMAT_ARGB8888:
-       case DRM_FORMAT_ABGR8888:
-       case DRM_FORMAT_RGBA8888:
-       case DRM_FORMAT_BGRA8888:
-               *depth = 32;
-               *bpp = 32;
-               break;
-       default:
-               DRM_DEBUG_KMS("unsupported pixel format %s\n",
-                             drm_get_format_name(format));
-               *depth = 0;
-               *bpp = 0;
-               break;
-       }
-}
-EXPORT_SYMBOL(drm_fb_get_bpp_depth);
-
-/**
- * drm_format_num_planes - get the number of planes for format
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The number of planes used by the specified pixel format.
- */
-int drm_format_num_planes(uint32_t format)
-{
-       switch (format) {
-       case DRM_FORMAT_YUV410:
-       case DRM_FORMAT_YVU410:
-       case DRM_FORMAT_YUV411:
-       case DRM_FORMAT_YVU411:
-       case DRM_FORMAT_YUV420:
-       case DRM_FORMAT_YVU420:
-       case DRM_FORMAT_YUV422:
-       case DRM_FORMAT_YVU422:
-       case DRM_FORMAT_YUV444:
-       case DRM_FORMAT_YVU444:
-               return 3;
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_NV21:
-       case DRM_FORMAT_NV16:
-       case DRM_FORMAT_NV61:
-       case DRM_FORMAT_NV24:
-       case DRM_FORMAT_NV42:
-               return 2;
-       default:
-               return 1;
-       }
-}
-EXPORT_SYMBOL(drm_format_num_planes);
-
-/**
- * drm_format_plane_cpp - determine the bytes per pixel value
- * @format: pixel format (DRM_FORMAT_*)
- * @plane: plane index
- *
- * Returns:
- * The bytes per pixel value for the specified plane.
- */
-int drm_format_plane_cpp(uint32_t format, int plane)
-{
-       unsigned int depth;
-       int bpp;
-
-       if (plane >= drm_format_num_planes(format))
-               return 0;
-
-       switch (format) {
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_YVYU:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-               return 2;
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_NV21:
-       case DRM_FORMAT_NV16:
-       case DRM_FORMAT_NV61:
-       case DRM_FORMAT_NV24:
-       case DRM_FORMAT_NV42:
-               return plane ? 2 : 1;
-       case DRM_FORMAT_YUV410:
-       case DRM_FORMAT_YVU410:
-       case DRM_FORMAT_YUV411:
-       case DRM_FORMAT_YVU411:
-       case DRM_FORMAT_YUV420:
-       case DRM_FORMAT_YVU420:
-       case DRM_FORMAT_YUV422:
-       case DRM_FORMAT_YVU422:
-       case DRM_FORMAT_YUV444:
-       case DRM_FORMAT_YVU444:
-               return 1;
-       default:
-               drm_fb_get_bpp_depth(format, &depth, &bpp);
-               return bpp >> 3;
-       }
-}
-EXPORT_SYMBOL(drm_format_plane_cpp);
-
-/**
- * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The horizontal chroma subsampling factor for the
- * specified pixel format.
- */
-int drm_format_horz_chroma_subsampling(uint32_t format)
-{
-       switch (format) {
-       case DRM_FORMAT_YUV411:
-       case DRM_FORMAT_YVU411:
-       case DRM_FORMAT_YUV410:
-       case DRM_FORMAT_YVU410:
-               return 4;
-       case DRM_FORMAT_YUYV:
-       case DRM_FORMAT_YVYU:
-       case DRM_FORMAT_UYVY:
-       case DRM_FORMAT_VYUY:
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_NV21:
-       case DRM_FORMAT_NV16:
-       case DRM_FORMAT_NV61:
-       case DRM_FORMAT_YUV422:
-       case DRM_FORMAT_YVU422:
-       case DRM_FORMAT_YUV420:
-       case DRM_FORMAT_YVU420:
-               return 2;
-       default:
-               return 1;
-       }
-}
-EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
-
-/**
- * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
- * @format: pixel format (DRM_FORMAT_*)
- *
- * Returns:
- * The vertical chroma subsampling factor for the
- * specified pixel format.
- */
-int drm_format_vert_chroma_subsampling(uint32_t format)
-{
-       switch (format) {
-       case DRM_FORMAT_YUV410:
-       case DRM_FORMAT_YVU410:
-               return 4;
-       case DRM_FORMAT_YUV420:
-       case DRM_FORMAT_YVU420:
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_NV21:
-               return 2;
-       default:
-               return 1;
-       }
-}
-EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
-
-/**
- * drm_format_plane_width - width of the plane given the first plane
- * @width: width of the first plane
- * @format: pixel format
- * @plane: plane index
- *
- * Returns:
- * The width of @plane, given that the width of the first plane is @width.
- */
-int drm_format_plane_width(int width, uint32_t format, int plane)
-{
-       if (plane >= drm_format_num_planes(format))
-               return 0;
-
-       if (plane == 0)
-               return width;
-
-       return width / drm_format_horz_chroma_subsampling(format);
-}
-EXPORT_SYMBOL(drm_format_plane_width);
-
-/**
- * drm_format_plane_height - height of the plane given the first plane
- * @height: height of the first plane
- * @format: pixel format
- * @plane: plane index
- *
- * Returns:
- * The height of @plane, given that the height of the first plane is @height.
- */
-int drm_format_plane_height(int height, uint32_t format, int plane)
-{
-       if (plane >= drm_format_num_planes(format))
-               return 0;
-
-       if (plane == 0)
-               return height;
-
-       return height / drm_format_vert_chroma_subsampling(format);
-}
-EXPORT_SYMBOL(drm_format_plane_height);
-
 /**
  * drm_rotation_simplify() - Try to simplify the rotation
  * @rotation: Rotation to be simplified
@@ -6065,3 +5900,48 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
        return tg;
 }
 EXPORT_SYMBOL(drm_mode_create_tile_group);
+
+/**
+ * drm_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @has_ctm: whether to attach ctm_property for CSC matrix
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets the driver enable the color correction
+ * properties on a CRTC: three correction properties (degamma, csc
+ * and gamma) that userspace can set, plus two size properties that
+ * inform userspace of the LUT sizes. Each property is optional: the
+ * gamma and degamma properties are only attached if their size is
+ * not 0, and ctm_property is only attached if has_ctm is true.
+ */
+void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+                               uint degamma_lut_size,
+                               bool has_ctm,
+                               uint gamma_lut_size)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *config = &dev->mode_config;
+
+       if (degamma_lut_size) {
+               drm_object_attach_property(&crtc->base,
+                                          config->degamma_lut_property, 0);
+               drm_object_attach_property(&crtc->base,
+                                          config->degamma_lut_size_property,
+                                          degamma_lut_size);
+       }
+
+       if (has_ctm)
+               drm_object_attach_property(&crtc->base,
+                                          config->ctm_property, 0);
+
+       if (gamma_lut_size) {
+               drm_object_attach_property(&crtc->base,
+                                          config->gamma_lut_property, 0);
+               drm_object_attach_property(&crtc->base,
+                                          config->gamma_lut_size_property,
+                                          gamma_lut_size);
+       }
+}
+EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
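A short usage sketch; the LUT sizes are illustrative, not mandated by the API:

        /* hardware with a 33-entry degamma LUT, a CSC block and a
         * 256-entry gamma LUT
         */
        drm_crtc_enable_color_mgmt(crtc, 33, true, 256);

        /* a CRTC with gamma only: degamma and CTM are simply left off */
        drm_crtc_enable_color_mgmt(crtc, 0, false, 256);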
index a6e42433ef0e0d5cde2d87739fb778e59336b705..604d3ef72ffab53552918c3cf6055ed7712f1b93 100644 (file)
@@ -232,6 +232,9 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
  */
 void drm_helper_disable_unused_functions(struct drm_device *dev)
 {
+       if (drm_core_check_feature(dev, DRIVER_ATOMIC))
+               DRM_ERROR("Called for atomic driver, this is not what you want.\n");
+
        drm_modeset_lock_all(dev);
        __drm_helper_disable_unused_functions(dev);
        drm_modeset_unlock_all(dev);
@@ -528,11 +531,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
        struct drm_device *dev;
-       struct drm_crtc *new_crtc;
-       struct drm_encoder *save_encoders, *new_encoder, *encoder;
+       struct drm_crtc **save_encoder_crtcs, *new_crtc;
+       struct drm_encoder **save_connector_encoders, *new_encoder, *encoder;
        bool mode_changed = false; /* if true do a full mode set */
        bool fb_changed = false; /* if true and !mode_changed just do a flip */
-       struct drm_connector *save_connectors, *connector;
+       struct drm_connector *connector;
        int count = 0, ro, fail = 0;
        const struct drm_crtc_helper_funcs *crtc_funcs;
        struct drm_mode_set save_set;
@@ -574,15 +577,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
         * Allocate space for the backup of all (non-pointer) encoder and
         * connector data.
         */
-       save_encoders = kzalloc(dev->mode_config.num_encoder *
-                               sizeof(struct drm_encoder), GFP_KERNEL);
-       if (!save_encoders)
+       save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
+                               sizeof(struct drm_crtc *), GFP_KERNEL);
+       if (!save_encoder_crtcs)
                return -ENOMEM;
 
-       save_connectors = kzalloc(dev->mode_config.num_connector *
-                               sizeof(struct drm_connector), GFP_KERNEL);
-       if (!save_connectors) {
-               kfree(save_encoders);
+       save_connector_encoders = kzalloc(dev->mode_config.num_connector *
+                               sizeof(struct drm_encoder *), GFP_KERNEL);
+       if (!save_connector_encoders) {
+               kfree(save_encoder_crtcs);
                return -ENOMEM;
        }
 
@@ -593,12 +596,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
         */
        count = 0;
        drm_for_each_encoder(encoder, dev) {
-               save_encoders[count++] = *encoder;
+               save_encoder_crtcs[count++] = encoder->crtc;
        }
 
        count = 0;
        drm_for_each_connector(connector, dev) {
-               save_connectors[count++] = *connector;
+               save_connector_encoders[count++] = connector->encoder;
        }
 
        save_set.crtc = set->crtc;
@@ -631,8 +634,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                mode_changed = true;
        }
 
-       /* take a reference on all connectors in set */
+       /* take a reference on all unbound connectors in set; reuse the
+        * reference already held for bound connectors
+        */
        for (ro = 0; ro < set->num_connectors; ro++) {
+               if (set->connectors[ro]->encoder)
+                       continue;
                drm_connector_reference(set->connectors[ro]);
        }
 
@@ -754,30 +761,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                }
        }
 
-       /* after fail drop reference on all connectors in save set */
-       count = 0;
-       drm_for_each_connector(connector, dev) {
-               drm_connector_unreference(&save_connectors[count++]);
-       }
-
-       kfree(save_connectors);
-       kfree(save_encoders);
+       kfree(save_connector_encoders);
+       kfree(save_encoder_crtcs);
        return 0;
 
 fail:
        /* Restore all previous data. */
        count = 0;
        drm_for_each_encoder(encoder, dev) {
-               *encoder = save_encoders[count++];
+               encoder->crtc = save_encoder_crtcs[count++];
        }
 
        count = 0;
        drm_for_each_connector(connector, dev) {
-               *connector = save_connectors[count++];
+               connector->encoder = save_connector_encoders[count++];
        }
 
-       /* after fail drop reference on all connectors in set */
+       /* after a failure, drop the reference on all unbound connectors in
+        * set; bound connectors keep their reference
+        */
        for (ro = 0; ro < set->num_connectors; ro++) {
+               if (set->connectors[ro]->encoder)
+                       continue;
                drm_connector_unreference(set->connectors[ro]);
        }
 
@@ -787,8 +792,8 @@ fail:
                                      save_set.y, save_set.fb))
                DRM_ERROR("failed to restore config after modeset failure\n");
 
-       kfree(save_connectors);
-       kfree(save_encoders);
+       kfree(save_connector_encoders);
+       kfree(save_encoder_crtcs);
        return ret;
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);
@@ -1121,36 +1126,3 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        return drm_plane_helper_commit(plane, plane_state, old_fb);
 }
 EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
-
-/**
- * drm_helper_crtc_enable_color_mgmt - enable color management properties
- * @crtc: DRM CRTC
- * @degamma_lut_size: the size of the degamma lut (before CSC)
- * @gamma_lut_size: the size of the gamma lut (after CSC)
- *
- * This function lets the driver enable the color correction properties on a
- * CRTC. This includes 3 degamma, csc and gamma properties that userspace can
- * set and 2 size properties to inform the userspace of the lut sizes.
- */
-void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
-                                      int degamma_lut_size,
-                                      int gamma_lut_size)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_mode_config *config = &dev->mode_config;
-
-       drm_object_attach_property(&crtc->base,
-                                  config->degamma_lut_property, 0);
-       drm_object_attach_property(&crtc->base,
-                                  config->ctm_property, 0);
-       drm_object_attach_property(&crtc->base,
-                                  config->gamma_lut_property, 0);
-
-       drm_object_attach_property(&crtc->base,
-                                  config->degamma_lut_size_property,
-                                  degamma_lut_size);
-       drm_object_attach_property(&crtc->base,
-                                  config->gamma_lut_size_property,
-                                  gamma_lut_size);
-}
-EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
index a78c138282ea7a90a0bacd09a127ada643db1a16..47a500b90fd74370c14b719d74035699556dd578 100644 (file)
  * and are not exported to drivers.
  */
 
+
+/* drm_crtc.c */
+void drm_connector_ida_init(void);
+void drm_connector_ida_destroy(void);
 int drm_mode_object_get(struct drm_device *dev,
                        struct drm_mode_object *obj, uint32_t obj_type);
 void drm_mode_object_unregister(struct drm_device *dev,
                                struct drm_mode_object *object);
+bool drm_property_change_valid_get(struct drm_property *property,
+                                  uint64_t value,
+                                  struct drm_mode_object **ref);
+void drm_property_change_valid_put(struct drm_property *property,
+                                  struct drm_mode_object *ref);
+
+int drm_plane_check_pixel_format(const struct drm_plane *plane,
+                                u32 format);
+int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+                           int x, int y,
+                           const struct drm_display_mode *mode,
+                           const struct drm_framebuffer *fb);
+
+void drm_fb_release(struct drm_file *file_priv);
+void drm_property_destroy_user_blobs(struct drm_device *dev,
+                                    struct drm_file *file_priv);
+
+/* dumb buffer support IOCTLs */
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv);
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv);
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+                               void *data, struct drm_file *file_priv);
+
+/* framebuffer IOCTLs */
+extern int drm_mode_addfb(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb2(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv);
+int drm_mode_rmfb(struct drm_device *dev,
+                        void *data, struct drm_file *file_priv);
+int drm_mode_getfb(struct drm_device *dev,
+                  void *data, struct drm_file *file_priv);
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv);
+
+/* IOCTLs */
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+                                     struct drm_file *file_priv);
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+                                   struct drm_file *file_priv);
+
+int drm_mode_getresources(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+                         struct drm_file *file_priv);
+int drm_mode_getcrtc(struct drm_device *dev,
+                    void *data, struct drm_file *file_priv);
+int drm_mode_getconnector(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+int drm_mode_setcrtc(struct drm_device *dev,
+                    void *data, struct drm_file *file_priv);
+int drm_mode_getplane(struct drm_device *dev,
+                     void *data, struct drm_file *file_priv);
+int drm_mode_setplane(struct drm_device *dev,
+                     void *data, struct drm_file *file_priv);
+int drm_mode_cursor_ioctl(struct drm_device *dev,
+                         void *data, struct drm_file *file_priv);
+int drm_mode_cursor2_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv);
+int drm_mode_getproperty_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv);
+int drm_mode_getblob_ioctl(struct drm_device *dev,
+                          void *data, struct drm_file *file_priv);
+int drm_mode_createblob_ioctl(struct drm_device *dev,
+                             void *data, struct drm_file *file_priv);
+int drm_mode_destroyblob_ioctl(struct drm_device *dev,
+                              void *data, struct drm_file *file_priv);
+int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+                                         void *data, struct drm_file *file_priv);
+int drm_mode_getencoder(struct drm_device *dev,
+                       void *data, struct drm_file *file_priv);
+int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv);
+int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv);
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+                            void *data, struct drm_file *file_priv);
 
 /* drm_atomic.c */
 int drm_atomic_get_property(struct drm_mode_object *obj,
-                          struct drm_property *property, uint64_t *val);
+                           struct drm_property *property, uint64_t *val);
 int drm_mode_atomic_ioctl(struct drm_device *dev,
                          void *data, struct drm_file *file_priv);
 
+int drm_modeset_register_all(struct drm_device *dev);
+void drm_modeset_unregister_all(struct drm_device *dev);
index 3bcf8e6a85b355437e7aca009a3a205b9ba8f428..fa10cef2ba37dff406a69b27067025ca77baa045 100644 (file)
 
 static const struct drm_info_list drm_debugfs_list[] = {
        {"name", drm_name_info, 0},
-       {"vm", drm_vm_info, 0},
        {"clients", drm_clients_info, 0},
-       {"bufs", drm_bufs_info, 0},
        {"gem_names", drm_gem_name_info, DRIVER_GEM},
-       {"vma", drm_vma_info, 0},
 };
 #define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
 
index 3334baacf43d8d98d15546f9bb7019348cc6a2fb..734f86a345f628edab95c7ab3c8652a4d9927e7b 100644 (file)
@@ -355,8 +355,7 @@ int drm_dp_aux_dev_init(void)
 
        drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
        if (IS_ERR(drm_dp_aux_dev_class)) {
-               res = PTR_ERR(drm_dp_aux_dev_class);
-               goto out;
+               return PTR_ERR(drm_dp_aux_dev_class);
        }
        drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
 
index eeaf5a7c3aa767a6676b9b6054a020b3fea87445..8f11b8741e425720b9ad25c622da7fa5b9bfbf48 100644 (file)
@@ -203,7 +203,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
 
                ret = aux->transfer(aux, &msg);
 
-               if (ret > 0) {
+               if (ret >= 0) {
                        native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
                        if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
                                if (ret == size)
@@ -708,8 +708,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 
        memset(&msg, 0, sizeof(msg));
 
-       mutex_lock(&aux->hw_mutex);
-
        for (i = 0; i < num; i++) {
                msg.address = msgs[i].addr;
                drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -764,8 +762,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
        msg.size = 0;
        (void)drm_dp_i2c_do_msg(aux, &msg);
 
-       mutex_unlock(&aux->hw_mutex);
-
        return err;
 }
 
@@ -774,22 +770,64 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
        .master_xfer = drm_dp_i2c_xfer,
 };
 
+static struct drm_dp_aux *i2c_to_aux(struct i2c_adapter *i2c)
+{
+       return container_of(i2c, struct drm_dp_aux, ddc);
+}
+
+static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+       mutex_lock(&i2c_to_aux(i2c)->hw_mutex);
+}
+
+static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+       return mutex_trylock(&i2c_to_aux(i2c)->hw_mutex);
+}
+
+static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
+{
+       mutex_unlock(&i2c_to_aux(i2c)->hw_mutex);
+}
+
 /**
- * drm_dp_aux_register() - initialise and register aux channel
+ * drm_dp_aux_init() - minimally initialise an aux channel
  * @aux: DisplayPort AUX channel
  *
- * Returns 0 on success or a negative error code on failure.
+ * If you need to use the drm_dp_aux's i2c adapter prior to registering it
+ * with the outside world, call drm_dp_aux_init() first. You must still
+ * call drm_dp_aux_register() once the connector has been registered to
+ * allow userspace access to the auxiliary DP channel.
  */
-int drm_dp_aux_register(struct drm_dp_aux *aux)
+void drm_dp_aux_init(struct drm_dp_aux *aux)
 {
-       int ret;
-
        mutex_init(&aux->hw_mutex);
 
        aux->ddc.algo = &drm_dp_i2c_algo;
        aux->ddc.algo_data = aux;
        aux->ddc.retries = 3;
 
+       aux->ddc.lock_bus = lock_bus;
+       aux->ddc.trylock_bus = trylock_bus;
+       aux->ddc.unlock_bus = unlock_bus;
+}
+EXPORT_SYMBOL(drm_dp_aux_init);
+
+/**
+ * drm_dp_aux_register() - initialise and register aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Automatically calls drm_dp_aux_init() if this hasn't been done yet.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_aux_register(struct drm_dp_aux *aux)
+{
+       int ret;
+
+       if (!aux->ddc.algo)
+               drm_dp_aux_init(aux);
+
        aux->ddc.class = I2C_CLASS_DDC;
        aux->ddc.owner = THIS_MODULE;
        aux->ddc.dev.parent = aux->dev;
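The split mirrors the usual two-step bring-up. A minimal sketch, assuming a hypothetical bridge driver that reads DPCD registers before its connector exists:

        /* probe time: the AUX channel and its i2c adapter are usable now */
        drm_dp_aux_init(&dp->aux);
        drm_dp_dpcd_readb(&dp->aux, DP_DPCD_REV, &rev);

        /* later, once the connector is registered: expose it to userspace */
        ret = drm_dp_aux_register(&dp->aux);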
index a13edf5de2d6daafc0e89849a6cdd1c324835345..04e457117980b8d554e3561139f52cdc53dedc0d 100644 (file)
@@ -1493,11 +1493,8 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
        WARN_ON(!mutex_is_locked(&mgr->qlock));
 
        /* construct a chunk from the first msg in the tx_msg queue */
-       if (list_empty(&mgr->tx_msg_downq)) {
-               mgr->tx_down_in_progress = false;
+       if (list_empty(&mgr->tx_msg_downq))
                return;
-       }
-       mgr->tx_down_in_progress = true;
 
        txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
        ret = process_single_tx_qlock(mgr, txmsg, false);
@@ -1512,10 +1509,6 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
                txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
                wake_up(&mgr->tx_waitq);
        }
-       if (list_empty(&mgr->tx_msg_downq)) {
-               mgr->tx_down_in_progress = false;
-               return;
-       }
 }
 
 /* called holding qlock */
@@ -1538,7 +1531,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 {
        mutex_lock(&mgr->qlock);
        list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
-       if (!mgr->tx_down_in_progress)
+       if (list_is_singular(&mgr->tx_msg_downq))
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
@@ -2372,6 +2365,7 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
 
 /**
  * drm_dp_mst_detect_port() - get connection status for an MST port
+ * @connector: DRM connector for this port
  * @mgr: manager for this port
  * @port: unverified pointer to a port
  *
@@ -2887,7 +2881,7 @@ static void drm_dp_tx_work(struct work_struct *work)
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
        mutex_lock(&mgr->qlock);
-       if (mgr->tx_down_in_progress)
+       if (!list_empty(&mgr->tx_msg_downq))
                process_single_down_tx_qlock(mgr);
        mutex_unlock(&mgr->qlock);
 }
@@ -2927,11 +2921,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                drm_dp_port_teardown_pdt(port, port->pdt);
 
                if (!port->input && port->vcpi.vcpi > 0) {
-                       if (mgr->mst_state) {
-                               drm_dp_mst_reset_vcpi_slots(mgr, port);
-                               drm_dp_update_payload_part1(mgr);
-                               drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-                       }
+                       drm_dp_mst_reset_vcpi_slots(mgr, port);
+                       drm_dp_update_payload_part1(mgr);
+                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
                }
 
                kref_put(&port->kref, drm_dp_free_mst_port);
index bff89226a344bb292071d26dff56efb98760f951..be27ed36f56e1bc13418b44f07d82531634cf4c4 100644 (file)
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/drm_core.h>
+#include "drm_crtc_internal.h"
 #include "drm_legacy.h"
 #include "drm_internal.h"
+#include "drm_crtc_internal.h"
 
 /*
  * drm_debug: Enable debug output.
@@ -93,114 +95,6 @@ void drm_ut_debug_printk(const char *function_name, const char *format, ...)
 }
 EXPORT_SYMBOL(drm_ut_debug_printk);
 
-struct drm_master *drm_master_create(struct drm_minor *minor)
-{
-       struct drm_master *master;
-
-       master = kzalloc(sizeof(*master), GFP_KERNEL);
-       if (!master)
-               return NULL;
-
-       kref_init(&master->refcount);
-       spin_lock_init(&master->lock.spinlock);
-       init_waitqueue_head(&master->lock.lock_queue);
-       idr_init(&master->magic_map);
-       master->minor = minor;
-
-       return master;
-}
-
-struct drm_master *drm_master_get(struct drm_master *master)
-{
-       kref_get(&master->refcount);
-       return master;
-}
-EXPORT_SYMBOL(drm_master_get);
-
-static void drm_master_destroy(struct kref *kref)
-{
-       struct drm_master *master = container_of(kref, struct drm_master, refcount);
-       struct drm_device *dev = master->minor->dev;
-
-       if (dev->driver->master_destroy)
-               dev->driver->master_destroy(dev, master);
-
-       drm_legacy_master_rmmaps(dev, master);
-
-       idr_destroy(&master->magic_map);
-       kfree(master->unique);
-       kfree(master);
-}
-
-void drm_master_put(struct drm_master **master)
-{
-       kref_put(&(*master)->refcount, drm_master_destroy);
-       *master = NULL;
-}
-EXPORT_SYMBOL(drm_master_put);
-
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv)
-{
-       int ret = 0;
-
-       mutex_lock(&dev->master_mutex);
-       if (file_priv->is_master)
-               goto out_unlock;
-
-       if (file_priv->minor->master) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
-       if (!file_priv->master) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
-       if (!file_priv->allowed_master) {
-               ret = drm_new_set_master(dev, file_priv);
-               goto out_unlock;
-       }
-
-       file_priv->minor->master = drm_master_get(file_priv->master);
-       file_priv->is_master = 1;
-       if (dev->driver->master_set) {
-               ret = dev->driver->master_set(dev, file_priv, false);
-               if (unlikely(ret != 0)) {
-                       file_priv->is_master = 0;
-                       drm_master_put(&file_priv->minor->master);
-               }
-       }
-
-out_unlock:
-       mutex_unlock(&dev->master_mutex);
-       return ret;
-}
-
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       int ret = -EINVAL;
-
-       mutex_lock(&dev->master_mutex);
-       if (!file_priv->is_master)
-               goto out_unlock;
-
-       if (!file_priv->minor->master)
-               goto out_unlock;
-
-       ret = 0;
-       if (dev->driver->master_drop)
-               dev->driver->master_drop(dev, file_priv, false);
-       drm_master_put(&file_priv->minor->master);
-       file_priv->is_master = 0;
-
-out_unlock:
-       mutex_unlock(&dev->master_mutex);
-       return ret;
-}
-
 /*
  * DRM Minors
  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
@@ -405,10 +299,9 @@ void drm_minor_release(struct drm_minor *minor)
  * callbacks implemented by the driver. The driver then needs to initialize all
  * the various subsystems for the drm device like memory management, vblank
  * handling, modesetting support and initial output configuration plus obviously
- * initialize all the corresponding hardware bits. An important part of this is
- * also calling drm_dev_set_unique() to set the userspace-visible unique name of
- * this device instance. Finally when everything is up and running and ready for
- * userspace the device instance can be published using drm_dev_register().
+ * initialize all the corresponding hardware bits. Finally when everything is up
+ * and running and ready for userspace the device instance can be published
+ * using drm_dev_register().
  *
  * There is also deprecated support for initializing device instances using
  * bus-specific helpers and the ->load() callback. But due to
@@ -430,6 +323,14 @@ void drm_minor_release(struct drm_minor *minor)
  * dev_priv field of &drm_device.
  */
 
+static int drm_dev_set_unique(struct drm_device *dev, const char *name)
+{
+       kfree(dev->unique);
+       dev->unique = kstrdup(name, GFP_KERNEL);
+
+       return dev->unique ? 0 : -ENOMEM;
+}
+
 /**
  * drm_put_dev - Unregister and release a DRM device
  * @dev: DRM device
@@ -461,9 +362,7 @@ EXPORT_SYMBOL(drm_put_dev);
 void drm_unplug_dev(struct drm_device *dev)
 {
        /* for a USB device */
-       drm_minor_unregister(dev, DRM_MINOR_LEGACY);
-       drm_minor_unregister(dev, DRM_MINOR_RENDER);
-       drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+       drm_dev_unregister(dev);
 
        mutex_lock(&drm_global_mutex);
 
@@ -549,11 +448,12 @@ static void drm_fs_inode_free(struct inode *inode)
 }
 
 /**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
+ * drm_dev_init - Initialise new DRM device
+ * @dev: DRM device
+ * @driver: DRM driver
  * @parent: Parent device object
  *
- * Allocate and initialize a new DRM device. No device registration is done.
+ * Initialize a new DRM device. No device registration is done.
  * Call drm_dev_register() to advertise the device to user space and register it
  * with other core subsystems. This should be done last in the device
  * initialization sequence to make sure userspace can't access an inconsistent
@@ -564,19 +464,18 @@ static void drm_fs_inode_free(struct inode *inode)
  *
  * Note that for purely virtual devices @parent can be NULL.
  *
+ * Drivers that do not want to allocate their own device struct
+ * embedding struct &drm_device can call drm_dev_alloc() instead.
+ *
  * RETURNS:
- * Pointer to new DRM device, or NULL if out of memory.
+ * 0 on success, or error code on failure.
  */
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
-                                struct device *parent)
+int drm_dev_init(struct drm_device *dev,
+                struct drm_driver *driver,
+                struct device *parent)
 {
-       struct drm_device *dev;
        int ret;
 
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev)
-               return NULL;
-
        kref_init(&dev->ref);
        dev->dev = parent;
        dev->driver = driver;
@@ -605,8 +504,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
                if (ret)
                        goto err_minors;
-
-               WARN_ON(driver->suspend || driver->resume);
        }
 
        if (drm_core_check_feature(dev, DRIVER_RENDER)) {
@@ -619,7 +516,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
        if (ret)
                goto err_minors;
 
-       if (drm_ht_create(&dev->map_hash, 12))
+       ret = drm_ht_create(&dev->map_hash, 12);
+       if (ret)
                goto err_minors;
 
        drm_legacy_ctxbitmap_init(dev);
@@ -632,13 +530,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                }
        }
 
-       if (parent) {
-               ret = drm_dev_set_unique(dev, dev_name(parent));
-               if (ret)
-                       goto err_setunique;
-       }
+       /* Use the parent device name as DRM device unique identifier, but fall
+        * back to the driver name for virtual devices like vgem. */
+       ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
+       if (ret)
+               goto err_setunique;
 
-       return dev;
+       return 0;
 
 err_setunique:
        if (drm_core_check_feature(dev, DRIVER_GEM))
@@ -653,8 +551,49 @@ err_minors:
        drm_fs_inode_free(dev->anon_inode);
 err_free:
        mutex_destroy(&dev->master_mutex);
-       kfree(dev);
-       return NULL;
+       return ret;
+}
+EXPORT_SYMBOL(drm_dev_init);
+
+/**
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems. This should be done last in the device
+ * initialization sequence to make sure userspace can't access an inconsistent
+ * state.
+ *
+ * The initial ref-count of the object is 1. Use drm_dev_ref() and
+ * drm_dev_unref() to take and drop further ref-counts.
+ *
+ * Note that for purely virtual devices @parent can be NULL.
+ *
+ * Drivers that wish to subclass or embed struct &drm_device into their
+ * own struct should look at using drm_dev_init() instead.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
+ */
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+                                struct device *parent)
+{
+       struct drm_device *dev;
+       int ret;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return NULL;
+
+       ret = drm_dev_init(dev, driver, parent);
+       if (ret) {
+               kfree(dev);
+               return NULL;
+       }
+
+       return dev;
 }
 EXPORT_SYMBOL(drm_dev_alloc);
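A sketch of the embedding pattern drm_dev_init() enables; struct foo_device and foo_driver are hypothetical:

        struct foo_device {
                struct drm_device drm;  /* upcast with container_of() */
                void __iomem *regs;
        };

        foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        ret = drm_dev_init(&foo->drm, &foo_driver, parent);
        if (ret) {
                kfree(foo);
                return ret;
        }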
 
@@ -718,11 +657,7 @@ EXPORT_SYMBOL(drm_dev_unref);
  *
  * Register the DRM device @dev with the system, advertise device to user-space
  * and start normal device operation. @dev must be allocated via drm_dev_alloc()
- * previously. Right after drm_dev_register() the driver should call
- * drm_connector_register_all() to register all connectors in sysfs. This is
- * a separate call for backward compatibility with drivers still using
- * the deprecated ->load() callback, where connectors are registered from within
- * the ->load() callback.
+ * previously.
  *
  * Never call this twice on any device!
  *
@@ -759,6 +694,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
                        goto err_minors;
        }
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_modeset_register_all(dev);
+
        ret = 0;
        goto out_unlock;
 
@@ -789,6 +727,9 @@ void drm_dev_unregister(struct drm_device *dev)
 
        drm_lastclose(dev);
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_modeset_unregister_all(dev);
+
        if (dev->driver->unload)
                dev->driver->unload(dev);
 
@@ -806,26 +747,6 @@ void drm_dev_unregister(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_dev_unregister);
 
-/**
- * drm_dev_set_unique - Set the unique name of a DRM device
- * @dev: device of which to set the unique name
- * @name: unique name
- *
- * Sets the unique name of a DRM device using the specified string. Drivers
- * can use this at driver probe time if the unique name of the devices they
- * drive is static.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_dev_set_unique(struct drm_device *dev, const char *name)
-{
-       kfree(dev->unique);
-       dev->unique = kstrdup(name, GFP_KERNEL);
-
-       return dev->unique ? 0 : -ENOMEM;
-}
-EXPORT_SYMBOL(drm_dev_set_unique);
-
 /*
  * DRM Core
  * The DRM core module initializes all global DRM objects and makes them
index 9a401aed98e058cd05558f5146c38eee862cbd07..622f788bff4603f7c97def996dc2838ca5e3dee5 100644 (file)
@@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
         * by commas, search through the list looking for one that
         * matches the connector.
         *
-        * If there's one or more that don't't specify a connector, keep
+        * If there's one or more that doesn't specify a connector, keep
         * the last one found as a fallback.
         */
        fwstr = kstrdup(edid_firmware, GFP_KERNEL);
index 172cafe11c7109397f554d50fcc1da9ba550937f..1fd6eac1400c1c92bae8edaac6a622a322d7ace0 100644 (file)
@@ -23,6 +23,7 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 
 #define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -52,7 +53,7 @@ struct drm_fbdev_cma {
  * will be set up automatically. dirty() is called by
  * drm_fb_helper_deferred_io() in process context (struct delayed_work).
  *
- * Example fbdev deferred io code:
+ * Example fbdev deferred io code::
  *
  *     static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
  *                                      struct drm_file *file_priv,
@@ -162,6 +163,10 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
  * drm_fb_cma_create_with_funcs() - helper function for the
  *                                  &drm_mode_config_funcs ->fb_create
  *                                  callback function
+ * @dev: DRM device
+ * @file_priv: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
+ * @funcs: vtable to be used for the new framebuffer object
  *
  * This can be used to set &drm_framebuffer_funcs for drivers that need the
  * dirty() callback. Use drm_fb_cma_create() if you don't need to change
@@ -223,6 +228,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
 
 /**
  * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
+ * @dev: DRM device
+ * @file_priv: drm file for the ioctl call
+ * @mode_cmd: metadata from the userspace fb creation request
  *
  * If your hardware has special alignment or pitch requirements these should be
  * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
@@ -246,7 +254,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create);
  * This function will usually be called from the CRTC callback functions.
  */
 struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
-       unsigned int plane)
+                                                 unsigned int plane)
 {
        struct drm_fb_cma *fb_cma = to_fb_cma(fb);
 
@@ -258,10 +266,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
 EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
 
 #ifdef CONFIG_DEBUG_FS
-/*
- * drm_fb_cma_describe() - Helper to dump information about a single
- * CMA framebuffer object
- */
 static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
        struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -279,7 +283,9 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
 
 /**
  * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
- * in debugfs.
+ *                            in debugfs.
+ * @m: output file
+ * @arg: private data for the callback
  */
 int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
 {
@@ -297,6 +303,12 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
 EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
 #endif
 
+static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+       return dma_mmap_writecombine(info->device, vma, info->screen_base,
+                                    info->fix.smem_start, info->fix.smem_len);
+}
+
 static struct fb_ops drm_fbdev_cma_ops = {
        .owner          = THIS_MODULE,
        .fb_fillrect    = drm_fb_helper_sys_fillrect,
@@ -307,6 +319,7 @@ static struct fb_ops drm_fbdev_cma_ops = {
        .fb_blank       = drm_fb_helper_blank,
        .fb_pan_display = drm_fb_helper_pan_display,
        .fb_setcmap     = drm_fb_helper_setcmap,
+       .fb_mmap        = drm_fb_cma_mmap,
 };
 
 static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
@@ -333,6 +346,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
        fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
        if (!fbdefio || !fbops) {
                kfree(fbdefio);
+               kfree(fbops);
                return -ENOMEM;
        }
 
@@ -445,7 +459,7 @@ err_cma_destroy:
 err_fb_info_destroy:
        drm_fb_helper_release_fbi(helper);
 err_gem_free_object:
-       dev->driver->gem_free_object(&obj->base);
+       drm_gem_object_unreference_unlocked(&obj->base);
        return ret;
 }
 EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
@@ -582,3 +596,18 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
                drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
 }
 EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
+
+/**
+ * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
+ * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
+ * @state: desired state, zero to resume, non-zero to suspend
+ *
+ * Calls drm_fb_helper_set_suspend(), which is a wrapper around
+ * fb_set_suspend() implemented by the fbdev core.
+ */
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
+{
+       if (fbdev_cma)
+               drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
+}
+EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
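A plausible call site, assuming a driver that keeps the drm_fbdev_cma pointer in its device data (all names here are illustrative):

        static int foo_pm_suspend(struct device *dev)
        {
                struct foo_private *priv = dev_get_drvdata(dev);

                /* NULL-safe: the helper checks fbdev_cma itself */
                drm_fbdev_cma_set_suspend(priv->fbdev, 1);
                return 0;
        }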
index 7c2eb75db60f102d9526aebab1a6f7993e124f9b..ce54e985d91ba0b2ee11000c6aa499cffb4a1bba 100644 (file)
@@ -227,7 +227,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
        g_base = r_base + crtc->gamma_size;
        b_base = g_base + crtc->gamma_size;
 
-       crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+       crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
 }
 
 /**
@@ -385,7 +385,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 
        drm_warn_on_modeset_not_all_locked(dev);
 
-       if (fb_helper->atomic)
+       if (dev->mode_config.funcs->atomic_commit)
                return restore_fbdev_mode_atomic(fb_helper);
 
        drm_for_each_plane(plane, dev) {
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
 
        /* Sometimes user space wants everything disabled, so don't steal the
         * display if there's a master. */
-       if (dev->primary->master)
+       if (lockless_dereference(dev->master))
                return false;
 
        drm_for_each_crtc(crtc, dev) {
@@ -716,8 +716,6 @@ int drm_fb_helper_init(struct drm_device *dev,
                i++;
        }
 
-       fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC);
-
        return 0;
 out_free:
        drm_fb_helper_crtc_free(fb_helper);
@@ -1042,7 +1040,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 {
        struct drm_fb_helper *fb_helper = info->par;
        struct drm_framebuffer *fb = fb_helper->fb;
-       int pindex;
 
        if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
                u32 *palette;
@@ -1074,38 +1071,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
                    !fb_helper->funcs->gamma_get))
                return -EINVAL;
 
-       pindex = regno;
-
-       if (fb->bits_per_pixel == 16) {
-               pindex = regno << 3;
-
-               if (fb->depth == 16 && regno > 63)
-                       return -EINVAL;
-               if (fb->depth == 15 && regno > 31)
-                       return -EINVAL;
-
-               if (fb->depth == 16) {
-                       u16 r, g, b;
-                       int i;
-                       if (regno < 32) {
-                               for (i = 0; i < 8; i++)
-                                       fb_helper->funcs->gamma_set(crtc, red,
-                                               green, blue, pindex + i);
-                       }
+       WARN_ON(fb->bits_per_pixel != 8);
 
-                       fb_helper->funcs->gamma_get(crtc, &r,
-                                                   &g, &b,
-                                                   pindex >> 1);
+       fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
 
-                       for (i = 0; i < 4; i++)
-                               fb_helper->funcs->gamma_set(crtc, r,
-                                                           green, b,
-                                                           (pindex >> 1) + i);
-               }
-       }
-
-       if (fb->depth != 16)
-               fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
        return 0;
 }
 
@@ -1373,7 +1342,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
                return -EBUSY;
        }
 
-       if (fb_helper->atomic) {
+       if (dev->mode_config.funcs->atomic_commit) {
                ret = pan_display_atomic(var, info);
                goto unlock;
        }
@@ -2000,7 +1969,18 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
                my_score++;
 
        connector_funcs = connector->helper_private;
-       encoder = connector_funcs->best_encoder(connector);
+
+       /*
+        * If the DRM device implements atomic hooks and ->best_encoder() is
+        * NULL we fallback to the default drm_atomic_helper_best_encoder()
+        * helper.
+        */
+       if (fb_helper->dev->mode_config.funcs->atomic_commit &&
+           !connector_funcs->best_encoder)
+               encoder = drm_atomic_helper_best_encoder(connector);
+       else
+               encoder = connector_funcs->best_encoder(connector);
+
        if (!encoder)
                goto out;
 
index 7af7f8bcb3558244781c0ac4f486452e852deca9..323c238fcac70426f947977649ccd38442693e03 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/module.h>
 #include "drm_legacy.h"
 #include "drm_internal.h"
+#include "drm_crtc_internal.h"
 
 /* from BKL pushdown */
 DEFINE_MUTEX(drm_global_mutex);
@@ -67,7 +68,7 @@ DEFINE_MUTEX(drm_global_mutex);
  * specific implementations. For GEM-based drivers this is drm_gem_mmap().
  *
  * No other file operations are supported by the DRM userspace API. Overall the
- * following is an example #file_operations structure:
+ * following is an example #file_operations structure::
  *
  *     static const example_drm_fops = {
  *             .owner = THIS_MODULE,
@@ -167,60 +168,6 @@ static int drm_cpu_valid(void)
        return 1;
 }
 
-/*
- * drm_new_set_master - Allocate a new master object and become master for the
- * associated master realm.
- *
- * @dev: The associated device.
- * @fpriv: File private identifying the client.
- *
- * This function must be called with dev::struct_mutex held.
- * Returns negative error code on failure. Zero on success.
- */
-int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
-{
-       struct drm_master *old_master;
-       int ret;
-
-       lockdep_assert_held_once(&dev->master_mutex);
-
-       /* create a new master */
-       fpriv->minor->master = drm_master_create(fpriv->minor);
-       if (!fpriv->minor->master)
-               return -ENOMEM;
-
-       /* take another reference for the copy in the local file priv */
-       old_master = fpriv->master;
-       fpriv->master = drm_master_get(fpriv->minor->master);
-
-       if (dev->driver->master_create) {
-               ret = dev->driver->master_create(dev, fpriv->master);
-               if (ret)
-                       goto out_err;
-       }
-       if (dev->driver->master_set) {
-               ret = dev->driver->master_set(dev, fpriv, true);
-               if (ret)
-                       goto out_err;
-       }
-
-       fpriv->is_master = 1;
-       fpriv->allowed_master = 1;
-       fpriv->authenticated = 1;
-       if (old_master)
-               drm_master_put(&old_master);
-
-       return 0;
-
-out_err:
-       /* drop both references and restore old master on failure */
-       drm_master_put(&fpriv->minor->master);
-       drm_master_put(&fpriv->master);
-       fpriv->master = old_master;
-
-       return ret;
-}
-
 /*
  * Called whenever a process opens /dev/drm.
  *
@@ -283,19 +230,11 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
                        goto out_prime_destroy;
        }
 
-       /* if there is no current master make this fd it, but do not create
-        * any master object for render clients */
-       mutex_lock(&dev->master_mutex);
-       if (drm_is_primary_client(priv) && !priv->minor->master) {
-               /* create a new master */
-               ret = drm_new_set_master(dev, priv);
+       if (drm_is_primary_client(priv)) {
+               ret = drm_master_open(priv);
                if (ret)
                        goto out_close;
-       } else if (drm_is_primary_client(priv)) {
-               /* get a reference to the master */
-               priv->master = drm_master_get(priv->minor->master);
        }
-       mutex_unlock(&dev->master_mutex);
 
        mutex_lock(&dev->filelist_mutex);
        list_add(&priv->lhead, &dev->filelist);
@@ -324,7 +263,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
        return 0;
 
 out_close:
-       mutex_unlock(&dev->master_mutex);
        if (dev->driver->postclose)
                dev->driver->postclose(dev, priv);
 out_prime_destroy:
@@ -338,18 +276,6 @@ out_prime_destroy:
        return ret;
 }
 
-static void drm_master_release(struct drm_device *dev, struct file *filp)
-{
-       struct drm_file *file_priv = filp->private_data;
-
-       if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
-               DRM_DEBUG("File %p released, freeing lock for context %d\n",
-                         filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
-               drm_legacy_lock_free(&file_priv->master->lock,
-                                    _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
-       }
-}
-
 static void drm_events_release(struct drm_file *file_priv)
 {
        struct drm_device *dev = file_priv->minor->dev;
@@ -368,7 +294,7 @@ static void drm_events_release(struct drm_file *file_priv)
        /* Remove unconsumed events */
        list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
                list_del(&e->link);
-               e->destroy(e);
+               kfree(e);
        }
 
        spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -451,11 +377,6 @@ int drm_release(struct inode *inode, struct file *filp)
        list_del(&file_priv->lhead);
        mutex_unlock(&dev->filelist_mutex);
 
-       mutex_lock(&dev->struct_mutex);
-       if (file_priv->magic)
-               idr_remove(&file_priv->master->magic_map, file_priv->magic);
-       mutex_unlock(&dev->struct_mutex);
-
        if (dev->driver->preclose)
                dev->driver->preclose(dev, file_priv);
 
@@ -468,9 +389,8 @@ int drm_release(struct inode *inode, struct file *filp)
                  (long)old_encode_dev(file_priv->minor->kdev->devt),
                  dev->open_count);
 
-       /* if the master has gone away we can't do anything with the lock */
-       if (file_priv->minor->master)
-               drm_master_release(dev, filp);
+       if (!drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_legacy_lock_release(dev, filp);
 
        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                drm_legacy_reclaim_buffers(dev, file_priv);
@@ -487,43 +407,12 @@ int drm_release(struct inode *inode, struct file *filp)
 
        drm_legacy_ctxbitmap_flush(dev, file_priv);
 
-       mutex_lock(&dev->master_mutex);
-
-       if (file_priv->is_master) {
-               struct drm_master *master = file_priv->master;
-
-               /*
-                * Since the master is disappearing, so is the
-                * possibility to lock.
-                */
-               mutex_lock(&dev->struct_mutex);
-               if (master->lock.hw_lock) {
-                       if (dev->sigdata.lock == master->lock.hw_lock)
-                               dev->sigdata.lock = NULL;
-                       master->lock.hw_lock = NULL;
-                       master->lock.file_priv = NULL;
-                       wake_up_interruptible_all(&master->lock.lock_queue);
-               }
-               mutex_unlock(&dev->struct_mutex);
-
-               if (file_priv->minor->master == file_priv->master) {
-                       /* drop the reference held my the minor */
-                       if (dev->driver->master_drop)
-                               dev->driver->master_drop(dev, file_priv, true);
-                       drm_master_put(&file_priv->minor->master);
-               }
-       }
-
-       /* drop the master reference held by the file priv */
-       if (file_priv->master)
-               drm_master_put(&file_priv->master);
-       file_priv->is_master = 0;
-       mutex_unlock(&dev->master_mutex);
+       if (drm_is_primary_client(file_priv))
+               drm_master_release(file_priv);
 
        if (dev->driver->postclose)
                dev->driver->postclose(dev, file_priv);
 
-
        if (drm_core_check_feature(dev, DRIVER_PRIME))
                drm_prime_destroy_file_private(&file_priv->prime);
 
@@ -636,7 +525,7 @@ put_back_event:
                        }
 
                        ret += length;
-                       e->destroy(e);
+                       kfree(e);
                }
        }
        mutex_unlock(&file_priv->event_read_lock);
@@ -713,9 +602,6 @@ int drm_event_reserve_init_locked(struct drm_device *dev,
        list_add(&p->pending_link, &file_priv->pending_event_list);
        p->file_priv = file_priv;
 
-       /* we *could* pass this in as arg, but everyone uses kfree: */
-       p->destroy = (void (*) (struct drm_pending_event *)) kfree;
-
        return 0;
 }
 EXPORT_SYMBOL(drm_event_reserve_init_locked);
@@ -778,7 +664,7 @@ void drm_event_cancel_free(struct drm_device *dev,
                list_del(&p->pending_link);
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
-       p->destroy(p);
+       kfree(p);
 }
 EXPORT_SYMBOL(drm_event_cancel_free);
 
@@ -800,8 +686,19 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
 {
        assert_spin_locked(&dev->event_lock);
 
+       if (e->completion) {
+               /* ->completion might disappear as soon as it signalled. */
+               complete_all(e->completion);
+               e->completion = NULL;
+       }
+
+       if (e->fence) {
+               fence_signal(e->fence);
+               fence_put(e->fence);
+       }
+
        if (!e->file_priv) {
-               e->destroy(e);
+               kfree(e);
                return;
        }
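
[The new ->completion hook above exists so a blocking waiter can sleep on a
stack-allocated struct completion that the event points at; since the pointer
is cleared as soon as it is signalled, the waiter may return right after it
wakes. A hedged sketch of the waiting side (locking per this file, the
surrounding commit code is hypothetical):

    DECLARE_COMPLETION_ONSTACK(done);

    spin_lock_irq(&dev->event_lock);
    e->completion = &done;
    /* ... queue or arm the event as usual ... */
    spin_unlock_irq(&dev->event_lock);

    /* Safe: drm_send_event_locked() forgets the pointer after
     * complete_all(), so `done` may go out of scope once we wake. */
    wait_for_completion(&done);
]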
 
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
new file mode 100644 (file)
index 0000000..0645c85
--- /dev/null
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * DRM core format related functions
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/bug.h>
+#include <linux/ctype.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fourcc.h>
+
+static char printable_char(int c)
+{
+       return isascii(c) && isprint(c) ? c : '?';
+}
+
+/**
+ * drm_get_format_name - return a string for drm fourcc format
+ * @format: format to compute name of
+ *
+ * Note that the buffer used by this function is globally shared and owned by
+ * the function itself.
+ *
+ * FIXME: This isn't really multithreading safe.
+ */
+const char *drm_get_format_name(uint32_t format)
+{
+       static char buf[32];
+
+       snprintf(buf, sizeof(buf),
+                "%c%c%c%c %s-endian (0x%08x)",
+                printable_char(format & 0xff),
+                printable_char((format >> 8) & 0xff),
+                printable_char((format >> 16) & 0xff),
+                printable_char((format >> 24) & 0x7f),
+                format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
+                format);
+
+       return buf;
+}
+EXPORT_SYMBOL(drm_get_format_name);
+
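
[As an aside on what that name encodes: a drm fourcc packs four printable
characters plus an endianness flag, exactly as unpacked above. A minimal
userspace sketch; the fourcc_code() packing mirrors the uapi macro in
include/uapi/drm/drm_fourcc.h, and the sample format is a real one:

    #include <stdint.h>
    #include <stdio.h>

    /* Same packing as the uapi fourcc_code() macro. */
    #define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
                                     ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

    int main(void)
    {
            uint32_t fmt = fourcc_code('X', 'R', '2', '4'); /* DRM_FORMAT_XRGB8888 */

            /* Prints "XR24", matching drm_get_format_name()'s output. */
            printf("%c%c%c%c\n", (int)(fmt & 0xff), (int)((fmt >> 8) & 0xff),
                   (int)((fmt >> 16) & 0xff), (int)((fmt >> 24) & 0x7f));
            return 0;
    }
]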
+/**
+ * drm_fb_get_bpp_depth - get the bpp/depth values for format
+ * @format: pixel format (DRM_FORMAT_*)
+ * @depth: storage for the depth value
+ * @bpp: storage for the bpp value
+ *
+ * This only supports RGB formats here for compat with code that doesn't use
+ * pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+                         int *bpp)
+{
+       switch (format) {
+       case DRM_FORMAT_C8:
+       case DRM_FORMAT_RGB332:
+       case DRM_FORMAT_BGR233:
+               *depth = 8;
+               *bpp = 8;
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_XBGR1555:
+       case DRM_FORMAT_RGBX5551:
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_ARGB1555:
+       case DRM_FORMAT_ABGR1555:
+       case DRM_FORMAT_RGBA5551:
+       case DRM_FORMAT_BGRA5551:
+               *depth = 15;
+               *bpp = 16;
+               break;
+       case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
+               *depth = 16;
+               *bpp = 16;
+               break;
+       case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
+               *depth = 24;
+               *bpp = 24;
+               break;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_BGRX8888:
+               *depth = 24;
+               *bpp = 32;
+               break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_XBGR2101010:
+       case DRM_FORMAT_RGBX1010102:
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_ARGB2101010:
+       case DRM_FORMAT_ABGR2101010:
+       case DRM_FORMAT_RGBA1010102:
+       case DRM_FORMAT_BGRA1010102:
+               *depth = 30;
+               *bpp = 32;
+               break;
+       case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_RGBA8888:
+       case DRM_FORMAT_BGRA8888:
+               *depth = 32;
+               *bpp = 32;
+               break;
+       default:
+               DRM_DEBUG_KMS("unsupported pixel format %s\n",
+                             drm_get_format_name(format));
+               *depth = 0;
+               *bpp = 0;
+               break;
+       }
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+
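
[For a sense of how bpp and depth differ: XRGB8888 carries 24 significant
bits (depth) in a 32-bit word (bpp), and the legacy pitch is derived from
bpp, not depth. A hedged fragment, assuming <drm/drm_fourcc.h> and a plain
linear layout:

    unsigned int depth;
    int bpp;

    drm_fb_get_bpp_depth(DRM_FORMAT_XRGB8888, &depth, &bpp);
    /* Here depth == 24 and bpp == 32. Legacy linear pitch for a
     * 1920-pixel-wide scanline comes from bpp: */
    unsigned int pitch = 1920 * bpp / 8;        /* 7680 bytes */
]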
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 3;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_NV24:
+       case DRM_FORMAT_NV42:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
+
+/**
+ * drm_format_plane_cpp - determine the bytes per pixel value
+ * @format: pixel format (DRM_FORMAT_*)
+ * @plane: plane index
+ *
+ * Returns:
+ * The bytes per pixel value for the specified plane.
+ */
+int drm_format_plane_cpp(uint32_t format, int plane)
+{
+       unsigned int depth;
+       int bpp;
+
+       if (plane >= drm_format_num_planes(format))
+               return 0;
+
+       switch (format) {
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               return 2;
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_NV24:
+       case DRM_FORMAT_NV42:
+               return plane ? 2 : 1;
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV444:
+       case DRM_FORMAT_YVU444:
+               return 1;
+       default:
+               drm_fb_get_bpp_depth(format, &depth, &bpp);
+               return bpp >> 3;
+       }
+}
+EXPORT_SYMBOL(drm_format_plane_cpp);
+
+/**
+ * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The horizontal chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_horz_chroma_subsampling(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV411:
+       case DRM_FORMAT_YVU411:
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+               return 4;
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+       case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
+       case DRM_FORMAT_YUV422:
+       case DRM_FORMAT_YVU422:
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
+
+/**
+ * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * Returns:
+ * The vertical chroma subsampling factor for the
+ * specified pixel format.
+ */
+int drm_format_vert_chroma_subsampling(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_YUV410:
+       case DRM_FORMAT_YVU410:
+               return 4;
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+               return 2;
+       default:
+               return 1;
+       }
+}
+EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
+
+/**
+ * drm_format_plane_width - width of the plane given the first plane
+ * @width: width of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The width of @plane, given that the width of the first plane is @width.
+ */
+int drm_format_plane_width(int width, uint32_t format, int plane)
+{
+       if (plane >= drm_format_num_planes(format))
+               return 0;
+
+       if (plane == 0)
+               return width;
+
+       return width / drm_format_horz_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_width);
+
+/**
+ * drm_format_plane_height - height of the plane given the first plane
+ * @height: height of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The height of @plane, given that the height of the first plane is @height.
+ */
+int drm_format_plane_height(int height, uint32_t format, int plane)
+{
+       if (plane >= drm_format_num_planes(format))
+               return 0;
+
+       if (plane == 0)
+               return height;
+
+       return height / drm_format_vert_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_height);
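
[Taken together, these helpers give per-plane buffer sizes. A hedged sketch
for a tightly packed linear layout; fb_size() is hypothetical and ignores
the pitch/offset alignment real hardware usually requires:

    /* Sketch: total size of a linear, tightly packed multi-planar buffer. */
    static size_t fb_size(uint32_t format, int width, int height)
    {
            int i, planes = drm_format_num_planes(format);
            size_t size = 0;

            for (i = 0; i < planes; i++) {
                    int w = drm_format_plane_width(width, format, i);
                    int h = drm_format_plane_height(height, format, i);

                    size += (size_t)w * drm_format_plane_cpp(format, i) * h;
            }
            return size;
    }

    /* NV12 at 1920x1080: 1920*1080*1 + 960*540*2 = 3110400 bytes (w*h*3/2). */
]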
index 32156060b9c96bb9ccfe0c0b06dc5057a0160fb6..5c19dde1cd317b95faa1ef600c9739254659e7c9 100644 (file)
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release);
  * @kref: kref of the object to free
  *
  * Called after the last reference to the object has been lost.
- * Must be called holding struct_ mutex
+ * Must be called holding &drm_device->struct_mutex.
  *
  * Frees the object
  */
index e1ab008b3f0841675d5b06ae6ceb4fc037b985f5..1d6c335584ec37d1ea399ccb4a07fd6e0120064e 100644 (file)
@@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
        return cma_obj;
 
 error:
-       drm->driver->gem_free_object(&cma_obj->base);
+       drm_gem_object_unreference_unlocked(&cma_obj->base);
        return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
         * and handle has the id what user can see.
         */
        ret = drm_gem_handle_create(file_priv, gem_obj, handle);
-       if (ret)
-               goto err_handle_create;
-
        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(gem_obj);
+       if (ret)
+               return ERR_PTR(ret);
 
        return cma_obj;
-
-err_handle_create:
-       drm->driver->gem_free_object(gem_obj);
-
-       return ERR_PTR(ret);
 }
 
 /**
index 5d469b2f26f46c611c3a2dca3c6359a813d3aab2..9ae353f4dd06a562cd226292e6e53b0b5517c5d5 100644 (file)
@@ -50,106 +50,24 @@ int drm_name_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_minor *minor = node->minor;
        struct drm_device *dev = minor->dev;
-       struct drm_master *master = minor->master;
-       if (!master)
-               return 0;
-
-       if (master->unique) {
-               seq_printf(m, "%s %s %s\n",
-                          dev->driver->name,
-                          dev_name(dev->dev), master->unique);
-       } else {
-               seq_printf(m, "%s %s\n",
-                          dev->driver->name, dev_name(dev->dev));
-       }
-       return 0;
-}
-
-/**
- * Called when "/proc/dri/.../vm" is read.
- *
- * Prints information about all mappings in drm_device::maplist.
- */
-int drm_vm_info(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_local_map *map;
-       struct drm_map_list *r_list;
-
-       /* Hardcoded from _DRM_FRAME_BUFFER,
-          _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-          _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
-       const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
-       const char *type;
-       int i;
-
-       mutex_lock(&dev->struct_mutex);
-       seq_printf(m, "slot      offset       size type flags    address mtrr\n\n");
-       i = 0;
-       list_for_each_entry(r_list, &dev->maplist, head) {
-               map = r_list->map;
-               if (!map)
-                       continue;
-               if (map->type < 0 || map->type > 5)
-                       type = "??";
-               else
-                       type = types[map->type];
-
-               seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s  0x%02x 0x%08lx ",
-                          i,
-                          (unsigned long long)map->offset,
-                          map->size, type, map->flags,
-                          (unsigned long) r_list->user_token);
-               if (map->mtrr < 0)
-                       seq_printf(m, "none\n");
-               else
-                       seq_printf(m, "%4d\n", map->mtrr);
-               i++;
-       }
-       mutex_unlock(&dev->struct_mutex);
-       return 0;
-}
+       struct drm_master *master;
 
-/**
- * Called when "/proc/dri/.../bufs" is read.
- */
-int drm_bufs_info(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_device_dma *dma;
-       int i, seg_pages;
-
-       mutex_lock(&dev->struct_mutex);
-       dma = dev->dma;
-       if (!dma) {
-               mutex_unlock(&dev->struct_mutex);
-               return 0;
-       }
-
-       seq_printf(m, " o     size count  free   segs pages    kB\n\n");
-       for (i = 0; i <= DRM_MAX_ORDER; i++) {
-               if (dma->bufs[i].buf_count) {
-                       seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
-                       seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
-                                  i,
-                                  dma->bufs[i].buf_size,
-                                  dma->bufs[i].buf_count,
-                                  0,
-                                  dma->bufs[i].seg_count,
-                                  seg_pages,
-                                  seg_pages * PAGE_SIZE / 1024);
-               }
-       }
-       seq_printf(m, "\n");
-       for (i = 0; i < dma->buf_count; i++) {
-               if (i && !(i % 32))
-                       seq_printf(m, "\n");
-               seq_printf(m, " %d", dma->buflist[i]->list);
-       }
+       mutex_lock(&dev->master_mutex);
+       master = dev->master;
+       if (!master)
+               goto out_unlock;
+
+       seq_printf(m, "%s", dev->driver->name);
+       if (dev->dev)
+               seq_printf(m, " dev=%s", dev_name(dev->dev));
+       if (master && master->unique)
+               seq_printf(m, " master=%s", master->unique);
+       if (dev->unique)
+               seq_printf(m, " unique=%s", dev->unique);
        seq_printf(m, "\n");
-       mutex_unlock(&dev->struct_mutex);
+out_unlock:
+       mutex_unlock(&dev->master_mutex);
+
        return 0;
 }
 
@@ -184,7 +102,7 @@ int drm_clients_info(struct seq_file *m, void *data)
                           task ? task->comm : "<unknown>",
                           pid_vnr(priv->pid),
                           priv->minor->index,
-                          priv->is_master ? 'y' : 'n',
+                          drm_is_current_master(priv) ? 'y' : 'n',
                           priv->authenticated ? 'y' : 'n',
                           from_kuid_munged(seq_user_ns(m), priv->uid),
                           priv->magic);
@@ -194,7 +112,6 @@ int drm_clients_info(struct seq_file *m, void *data)
        return 0;
 }
 
-
 static int drm_gem_one_name_info(int id, void *ptr, void *data)
 {
        struct drm_gem_object *obj = ptr;
index 902cf6a152128231f60bb15aab51028262d17f8f..b86dc9b921a5e8bfc806d3f1e4a4107125335306 100644 (file)
@@ -29,15 +29,9 @@ extern struct mutex drm_global_mutex;
 void drm_lastclose(struct drm_device *dev);
 
 /* drm_pci.c */
-int drm_pci_set_unique(struct drm_device *dev,
-                      struct drm_master *master,
-                      struct drm_unique *u);
 int drm_irq_by_busid(struct drm_device *dev, void *data,
                     struct drm_file *file_priv);
 
-/* drm_vm.c */
-int drm_vma_info(struct seq_file *m, void *data);
-
 /* drm_prime.c */
 int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);
@@ -51,8 +45,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
 
 /* drm_info.c */
 int drm_name_info(struct seq_file *m, void *data);
-int drm_vm_info(struct seq_file *m, void *data);
-int drm_bufs_info(struct seq_file *m, void *data);
 int drm_clients_info(struct seq_file *m, void* data);
 int drm_gem_name_info(struct seq_file *m, void *data);
 
@@ -67,6 +59,12 @@ int drm_getmagic(struct drm_device *dev, void *data,
                 struct drm_file *file_priv);
 int drm_authmagic(struct drm_device *dev, void *data,
                  struct drm_file *file_priv);
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv);
+int drm_master_open(struct drm_file *file_priv);
+void drm_master_release(struct drm_file *file_priv);
 
 /* drm_sysfs.c */
 extern struct class *drm_class;
@@ -92,13 +90,6 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
 void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
 void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
 
-/* drm_drv.c */
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv);
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv);
-struct drm_master *drm_master_create(struct drm_minor *minor);
-
 /* drm_debugfs.c */
 #if defined(CONFIG_DEBUG_FS)
 int drm_debugfs_init(struct drm_minor *minor, int minor_id,
index b7a39771c152e0b48ff4fd8cce4a6e9ab7fcbe95..33af4a5ddca153096067df91ae7400383ddbdadf 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_core.h>
+#include <drm/drm_auth.h>
 #include "drm_legacy.h"
 #include "drm_internal.h"
 #include "drm_crtc_internal.h"
 #include <linux/pci.h>
 #include <linux/export.h>
 
+/**
+ * DOC: getunique and setversion story
+ *
+ * BEWARE THE DRAGONS! MIND THE TRAPDOORS!
+ *
+ * In an attempt to warn anyone else who's trying to figure out what's going
+ * on here, I'll try to summarize the story. First things first, let's clear up
+ * the names, because the kernel internals, libdrm and the ioctls are all named
+ * differently:
+ *
+ *  - GET_UNIQUE ioctl, implemented by drm_getunique is wrapped up in libdrm
+ *    through the drmGetBusid function.
+ *  - The libdrm drmSetBusid function is backed by the SET_UNIQUE ioctl. All
+ *    that code is nerved in the kernel with drm_invalid_op().
+ *  - The internal set_busid kernel functions and driver callbacks are
+ *    exclusively used by the SET_VERSION ioctl, because only drm 1.0 (which is
+ *    nerved) allowed userspace to set the busid through the above ioctl.
+ *  - Other ioctls and functions involved are named consistently.
+ *
+ * For anyone wondering about the difference between drm 1.1 and 1.4: correct
+ * handling of pci domains in the busid on ppc. Doing this correctly was only
+ * implemented in libdrm in 2010, hence it can't be nerved yet. No one knows
+ * what's special about drm 1.2 and 1.3.
+ *
+ * Now for the actual horror story of how device lookup in drm works. By and
+ * large, there are two different ways: either by busid, or by device driver
+ * name.
+ *
+ * Opening by busid is fairly simple:
+ *
+ * 1. First call SET_VERSION to make sure pci domains are handled properly. As a
+ *    side-effect this fills out the unique name in the master structure.
+ * 2. Call GET_UNIQUE to read out the unique name from the master structure,
+ *    which matches the busid thanks to step 1. If it doesn't, proceed to try
+ *    the next device node.
+ *
+ * Opening by name is slightly different:
+ *
+ * 1. Directly call VERSION to get the version and to match against the driver
+ *    name returned by that ioctl. Note that SET_VERSION is not called, which
+ *    means the unique name for the master node just opened is _not_ filled
+ *    out. This despite the fact that current drm device nodes are always
+ *    bound to one device, and can't be runtime assigned like with drm 1.0.
+ * 2. Match driver name. If it mismatches, proceed to the next device node.
+ * 3. Call GET_UNIQUE, and check whether the unique name has length zero (by
+ *    checking that the first byte in the string is 0). If that's not the case
+ *    libdrm skips and proceeds to the next device node. Probably this is just
+ *    copypasta from drm 1.0 times where a set unique name meant that the driver
+ *    was in use already, but that's just conjecture.
+ *
+ * Long story short: To keep the open by name logic working, GET_UNIQUE must
+ * _not_ return a unique string when SET_VERSION hasn't been called yet,
+ * otherwise libdrm breaks. Even though that unique string can't ever change, and
+ * is totally irrelevant for actually opening the device, because runtime
+ * assignable device instances were only supported in drm 1.0, which is long dead.
+ * But the libdrm code in drmOpenByName somehow survived, hence this can't be
+ * broken.
+ */
+
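
[To make the busid path concrete, a hedged userspace sketch of steps 1 and 2
using the raw uapi ioctls rather than libdrm; error handling is elided and
node_matches() is hypothetical:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* Does the node at `path` match `busid`? Returns an open fd or -1. */
    static int node_matches(const char *path, const char *busid)
    {
            struct drm_set_version sv = {
                    .drm_di_major = 1, .drm_di_minor = 4,   /* pci domains */
                    .drm_dd_major = -1, .drm_dd_minor = -1,
            };
            char name[64] = "";
            struct drm_unique u = {
                    .unique_len = sizeof(name) - 1, .unique = name,
            };
            int fd = open(path, O_RDWR);

            /* Step 1: SET_VERSION fills out the unique name as a side effect. */
            ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
            /* Step 2: read the unique name back and compare it to the busid. */
            ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);

            if (strcmp(name, busid) == 0)
                    return fd;
            close(fd);
            return -1;
    }
]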
 static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
 
@@ -75,51 +134,6 @@ drm_unset_busid(struct drm_device *dev,
        master->unique_len = 0;
 }
 
-/*
- * Set the bus id.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_unique structure.
- * \return zero on success or a negative number on failure.
- *
- * Copies the bus id from userspace into drm_device::unique, and verifies that
- * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
- * in interface version 1.1 and will return EBUSY when setversion has requested
- * version 1.1 or greater. Also note that KMS is all version 1.1 and later and
- * UMS was only ever supported on pci devices.
- */
-static int drm_setunique(struct drm_device *dev, void *data,
-                 struct drm_file *file_priv)
-{
-       struct drm_unique *u = data;
-       struct drm_master *master = file_priv->master;
-       int ret;
-
-       if (master->unique_len || master->unique)
-               return -EBUSY;
-
-       if (!u->unique_len || u->unique_len > 1024)
-               return -EINVAL;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return 0;
-
-       if (WARN_ON(!dev->pdev))
-               return -EINVAL;
-
-       ret = drm_pci_set_unique(dev, master, u);
-       if (ret)
-               goto err;
-
-       return 0;
-
-err:
-       drm_unset_busid(dev, master);
-       return ret;
-}
-
 static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
 {
        struct drm_master *master = file_priv->master;
@@ -135,12 +149,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
                        return ret;
                }
        } else {
-               if (WARN(dev->unique == NULL,
-                        "No drm_driver.set_busid() implementation provided by "
-                        "%ps. Use drm_dev_set_unique() to set the unique "
-                        "name explicitly.", dev->driver))
-                       return -EINVAL;
-
+               WARN_ON(!dev->unique);
                master->unique = kstrdup(dev->unique, GFP_KERNEL);
                if (master->unique)
                        master->unique_len = strlen(dev->unique);
@@ -473,7 +482,8 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
                return -EACCES;
 
        /* MASTER is only for master or control clients */
-       if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
+       if (unlikely((flags & DRM_MASTER) &&
+                    !drm_is_current_master(file_priv) &&
                     !drm_is_control_client(file_priv)))
                return -EACCES;
 
@@ -504,7 +514,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
                      DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
-       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+       DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
@@ -513,10 +523,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
-       DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+       DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER),
 
        DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
@@ -524,8 +534,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
 
-       DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
 
        DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -638,7 +648,7 @@ long drm_ioctl(struct file *filp,
        int retcode = -EINVAL;
        char stack_kdata[128];
        char *kdata = NULL;
-       unsigned int usize, asize, drv_size;
+       unsigned int in_size, out_size, drv_size, ksize;
        bool is_driver_ioctl;
 
        dev = file_priv->minor->dev;
@@ -661,9 +671,12 @@ long drm_ioctl(struct file *filp,
        }
 
        drv_size = _IOC_SIZE(ioctl->cmd);
-       usize = _IOC_SIZE(cmd);
-       asize = max(usize, drv_size);
-       cmd = ioctl->cmd;
+       out_size = in_size = _IOC_SIZE(cmd);
+       if ((cmd & ioctl->cmd & IOC_IN) == 0)
+               in_size = 0;
+       if ((cmd & ioctl->cmd & IOC_OUT) == 0)
+               out_size = 0;
+       ksize = max(max(in_size, out_size), drv_size);
 
        DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
                  task_pid_nr(current),
@@ -683,30 +696,24 @@ long drm_ioctl(struct file *filp,
        if (unlikely(retcode))
                goto err_i1;
 
-       if (cmd & (IOC_IN | IOC_OUT)) {
-               if (asize <= sizeof(stack_kdata)) {
-                       kdata = stack_kdata;
-               } else {
-                       kdata = kmalloc(asize, GFP_KERNEL);
-                       if (!kdata) {
-                               retcode = -ENOMEM;
-                               goto err_i1;
-                       }
+       if (ksize <= sizeof(stack_kdata)) {
+               kdata = stack_kdata;
+       } else {
+               kdata = kmalloc(ksize, GFP_KERNEL);
+               if (!kdata) {
+                       retcode = -ENOMEM;
+                       goto err_i1;
                }
-               if (asize > usize)
-                       memset(kdata + usize, 0, asize - usize);
        }
 
-       if (cmd & IOC_IN) {
-               if (copy_from_user(kdata, (void __user *)arg,
-                                  usize) != 0) {
-                       retcode = -EFAULT;
-                       goto err_i1;
-               }
-       } else if (cmd & IOC_OUT) {
-               memset(kdata, 0, usize);
+       if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+               retcode = -EFAULT;
+               goto err_i1;
        }
 
+       if (ksize > in_size)
+               memset(kdata + in_size, 0, ksize - in_size);
+
        /* Enforce sane locking for kms driver ioctls. Core ioctls are
         * too messy still. */
        if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
@@ -718,11 +725,8 @@ long drm_ioctl(struct file *filp,
                mutex_unlock(&drm_global_mutex);
        }
 
-       if (cmd & IOC_OUT) {
-               if (copy_to_user((void __user *)arg, kdata,
-                                usize) != 0)
-                       retcode = -EFAULT;
-       }
+       if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+               retcode = -EFAULT;
 
       err_i1:
        if (!ioctl)
@@ -749,7 +753,7 @@ EXPORT_SYMBOL(drm_ioctl);
  * shouldn't be used by any drivers.
  *
  * Returns:
- * True if the @nr corresponds to a DRM core ioctl numer, false otherwise.
+ * True if the @nr corresponds to a DRM core ioctl number, false otherwise.
  */
 bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
 {
index 0fac801c18fe941f7114ed141a336e92f90108c2..77f357b2c3869a8bb13eea58c8c76819df34e70a 100644 (file)
 #include <linux/vgaarb.h>
 #include <linux/export.h>
 
-/* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, pipe, count) \
-       ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
-
 /* Retry timestamp calculation up to 3 times to satisfy
  * drm_timestamp_precision before giving up.
  */
@@ -82,36 +78,18 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
                         struct timeval *t_vblank, u32 last)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-       u32 tslot;
 
        assert_spin_locked(&dev->vblank_time_lock);
 
        vblank->last = last;
 
-       /* All writers hold the spinlock, but readers are serialized by
-        * the latching of vblank->count below.
-        */
-       tslot = vblank->count + vblank_count_inc;
-       vblanktimestamp(dev, pipe, tslot) = *t_vblank;
-
-       /*
-        * vblank timestamp updates are protected on the write side with
-        * vblank_time_lock, but on the read side done locklessly using a
-        * sequence-lock on the vblank counter. Ensure correct ordering using
-        * memory barrriers. We need the barrier both before and also after the
-        * counter update to synchronize with the next timestamp write.
-        * The read-side barriers for this are in drm_vblank_count_and_time.
-        */
-       smp_wmb();
+       write_seqlock(&vblank->seqlock);
+       vblank->time = *t_vblank;
        vblank->count += vblank_count_inc;
-       smp_wmb();
+       write_sequnlock(&vblank->seqlock);
 }
 
-/**
- * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank
- * @dev: DRM device
- * @pipe: index of CRTC for which to reset the timestamp
- *
+/*
  * Reset the stored timestamp for the current vblank count to correspond
  * to the last vblank occurred.
  *
@@ -155,11 +133,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
        spin_unlock(&dev->vblank_time_lock);
 }
 
-/**
- * drm_update_vblank_count - update the master vblank counter
- * @dev: DRM device
- * @pipe: counter to update
- *
+/*
  * Call back into the driver to update the appropriate vblank counter
  * (specified by @pipe).  Deal with wraparound, if it occurred, and
  * update the last read value so we can deal with wraparound on the next
@@ -205,7 +179,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
                const struct timeval *t_old;
                u64 diff_ns;
 
-               t_old = &vblanktimestamp(dev, pipe, vblank->count);
+               t_old = &vblank->time;
                diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
 
                /*
@@ -239,49 +213,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
                diff = 1;
        }
 
-       /*
-        * FIMXE: Need to replace this hack with proper seqlocks.
-        *
-        * Restrict the bump of the software vblank counter to a safe maximum
-        * value of +1 whenever there is the possibility that concurrent readers
-        * of vblank timestamps could be active at the moment, as the current
-        * implementation of the timestamp caching and updating is not safe
-        * against concurrent readers for calls to store_vblank() with a bump
-        * of anything but +1. A bump != 1 would very likely return corrupted
-        * timestamps to userspace, because the same slot in the cache could
-        * be concurrently written by store_vblank() and read by one of those
-        * readers without the read-retry logic detecting the collision.
-        *
-        * Concurrent readers can exist when we are called from the
-        * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
-        * irq callers. However, all those calls to us are happening with the
-        * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
-        * can't increase while we are executing. Therefore a zero refcount at
-        * this point is safe for arbitrary counter bumps if we are called
-        * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
-        * we must also accept a refcount of 1, as whenever we are called from
-        * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
-        * we must let that one pass through in order to not lose vblank counts
-        * during vblank irq off - which would completely defeat the whole
-        * point of this routine.
-        *
-        * Whenever we are called from vblank irq, we have to assume concurrent
-        * readers exist or can show up any time during our execution, even if
-        * the refcount is currently zero, as vblank irqs are usually only
-        * enabled due to the presence of readers, and because when we are called
-        * from vblank irq we can't hold the vbl_lock to protect us from sudden
-        * bumps in vblank refcount. Therefore also restrict bumps to +1 when
-        * called from vblank irq.
-        */
-       if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
-           (flags & DRM_CALLED_FROM_VBLIRQ))) {
-               DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
-                             "refcount %u, vblirq %u\n", pipe, diff,
-                             atomic_read(&vblank->refcount),
-                             (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
-               diff = 1;
-       }
-
        DRM_DEBUG_VBL("updating vblank count on crtc %u:"
                      " current=%u, diff=%u, hw=%u hw_last=%u\n",
                      pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -303,6 +234,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
        store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
 }
 
+/**
+ * drm_accurate_vblank_count - retrieve the master vblank counter
+ * @crtc: which counter to retrieve
+ *
+ * This function is similar to @drm_crtc_vblank_count but it
+ * interpolates to handle a race with vblank irqs.
+ *
+ * This is mostly useful for hardware that can obtain the scanout
+ * position, but doesn't have a frame counter.
+ */
+u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       unsigned int pipe = drm_crtc_index(crtc);
+       u32 vblank;
+       unsigned long flags;
+
+       WARN(!dev->driver->get_vblank_timestamp,
+            "This function requires support for accurate vblank timestamps.");
+
+       spin_lock_irqsave(&dev->vblank_time_lock, flags);
+
+       drm_update_vblank_count(dev, pipe, 0);
+       vblank = drm_vblank_count(dev, pipe);
+
+       spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
+
+       return vblank;
+}
+EXPORT_SYMBOL(drm_accurate_vblank_count);
+
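
[A hedged usage sketch, with the driver structure hypothetical: a driver whose
hardware reports the scanout position but has no frame counter can stamp its
flip-completion events with the interpolated count:

    /* Hypothetical flip-done path on hardware without a frame counter. */
    static void foo_crtc_flip_done(struct drm_crtc *crtc,
                                   struct drm_pending_vblank_event *e)
    {
            u32 seq = drm_accurate_vblank_count(crtc);

            /* ... deliver `e` to userspace using `seq` as the sequence ... */
    }
]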
 /*
  * Disable vblank irq's on crtc, make sure that last vblank count
  * of hardware and corresponding consistent software vblank counter
@@ -417,6 +379,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
                init_waitqueue_head(&vblank->queue);
                setup_timer(&vblank->disable_timer, vblank_disable_fn,
                            (unsigned long)vblank);
+               seqlock_init(&vblank->seqlock);
        }
 
        DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -569,7 +532,7 @@ int drm_irq_uninstall(struct drm_device *dev)
 
        /*
         * Wake up any waiters so they don't hang. This is just to paper over
-        * isssues for UMS drivers which aren't in full control of their
+        * issues for UMS drivers which aren't in full control of their
         * vblank/irq handling. KMS drivers must ensure that vblanks are all
         * disabled when uninstalling the irq handler.
         */
@@ -631,7 +594,7 @@ int drm_control(struct drm_device *dev, void *data,
                return 0;
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
-       /* UMS was only ever support on pci devices. */
+       /* UMS was only ever supported on pci devices. */
        if (WARN_ON(!dev->pdev))
                return -EINVAL;
 
@@ -982,31 +945,24 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
  *
  * This is the legacy version of drm_crtc_vblank_count_and_time().
  */
-u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
-                             struct timeval *vblanktime)
+static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
+                                    struct timeval *vblanktime)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
-       int count = DRM_TIMESTAMP_MAXRETRIES;
-       u32 cur_vblank;
+       u32 vblank_count;
+       unsigned int seq;
 
        if (WARN_ON(pipe >= dev->num_crtcs))
                return 0;
 
-       /*
-        * Vblank timestamps are read lockless. To ensure consistency the vblank
-        * counter is rechecked and ordering is ensured using memory barriers.
-        * This works like a seqlock. The write-side barriers are in store_vblank.
-        */
        do {
-               cur_vblank = vblank->count;
-               smp_rmb();
-               *vblanktime = vblanktimestamp(dev, pipe, cur_vblank);
-               smp_rmb();
-       } while (cur_vblank != vblank->count && --count > 0);
+               seq = read_seqbegin(&vblank->seqlock);
+               vblank_count = vblank->count;
+               *vblanktime = vblank->time;
+       } while (read_seqretry(&vblank->seqlock, seq));
 
-       return cur_vblank;
+       return vblank_count;
 }
-EXPORT_SYMBOL(drm_vblank_count_and_time);
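
[The loop above is the stock seqlock read pattern that replaces the
hand-rolled smp_rmb() dance; for reference, a minimal generic reader/writer
pair (assumes <linux/seqlock.h>):

    static DEFINE_SEQLOCK(sample_lock);
    static u32 sample_count;
    static struct timeval sample_time;

    static void sample_store(u32 count, const struct timeval *t)
    {
            write_seqlock(&sample_lock);    /* writers serialize here */
            sample_count = count;
            sample_time = *t;
            write_sequnlock(&sample_lock);
    }

    static u32 sample_load(struct timeval *t)
    {
            unsigned int seq;
            u32 count;

            do {    /* lockless read; retried if a writer raced us */
                    seq = read_seqbegin(&sample_lock);
                    count = sample_count;
                    *t = sample_time;
            } while (read_seqretry(&sample_lock, seq));

            return count;
    }
]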
 
 /**
  * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
@@ -1018,8 +974,6 @@ EXPORT_SYMBOL(drm_vblank_count_and_time);
  * vblank events since the system was booted, including lost events due to
  * modesetting activity. Returns corresponding system timestamp of the time
  * of the vblank interval that corresponds to the current vblank counter value.
- *
- * This is the native KMS version of drm_vblank_count_and_time().
  */
 u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
                                   struct timeval *vblanktime)
@@ -1037,39 +991,11 @@ static void send_vblank_event(struct drm_device *dev,
        e->event.tv_sec = now->tv_sec;
        e->event.tv_usec = now->tv_usec;
 
-       drm_send_event_locked(dev, &e->base);
-
        trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
                                         e->event.sequence);
-}
-
-/**
- * drm_arm_vblank_event - arm vblank event after pageflip
- * @dev: DRM device
- * @pipe: CRTC index
- * @e: the event to prepare to send
- *
- * A lot of drivers need to generate vblank events for the very next vblank
- * interrupt. For example when the page flip interrupt happens when the page
- * flip gets armed, but not when it actually executes within the next vblank
- * period. This helper function implements exactly the required vblank arming
- * behaviour.
- *
- * Caller must hold event lock. Caller must also hold a vblank reference for
- * the event @e, which will be dropped when the next vblank arrives.
- *
- * This is the legacy version of drm_crtc_arm_vblank_event().
- */
-void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
-                         struct drm_pending_vblank_event *e)
-{
-       assert_spin_locked(&dev->event_lock);
 
-       e->pipe = pipe;
-       e->event.sequence = drm_vblank_count(dev, pipe);
-       list_add_tail(&e->base.link, &dev->vblank_event_list);
+       drm_send_event_locked(dev, &e->base);
 }
-EXPORT_SYMBOL(drm_arm_vblank_event);
 
 /**
  * drm_crtc_arm_vblank_event - arm vblank event after pageflip
@@ -1084,32 +1010,35 @@ EXPORT_SYMBOL(drm_arm_vblank_event);
  *
  * Caller must hold event lock. Caller must also hold a vblank reference for
  * the event @e, which will be dropped when the next vblank arrives.
- *
- * This is the native KMS version of drm_arm_vblank_event().
  */
 void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
                               struct drm_pending_vblank_event *e)
 {
-       drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+       struct drm_device *dev = crtc->dev;
+       unsigned int pipe = drm_crtc_index(crtc);
+
+       assert_spin_locked(&dev->event_lock);
+
+       e->pipe = pipe;
+       e->event.sequence = drm_vblank_count(dev, pipe);
+       list_add_tail(&e->base.link, &dev->vblank_event_list);
 }
 EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
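
[A hedged sketch of arm-versus-send, with the flag and variables hypothetical:
arming defers delivery to the next vblank interrupt, while sending stamps and
delivers immediately; an armed event also needs a vblank reference held on
its behalf:

    spin_lock_irqsave(&dev->event_lock, flags);
    if (flip_completes_on_next_vblank) {
            WARN_ON(drm_crtc_vblank_get(crtc) != 0);
            drm_crtc_arm_vblank_event(crtc, event); /* fires at next vblank */
    } else {
            drm_crtc_send_vblank_event(crtc, event); /* delivered now */
    }
    spin_unlock_irqrestore(&dev->event_lock, flags);
]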
 
 /**
- * drm_send_vblank_event - helper to send vblank event after pageflip
- * @dev: DRM device
- * @pipe: CRTC index
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
  * @e: the event to send
  *
  * Updates sequence # and timestamp on event, and sends it to userspace.
  * Caller must hold event lock.
- *
- * This is the legacy version of drm_crtc_send_vblank_event().
  */
-void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
-                          struct drm_pending_vblank_event *e)
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+                               struct drm_pending_vblank_event *e)
 {
+       struct drm_device *dev = crtc->dev;
+       unsigned int seq, pipe = drm_crtc_index(crtc);
        struct timeval now;
-       unsigned int seq;
 
        if (dev->num_crtcs > 0) {
                seq = drm_vblank_count_and_time(dev, pipe, &now);
@@ -1121,23 +1050,6 @@ void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
        e->pipe = pipe;
        send_vblank_event(dev, e, seq, &now);
 }
-EXPORT_SYMBOL(drm_send_vblank_event);
-
-/**
- * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
- * @crtc: the source CRTC of the vblank event
- * @e: the event to send
- *
- * Updates sequence # and timestamp on event, and sends it to userspace.
- * Caller must hold event lock.
- *
- * This is the native KMS version of drm_send_vblank_event().
- */
-void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
-                               struct drm_pending_vblank_event *e)
-{
-       drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
-}
 EXPORT_SYMBOL(drm_crtc_send_vblank_event);
 
 /**
@@ -1193,7 +1105,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
  * Returns:
  * Zero on success or a negative error code on failure.
  */
-int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
+static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
        unsigned long irqflags;
@@ -1219,7 +1131,6 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
 
        return ret;
 }
-EXPORT_SYMBOL(drm_vblank_get);
 
 /**
  * drm_crtc_vblank_get - get a reference count on vblank events
@@ -1228,8 +1139,6 @@ EXPORT_SYMBOL(drm_vblank_get);
  * Acquire a reference count on vblank events to avoid having them disabled
  * while in use.
  *
- * This is the native kms version of drm_vblank_get().
- *
  * Returns:
  * Zero on success or a negative error code on failure.
  */
@@ -1249,7 +1158,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
  *
  * This is the legacy version of drm_crtc_vblank_put().
  */
-void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
+static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 
@@ -1270,7 +1179,6 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
                                  jiffies + ((drm_vblank_offdelay * HZ)/1000));
        }
 }
-EXPORT_SYMBOL(drm_vblank_put);
 
 /**
  * drm_crtc_vblank_put - give up ownership of vblank events
@@ -1278,8 +1186,6 @@ EXPORT_SYMBOL(drm_vblank_put);
  *
  * Release ownership of a given vblank counter, turning off interrupts
  * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
- *
- * This is the native kms version of drm_vblank_put().
  */
 void drm_crtc_vblank_put(struct drm_crtc *crtc)
 {
@@ -1679,12 +1585,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
 
        seq = drm_vblank_count_and_time(dev, pipe, &now);
 
-       if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
-           (seq - vblwait->request.sequence) <= (1 << 23)) {
-               vblwait->request.sequence = seq + 1;
-               vblwait->reply.sequence = vblwait->request.sequence;
-       }
-
        DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
                  vblwait->request.sequence, seq, pipe);
 
@@ -1781,6 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
                goto done;
        }
 
+       if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+           (seq - vblwait->request.sequence) <= (1 << 23)) {
+               vblwait->request.sequence = seq + 1;
+       }
+
        if (flags & _DRM_VBLANK_EVENT) {
                /* must hold on to the vblank ref until the event fires
                 * drm_vblank_put will be called asynchronously
@@ -1788,14 +1693,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
                return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
        }
 
-       if ((flags & _DRM_VBLANK_NEXTONMISS) &&
-           (seq - vblwait->request.sequence) <= (1<<23)) {
-               vblwait->request.sequence = seq + 1;
-       }
-
        DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
                  vblwait->request.sequence, pipe);
-       vblank->last_wait = vblwait->request.sequence;
        DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
                    (((drm_vblank_count(dev, pipe) -
                       vblwait->request.sequence) <= (1 << 23)) ||
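
[The `(seq - vblwait->request.sequence) <= (1 << 23)` comparison that moved
above is the standard modular-arithmetic trick for "has a wrapping 32-bit
counter passed the target yet"; a tiny standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Has the wrapping 32-bit counter `now` reached `target`?
     * Treats forward distances up to 2^23 as "already passed". */
    static int vblank_passed(uint32_t now, uint32_t target)
    {
            return (now - target) <= (1u << 23);
    }

    int main(void)
    {
            printf("%d\n", vblank_passed(5, 3));            /* 1: passed */
            printf("%d\n", vblank_passed(3, 5));            /* 0: pending */
            printf("%d\n", vblank_passed(2, 0xfffffffe));   /* 1: passed across wrap */
            return 0;
    }
]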
index d3b6ee357a2b432aee09fb5b3780c6df84c7a577..c6f422e879dd6986f372bb2dfb6a5c764fcc045d 100644 (file)
@@ -88,14 +88,10 @@ struct drm_agp_mem {
        struct list_head head;
 };
 
-/*
- * Generic Userspace Locking-API
- */
-
-int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f);
+/* drm_lock.c */
 int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
-int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx);
+void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
 
 /* DMA support */
 int drm_legacy_dma_setup(struct drm_device *dev);
index daa2ff12101ba366d82be5b550ba5cf5738a6ff2..48ac0ebbd6634e947fecbd701d96818152b0e9cc 100644 (file)
 
 static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 
+/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static
+int drm_lock_take(struct drm_lock_data *lock_data,
+                 unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       do {
+               old = *lock;
+               if (old & _DRM_LOCK_HELD)
+                       new = old | _DRM_LOCK_CONT;
+               else {
+                       new = context | _DRM_LOCK_HELD |
+                               ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+                                _DRM_LOCK_CONT : 0);
+               }
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+       spin_unlock_bh(&lock_data->spinlock);
+
+       if (_DRM_LOCKING_CONTEXT(old) == context) {
+               if (old & _DRM_LOCK_HELD) {
+                       if (context != DRM_KERNEL_CONTEXT) {
+                               DRM_ERROR("%d holds heavyweight lock\n",
+                                         context);
+                       }
+                       return 0;
+               }
+       }
+
+       if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
+               /* Have lock */
+               return 1;
+       }
+       return 0;
+}
+
+/**
+ * This takes a lock forcibly and hands it to context. Should ONLY be used
+ * inside *_unlock to give lock to kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+                            unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       lock_data->file_priv = NULL;
+       do {
+               old = *lock;
+               new = context | _DRM_LOCK_HELD;
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+       return 1;
+}
+
+static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
+                               unsigned int context)
+{
+       unsigned int old, new, prev;
+       volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+       spin_lock_bh(&lock_data->spinlock);
+       if (lock_data->kernel_waiters != 0) {
+               drm_lock_transfer(lock_data, 0);
+               lock_data->idle_has_lock = 1;
+               spin_unlock_bh(&lock_data->spinlock);
+               return 1;
+       }
+       spin_unlock_bh(&lock_data->spinlock);
+
+       do {
+               old = *lock;
+               new = _DRM_LOCKING_CONTEXT(old);
+               prev = cmpxchg(lock, old, new);
+       } while (prev != old);
+
+       if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+               DRM_ERROR("%d freed heavyweight lock held by %d\n",
+                         context, _DRM_LOCKING_CONTEXT(old));
+               return 1;
+       }
+       wake_up_interruptible(&lock_data->lock_queue);
+       return 0;
+}
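
All three helpers above rely on the same lock-free update idiom: snapshot the lock word, compute the desired value, and retry the cmpxchg until no other context raced in between. A minimal sketch of that idiom in isolation (the helper name and callback are illustrative, not part of this patch):

	/* Sketch only: retry a cmpxchg until the update applies race-free. */
	static unsigned int lock_word_update(volatile unsigned int *lock,
					     unsigned int (*compute)(unsigned int))
	{
		unsigned int old, new, prev;

		do {
			old = *lock;		/* snapshot the current lock word */
			new = compute(old);	/* derive the value we want */
			prev = cmpxchg(lock, old, new);
		} while (prev != old);		/* lost a race: someone else wrote */

		return old;			/* callers inspect the pre-update word */
	}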
+
 /**
  * Lock ioctl.
  *
@@ -115,7 +219,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
        /* don't block all signals on the master process for now;
         * probably not the correct answer, but it lets us debug the
         * xkb X server for now */
-       if (!file_priv->is_master) {
+       if (!drm_is_current_master(file_priv)) {
                dev->sigdata.context = lock->context;
                dev->sigdata.lock = master->lock.hw_lock;
        }
@@ -164,120 +268,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
        return 0;
 }
 
-/**
- * Take the heavyweight lock.
- *
- * \param lock lock pointer.
- * \param context locking context.
- * \return one if the lock is held, or zero otherwise.
- *
- * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static
-int drm_lock_take(struct drm_lock_data *lock_data,
-                 unsigned int context)
-{
-       unsigned int old, new, prev;
-       volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
-       spin_lock_bh(&lock_data->spinlock);
-       do {
-               old = *lock;
-               if (old & _DRM_LOCK_HELD)
-                       new = old | _DRM_LOCK_CONT;
-               else {
-                       new = context | _DRM_LOCK_HELD |
-                               ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
-                                _DRM_LOCK_CONT : 0);
-               }
-               prev = cmpxchg(lock, old, new);
-       } while (prev != old);
-       spin_unlock_bh(&lock_data->spinlock);
-
-       if (_DRM_LOCKING_CONTEXT(old) == context) {
-               if (old & _DRM_LOCK_HELD) {
-                       if (context != DRM_KERNEL_CONTEXT) {
-                               DRM_ERROR("%d holds heavyweight lock\n",
-                                         context);
-                       }
-                       return 0;
-               }
-       }
-
-       if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
-               /* Have lock */
-               return 1;
-       }
-       return 0;
-}
-
-/**
- * This takes a lock forcibly and hands it to context. Should ONLY be used
- * inside *_unlock to give lock to kernel before calling *_dma_schedule.
- *
- * \param dev DRM device.
- * \param lock lock pointer.
- * \param context locking context.
- * \return always one.
- *
- * Resets the lock file pointer.
- * Marks the lock as held by the given context, via the \p cmpxchg instruction.
- */
-static int drm_lock_transfer(struct drm_lock_data *lock_data,
-                            unsigned int context)
-{
-       unsigned int old, new, prev;
-       volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
-       lock_data->file_priv = NULL;
-       do {
-               old = *lock;
-               new = context | _DRM_LOCK_HELD;
-               prev = cmpxchg(lock, old, new);
-       } while (prev != old);
-       return 1;
-}
-
-/**
- * Free lock.
- *
- * \param dev DRM device.
- * \param lock lock.
- * \param context context.
- *
- * Resets the lock file pointer.
- * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
- * waiting on the lock queue.
- */
-int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
-{
-       unsigned int old, new, prev;
-       volatile unsigned int *lock = &lock_data->hw_lock->lock;
-
-       spin_lock_bh(&lock_data->spinlock);
-       if (lock_data->kernel_waiters != 0) {
-               drm_lock_transfer(lock_data, 0);
-               lock_data->idle_has_lock = 1;
-               spin_unlock_bh(&lock_data->spinlock);
-               return 1;
-       }
-       spin_unlock_bh(&lock_data->spinlock);
-
-       do {
-               old = *lock;
-               new = _DRM_LOCKING_CONTEXT(old);
-               prev = cmpxchg(lock, old, new);
-       } while (prev != old);
-
-       if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
-               DRM_ERROR("%d freed heavyweight lock held by %d\n",
-                         context, _DRM_LOCKING_CONTEXT(old));
-               return 1;
-       }
-       wake_up_interruptible(&lock_data->lock_queue);
-       return 0;
-}
-
 /**
  * This function returns immediately and takes the hw lock
  * with the kernel context if it is free, otherwise it gets the highest priority when and if
@@ -330,11 +320,27 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
 }
 EXPORT_SYMBOL(drm_legacy_idlelock_release);
 
-int drm_legacy_i_have_hw_lock(struct drm_device *dev,
-                             struct drm_file *file_priv)
+static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
+                                    struct drm_file *file_priv)
 {
        struct drm_master *master = file_priv->master;
        return (file_priv->lock_count && master->lock.hw_lock &&
                _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
                master->lock.file_priv == file_priv);
 }
+
+void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
+{
+       struct drm_file *file_priv = filp->private_data;
+
+       /* if the master has gone away we can't do anything with the lock */
+       if (!dev->master)
+               return;
+
+       if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
+               DRM_DEBUG("File %p released, freeing lock for context %d\n",
+                         filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+               drm_legacy_lock_free(&file_priv->master->lock,
+                                    _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
+       }
+}
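
drm_legacy_lock_release() becomes the single entry point for dropping a dying client's hardware lock. A hedged sketch of how a file-close path might invoke it — the guard and the function name are assumptions for illustration, not taken from the hunks shown here:

	/* Illustrative close path: legacy (UMS) drivers only. */
	static void example_preclose(struct drm_device *dev, struct file *filp)
	{
		if (!drm_core_check_feature(dev, DRIVER_MODESET))
			drm_legacy_lock_release(dev, filp);
	}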
index 87a8cb73366f258697e0001db3260dd93e19e294..fc0ebd273ef827d140993fda0ac8e2650c19632e 100644 (file)
@@ -44,7 +44,7 @@
 # include <asm/agp.h>
 #else
 # ifdef __powerpc__
-#  define PAGE_AGP     __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+#  define PAGE_AGP     pgprot_noncached_wc(PAGE_KERNEL)
 # else
 #  define PAGE_AGP     PAGE_KERNEL
 # endif
index f5d80839a90c2941b7d0243ef17df8d949baa079..af0d471ee246b6b3fad5ad9e91c99885f3010c80 100644 (file)
@@ -60,6 +60,21 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
        return 0;
 }
 
+static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+       int err;
+
+       err = of_device_uevent_modalias(dev, env);
+       if (err != -ENODEV)
+               return err;
+
+       add_uevent_var(env, "MODALIAS=%s%s", MIPI_DSI_MODULE_PREFIX,
+                      dsi->name);
+
+       return 0;
+}
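
With the of_device_uevent_modalias() fallback above, devices probed without an OF node still emit a MODALIAS, so module autoloading keeps working either way. Assuming MIPI_DSI_MODULE_PREFIX expands to "mipi-dsi:", a hypothetical panel module could be matched like this (the panel name is made up):

	/* Hypothetical: lets udev load this module for MODALIAS=mipi-dsi:acme-panel */
	MODULE_ALIAS("mipi-dsi:acme-panel");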
+
 static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
        .runtime_suspend = pm_generic_runtime_suspend,
        .runtime_resume = pm_generic_runtime_resume,
@@ -74,6 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
 static struct bus_type mipi_dsi_bus_type = {
        .name = "mipi-dsi",
        .match = mipi_dsi_device_match,
+       .uevent = mipi_dsi_uevent,
        .pm = &mipi_dsi_device_pm_ops,
 };
 
@@ -982,6 +998,28 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
 }
 EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
 
+/**
+ * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
+ *    the Tearing Effect output signal of the display module
+ * @dsi: DSI peripheral device
+ * @scanline: scanline to use as trigger
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
+{
+       u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
+                         scanline & 0xff };
+       ssize_t err;
+
+       err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
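
A hedged usage sketch for the new helper: a panel driver that wants the Tearing Effect line to fire at a specific scanline would typically pair it with mipi_dsi_dcs_set_tear_on() during prepare/enable (the function name and scanline value below are illustrative):

	static int example_panel_setup_te(struct mipi_dsi_device *dsi)
	{
		int ret;

		ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
		if (ret < 0)
			return ret;

		/* Fire the TE signal when scanline 0 is reached. */
		return mipi_dsi_dcs_set_tear_scanline(dsi, 0);
	}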
+
 /**
  * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
  *    data used by the interface
index 04de6fd88f8c3a7f5d503c45768582a10a71efd4..cb39f45d6a16befdc9cfac339d1703a2d9a217e5 100644 (file)
@@ -179,12 +179,14 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
        struct drm_mm_node *hole;
-       u64 end = node->start + node->size;
+       u64 end;
        u64 hole_start;
        u64 hole_end;
 
        BUG_ON(node == NULL);
 
+       end = node->start + node->size;
+
        /* Find the relevant hole to add our node to */
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
                if (hole_start > node->start || hole_end < end)
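
For orientation, a hedged sketch of this function's calling convention — the caller pre-fills a fixed start and size, and drm_mm_reserve_node() returns -ENOSPC when no hole covers that range (the offsets below are made up):

	struct drm_mm_node node = {};
	int ret;

	node.start = 0x100000;	/* the fixed offset the caller requires */
	node.size = 0x10000;
	ret = drm_mm_reserve_node(&mm, &node);	/* -ENOSPC if range occupied */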
index 7def3d58da188990390ee84fcca75b312ea22735..fc5040ae5f256ef2987fba0ce19ad1a8736ff5ef 100644 (file)
@@ -544,6 +544,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
  *
  * This function is to create the modeline based on the GTF algorithm.
  * Generalized Timing Formula is derived from:
+ *
  *     GTF Spreadsheet by Andy Morrish (1/5/97)
  *     available at http://www.vesa.org
  *
@@ -552,7 +553,8 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
  * I also refer to the function of fb_get_mode in the file of
  * drivers/video/fbmon.c
  *
- * Standard GTF parameters:
+ * Standard GTF parameters::
+ *
  *     M = 600
  *     C = 40
  *     K = 128
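
For reference, with the standard parameters above (and J = 20, which this hunk does not show but the VESA GTF spreadsheet defines) the derived GTF coefficients work out to:

	C' = (C - J) * K/256 + J = (40 - 20) * 128/256 + 20 = 30
	M' = M * K/256 = 600 * 128/256 = 300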
@@ -1518,6 +1520,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
        if (out->status != MODE_OK)
                goto out;
 
+       drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
+
        ret = 0;
 
 out:
index e3a4adf03e7bfccbb3679c3f7a2416146a33e130..61146f5b4f56a90dc7dc371289ce11a23d38b9e6 100644 (file)
  *
  * As KMS moves toward more fine grained locking, and atomic ioctl where
  * userspace can indirectly control locking order, it becomes necessary
- * to use ww_mutex and acquire-contexts to avoid deadlocks.  But because
+ * to use &ww_mutex and acquire-contexts to avoid deadlocks.  But because
  * the locking is more distributed around the driver code, we want a bit
  * of extra utility/tracking out of our acquire-ctx.  This is provided
  * by drm_modeset_lock / drm_modeset_acquire_ctx.
  *
- * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
+ * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
  *
- * The basic usage pattern is to:
+ * The basic usage pattern is to::
  *
  *     drm_modeset_acquire_init(&ctx)
  *     retry:
  *     ... do stuff ...
  *     drm_modeset_drop_locks(&ctx);
  *     drm_modeset_acquire_fini(&ctx);
+ *
+ * On top of these per-object locks using &ww_mutex there's also an overall
+ * dev->mode_config.lock, for protecting everything else. Mostly this means
+ * probe state of connectors, and preventing hotplug add/removal of connectors.
+ *
+ * Finally there's a bunch of dedicated locks to protect drm core internal
+ * lists and lookup data structures.
  */
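
Spelled out a little further, the abbreviated pattern above expands to the usual ww_mutex retry loop: on -EDEADLK every held lock is dropped and the thread blocks on the contended one before retrying. A hedged sketch (the locked object is just an example):

	void example_modeset(struct drm_crtc *crtc)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, 0);
	retry:
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Lost the race: drop all locks, wait for the winner. */
			drm_modeset_backoff(&ctx);
			goto retry;
		}

		/* ... do stuff ... */

		drm_modeset_drop_locks(&ctx);
		drm_modeset_acquire_fini(&ctx);
	}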
 
 /**
index 29d5a548d07a70c1fad5b5e13dc7413dd509c1b0..b2f8f1062d5fc85daa624fcd3e0a11383b83f6b2 100644 (file)
@@ -144,50 +144,6 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
 }
 EXPORT_SYMBOL(drm_pci_set_busid);
 
-int drm_pci_set_unique(struct drm_device *dev,
-                      struct drm_master *master,
-                      struct drm_unique *u)
-{
-       int domain, bus, slot, func, ret;
-
-       master->unique_len = u->unique_len;
-       master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
-       if (!master->unique) {
-               ret = -ENOMEM;
-               goto err;
-       }
-
-       if (copy_from_user(master->unique, u->unique, master->unique_len)) {
-               ret = -EFAULT;
-               goto err;
-       }
-
-       master->unique[master->unique_len] = '\0';
-
-       /* Return error if the busid submitted doesn't match the device's actual
-        * busid.
-        */
-       ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
-       if (ret != 3) {
-               ret = -EINVAL;
-               goto err;
-       }
-
-       domain = bus >> 8;
-       bus &= 0xff;
-
-       if ((domain != drm_get_pci_domain(dev)) ||
-           (bus != dev->pdev->bus->number) ||
-           (slot != PCI_SLOT(dev->pdev->devfn)) ||
-           (func != PCI_FUNC(dev->pdev->devfn))) {
-               ret = -EINVAL;
-               goto err;
-       }
-       return 0;
-err:
-       return ret;
-}
-
 static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
 {
        if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
@@ -444,13 +400,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 {
        return -EINVAL;
 }
-
-int drm_pci_set_unique(struct drm_device *dev,
-                      struct drm_master *master,
-                      struct drm_unique *u)
-{
-       return -EINVAL;
-}
 #endif
 
 EXPORT_SYMBOL(drm_pci_init);
index 369d2898ff9ecdfc452818235642ef5d6502a641..16c4a7bd7465800c5cbcd3435d6823829cb41daf 100644 (file)
@@ -115,6 +115,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
  * @src: source coordinates in 16.16 fixed point
  * @dest: integer destination coordinates
  * @clip: integer clipping coordinates
+ * @rotation: plane rotation
  * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
  * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
  * @can_position: is it legal to position the plane such that it
@@ -134,16 +135,17 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
  * Zero if update appears valid, error code on failure
  */
 int drm_plane_helper_check_update(struct drm_plane *plane,
-                                   struct drm_crtc *crtc,
-                                   struct drm_framebuffer *fb,
-                                   struct drm_rect *src,
-                                   struct drm_rect *dest,
-                                   const struct drm_rect *clip,
-                                   int min_scale,
-                                   int max_scale,
-                                   bool can_position,
-                                   bool can_update_disabled,
-                                   bool *visible)
+                                 struct drm_crtc *crtc,
+                                 struct drm_framebuffer *fb,
+                                 struct drm_rect *src,
+                                 struct drm_rect *dest,
+                                 const struct drm_rect *clip,
+                                 unsigned int rotation,
+                                 int min_scale,
+                                 int max_scale,
+                                 bool can_position,
+                                 bool can_update_disabled,
+                                 bool *visible)
 {
        int hscale, vscale;
 
@@ -163,6 +165,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
                return -EINVAL;
        }
 
+       drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
+
        /* Check scaling */
        hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
        vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
@@ -174,6 +178,9 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
        }
 
        *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
+
+       drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
+
        if (!*visible)
                /*
                 * Plane isn't visible; some drivers can handle this
@@ -219,10 +226,12 @@ EXPORT_SYMBOL(drm_plane_helper_check_update);
  *
  * Note that we make some assumptions about hardware limitations that may not be
  * true for all hardware --
- *   1) Primary plane cannot be repositioned.
- *   2) Primary plane cannot be scaled.
- *   3) Primary plane must cover the entire CRTC.
- *   4) Subpixel positioning is not supported.
+ *
+ * 1. Primary plane cannot be repositioned.
+ * 2. Primary plane cannot be scaled.
+ * 3. Primary plane must cover the entire CRTC.
+ * 4. Subpixel positioning is not supported.
+ *
  * Drivers for hardware that don't have these restrictions can provide their
  * own implementation rather than using this helper.
  *
@@ -265,6 +274,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
 
        ret = drm_plane_helper_check_update(plane, crtc, fb,
                                            &src, &dest, &clip,
+                                           BIT(DRM_ROTATE_0),
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            false, false, &visible);
index 644169e1a0296fe6fa07c91cc3c8520bf9588c4a..2c819ef90090dee3b8c154d5065fd77041e66f6b 100644 (file)
@@ -68,24 +68,6 @@ err_free:
        return ret;
 }
 
-int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
-{
-       int id;
-
-       id = dev->platformdev->id;
-       if (id < 0)
-               id = 0;
-
-       master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d",
-                                               dev->platformdev->name, id);
-       if (!master->unique)
-               return -ENOMEM;
-
-       master->unique_len = strlen(master->unique);
-       return 0;
-}
-EXPORT_SYMBOL(drm_platform_set_busid);
-
 /**
  * drm_platform_init - Register a platform device with the DRM subsystem
  * @driver: DRM device driver
index aab0f3f1f42d5528efb184770d9f5bc9d949ce9d..780589b420a4f24ece0d64868e22d89a40bd86d9 100644 (file)
@@ -593,7 +593,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                get_dma_buf(dma_buf);
        }
 
-       /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+       /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
@@ -601,11 +601,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 
        ret = drm_prime_add_buf_handle(&file_priv->prime,
                        dma_buf, *handle);
+       mutex_unlock(&file_priv->prime.lock);
        if (ret)
                goto fail;
 
-       mutex_unlock(&file_priv->prime.lock);
-
        dma_buf_put(dma_buf);
 
        return 0;
@@ -615,11 +614,14 @@ fail:
         * to detach.. which seems ok..
         */
        drm_gem_handle_delete(file_priv, *handle);
+       dma_buf_put(dma_buf);
+       return ret;
+
 out_unlock:
        mutex_unlock(&dev->object_name_lock);
 out_put:
-       dma_buf_put(dma_buf);
        mutex_unlock(&file_priv->prime.lock);
+       dma_buf_put(dma_buf);
        return ret;
 }
 EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
index 0329080d7f7cb5410a1d903a606d6a38d1b0c74e..a0df377d7d1c4466a71860db90bced926273eac5 100644 (file)
@@ -82,13 +82,30 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
 
 static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 {
+       struct drm_cmdline_mode *cmdline_mode;
        struct drm_display_mode *mode;
 
-       if (!connector->cmdline_mode.specified)
+       cmdline_mode = &connector->cmdline_mode;
+       if (!cmdline_mode->specified)
                return 0;
 
+       /* Only add a GTF mode if we find no matching probed modes */
+       list_for_each_entry(mode, &connector->probed_modes, head) {
+               if (mode->hdisplay != cmdline_mode->xres ||
+                   mode->vdisplay != cmdline_mode->yres)
+                       continue;
+
+               if (cmdline_mode->refresh_specified) {
+                       /* The probed mode's vrefresh is set until later */
+                       if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
+                               continue;
+               }
+
+               return 0;
+       }
+
        mode = drm_mode_create_from_cmdline_mode(connector->dev,
-                                                &connector->cmdline_mode);
+                                                cmdline_mode);
        if (mode == NULL)
                return 0;
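
The effect is visible with a command line such as the one below: if EDID probing already produced a matching 1024x768@60 mode, that native mode is used and no synthetic GTF mode is added next to it (the connector name and mode are just examples):

	video=HDMI-A-1:1024x768@60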
 
index 4f0f3b36d537c258dbf28fd0dde4dd962ed98ae4..bf70431073f654a5ec52d4276c8fcb33f306ce04 100644 (file)
@@ -41,7 +41,7 @@
 static inline void *drm_vmalloc_dma(unsigned long size)
 {
 #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
-       return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+       return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
 #else
        return vmalloc_32(size);
 #endif
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
new file mode 100644 (file)
index 0000000..0db36d2
--- /dev/null
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <linux/slab.h>
+
+/**
+ * DOC: overview
+ *
+ * This helper library provides helpers for drivers of simple display
+ * hardware.
+ *
+ * drm_simple_display_pipe_init() initializes a simple display pipeline
+ * which has only one full-screen scanout buffer feeding one output. The
+ * pipeline is represented by struct &drm_simple_display_pipe and binds
+ * together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed
+ * entity. Some flexibility for code reuse is provided through a separately
+ * allocated &drm_connector object and supporting optional &drm_bridge
+ * encoder drivers.
+ */
+
+static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
+{
+       struct drm_simple_display_pipe *pipe;
+
+       pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
+       if (!pipe->funcs || !pipe->funcs->enable)
+               return;
+
+       pipe->funcs->enable(pipe, crtc->state);
+}
+
+static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
+{
+       struct drm_simple_display_pipe *pipe;
+
+       pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
+       if (!pipe->funcs || !pipe->funcs->disable)
+               return;
+
+       pipe->funcs->disable(pipe);
+}
+
+static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
+       .disable = drm_simple_kms_crtc_disable,
+       .enable = drm_simple_kms_crtc_enable,
+};
+
+static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
+       .reset = drm_atomic_helper_crtc_reset,
+       .destroy = drm_crtc_cleanup,
+       .set_config = drm_atomic_helper_set_config,
+       .page_flip = drm_atomic_helper_page_flip,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
+                                       struct drm_plane_state *plane_state)
+{
+       struct drm_rect src = {
+               .x1 = plane_state->src_x,
+               .y1 = plane_state->src_y,
+               .x2 = plane_state->src_x + plane_state->src_w,
+               .y2 = plane_state->src_y + plane_state->src_h,
+       };
+       struct drm_rect dest = {
+               .x1 = plane_state->crtc_x,
+               .y1 = plane_state->crtc_y,
+               .x2 = plane_state->crtc_x + plane_state->crtc_w,
+               .y2 = plane_state->crtc_y + plane_state->crtc_h,
+       };
+       struct drm_rect clip = { 0 };
+       struct drm_simple_display_pipe *pipe;
+       struct drm_crtc_state *crtc_state;
+       bool visible;
+       int ret;
+
+       pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+       crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+                                                       &pipe->crtc);
+       if (crtc_state->enable != !!plane_state->crtc)
+               return -EINVAL; /* plane must match crtc enable state */
+
+       if (!crtc_state->enable)
+               return 0; /* nothing to check when disabling or disabled */
+
+       clip.x2 = crtc_state->adjusted_mode.hdisplay;
+       clip.y2 = crtc_state->adjusted_mode.vdisplay;
+       ret = drm_plane_helper_check_update(plane, &pipe->crtc,
+                                           plane_state->fb,
+                                           &src, &dest, &clip,
+                                           plane_state->rotation,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           DRM_PLANE_HELPER_NO_SCALING,
+                                           false, true, &visible);
+       if (ret)
+               return ret;
+
+       if (!visible)
+               return -EINVAL;
+
+       if (!pipe->funcs || !pipe->funcs->check)
+               return 0;
+
+       return pipe->funcs->check(pipe, plane_state, crtc_state);
+}
+
+static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane,
+                                       struct drm_plane_state *pstate)
+{
+       struct drm_simple_display_pipe *pipe;
+
+       pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+       if (!pipe->funcs || !pipe->funcs->update)
+               return;
+
+       pipe->funcs->update(pipe, pstate);
+}
+
+static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
+       .atomic_check = drm_simple_kms_plane_atomic_check,
+       .atomic_update = drm_simple_kms_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
+       .update_plane           = drm_atomic_helper_update_plane,
+       .disable_plane          = drm_atomic_helper_disable_plane,
+       .destroy                = drm_plane_cleanup,
+       .reset                  = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
+};
+
+/**
+ * drm_simple_display_pipe_init - Initialize a simple display pipeline
+ * @dev: DRM device
+ * @pipe: simple display pipe object to initialize
+ * @funcs: callbacks for the display pipe (optional)
+ * @formats: array of supported formats (%DRM_FORMAT_*)
+ * @format_count: number of elements in @formats
+ * @connector: connector to attach and register
+ *
+ * Sets up a display pipeline which consists of a really simple
+ * plane-crtc-encoder pipe coupled with the provided connector.
+ * Teardown of a simple display pipe is handled automatically by the drm
+ * core through drm_mode_config_cleanup(). Drivers afterwards need to
+ * release the memory for the structure themselves.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int drm_simple_display_pipe_init(struct drm_device *dev,
+                       struct drm_simple_display_pipe *pipe,
+                       const struct drm_simple_display_pipe_funcs *funcs,
+                       const uint32_t *formats, unsigned int format_count,
+                       struct drm_connector *connector)
+{
+       struct drm_encoder *encoder = &pipe->encoder;
+       struct drm_plane *plane = &pipe->plane;
+       struct drm_crtc *crtc = &pipe->crtc;
+       int ret;
+
+       pipe->connector = connector;
+       pipe->funcs = funcs;
+
+       drm_plane_helper_add(plane, &drm_simple_kms_plane_helper_funcs);
+       ret = drm_universal_plane_init(dev, plane, 0,
+                                      &drm_simple_kms_plane_funcs,
+                                      formats, format_count,
+                                      DRM_PLANE_TYPE_PRIMARY, NULL);
+       if (ret)
+               return ret;
+
+       drm_crtc_helper_add(crtc, &drm_simple_kms_crtc_helper_funcs);
+       ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+                                       &drm_simple_kms_crtc_funcs, NULL);
+       if (ret)
+               return ret;
+
+       encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
+       ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
+                              DRM_MODE_ENCODER_NONE, NULL);
+       if (ret)
+               return ret;
+
+       return drm_mode_connector_attach_encoder(connector, encoder);
+}
+EXPORT_SYMBOL(drm_simple_display_pipe_init);
+
+MODULE_LICENSE("GPL");
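
A hedged sketch of driver-side usage for the new helper — a bind/probe path hands in its embedded pipe, an optional funcs table and a pre-initialized connector. The example_* names and the format list are assumptions, not from this patch:

	static const uint32_t example_formats[] = {
		DRM_FORMAT_XRGB8888,
	};

	static int example_bind(struct drm_device *drm, struct example_priv *priv)
	{
		/* funcs may be NULL; every pipe->funcs callback is checked before use. */
		return drm_simple_display_pipe_init(drm, &priv->pipe, NULL,
						    example_formats,
						    ARRAY_SIZE(example_formats),
						    &priv->connector);
	}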
index fa7fadce8063fec0933ab14b6c64b43ac28adcd2..32dd821b7202a5492a5c7b184c775c329e7e2fb1 100644 (file)
@@ -32,75 +32,6 @@ static struct device_type drm_sysfs_device_minor = {
 
 struct class *drm_class;
 
-/**
- * __drm_class_suspend - internal DRM class suspend routine
- * @dev: Linux device to suspend
- * @state: power state to enter
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its suspend hook, if present.
- */
-static int __drm_class_suspend(struct device *dev, pm_message_t state)
-{
-       if (dev->type == &drm_sysfs_device_minor) {
-               struct drm_minor *drm_minor = to_drm_minor(dev);
-               struct drm_device *drm_dev = drm_minor->dev;
-
-               if (drm_minor->type == DRM_MINOR_LEGACY &&
-                   !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
-                   drm_dev->driver->suspend)
-                       return drm_dev->driver->suspend(drm_dev, state);
-       }
-       return 0;
-}
-
-/**
- * drm_class_suspend - internal DRM class suspend hook. Simply calls
- * __drm_class_suspend() with the correct pm state.
- * @dev: Linux device to suspend
- */
-static int drm_class_suspend(struct device *dev)
-{
-       return __drm_class_suspend(dev, PMSG_SUSPEND);
-}
-
-/**
- * drm_class_freeze - internal DRM class freeze hook. Simply calls
- * __drm_class_suspend() with the correct pm state.
- * @dev: Linux device to freeze
- */
-static int drm_class_freeze(struct device *dev)
-{
-       return __drm_class_suspend(dev, PMSG_FREEZE);
-}
-
-/**
- * drm_class_resume - DRM class resume hook
- * @dev: Linux device to resume
- *
- * Just figures out what the actual struct drm_device associated with
- * @dev is and calls its resume hook, if present.
- */
-static int drm_class_resume(struct device *dev)
-{
-       if (dev->type == &drm_sysfs_device_minor) {
-               struct drm_minor *drm_minor = to_drm_minor(dev);
-               struct drm_device *drm_dev = drm_minor->dev;
-
-               if (drm_minor->type == DRM_MINOR_LEGACY &&
-                   !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
-                   drm_dev->driver->resume)
-                       return drm_dev->driver->resume(drm_dev);
-       }
-       return 0;
-}
-
-static const struct dev_pm_ops drm_class_dev_pm_ops = {
-       .suspend        = drm_class_suspend,
-       .resume         = drm_class_resume,
-       .freeze         = drm_class_freeze,
-};
-
 static char *drm_devnode(struct device *dev, umode_t *mode)
 {
        return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -131,8 +62,6 @@ int drm_sysfs_init(void)
        if (IS_ERR(drm_class))
                return PTR_ERR(drm_class);
 
-       drm_class->pm = &drm_class_dev_pm_ops;
-
        err = class_create_file(drm_class, &class_attr_version.attr);
        if (err) {
                class_destroy(drm_class);
index ac9f4b3ec615c5fb73cd00c4947d985ab4fa18b4..caa4e4ca616dbe3100c006f0453d504942049e26 100644 (file)
@@ -80,7 +80,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
 
 #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
-       tmp |= _PAGE_NO_CACHE;
+       tmp = pgprot_noncached_wc(tmp);
 #endif
        return tmp;
 }
@@ -593,7 +593,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
                         * pages and mappings in fault()
                         */
 #if defined(__powerpc__)
-                       pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+                       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 #endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
@@ -670,57 +670,3 @@ void drm_legacy_vma_flush(struct drm_device *dev)
                kfree(vma);
        }
 }
-
-int drm_vma_info(struct seq_file *m, void *data)
-{
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct drm_vma_entry *pt;
-       struct vm_area_struct *vma;
-       unsigned long vma_count = 0;
-#if defined(__i386__)
-       unsigned int pgprot;
-#endif
-
-       mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(pt, &dev->vmalist, head)
-               vma_count++;
-
-       seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
-                  vma_count, high_memory,
-                  (void *)(unsigned long)virt_to_phys(high_memory));
-
-       list_for_each_entry(pt, &dev->vmalist, head) {
-               vma = pt->vma;
-               if (!vma)
-                       continue;
-               seq_printf(m,
-                          "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
-                          pt->pid,
-                          (void *)vma->vm_start, (void *)vma->vm_end,
-                          vma->vm_flags & VM_READ ? 'r' : '-',
-                          vma->vm_flags & VM_WRITE ? 'w' : '-',
-                          vma->vm_flags & VM_EXEC ? 'x' : '-',
-                          vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
-                          vma->vm_flags & VM_LOCKED ? 'l' : '-',
-                          vma->vm_flags & VM_IO ? 'i' : '-',
-                          vma->vm_pgoff);
-
-#if defined(__i386__)
-               pgprot = pgprot_val(vma->vm_page_prot);
-               seq_printf(m, " %c%c%c%c%c%c%c%c%c",
-                          pgprot & _PAGE_PRESENT ? 'p' : '-',
-                          pgprot & _PAGE_RW ? 'w' : 'r',
-                          pgprot & _PAGE_USER ? 'u' : 's',
-                          pgprot & _PAGE_PWT ? 't' : 'b',
-                          pgprot & _PAGE_PCD ? 'u' : 'c',
-                          pgprot & _PAGE_ACCESSED ? 'a' : '-',
-                          pgprot & _PAGE_DIRTY ? 'd' : '-',
-                          pgprot & _PAGE_PSE ? 'm' : 'k',
-                          pgprot & _PAGE_GLOBAL ? 'g' : 'l');
-#endif
-               seq_printf(m, "\n");
-       }
-       mutex_unlock(&dev->struct_mutex);
-       return 0;
-}
index 2f2ecde8285bda5fdaa77aaf47fdb112fbed6db8..f306c885597869051eafd3924fb312bf78c6cf49 100644 (file)
@@ -127,6 +127,9 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
  * used to implement weakly referenced lookups using kref_get_unless_zero().
  *
  * Example:
+ *
+ * ::
+ *
  *     drm_vma_offset_lock_lookup(mgr);
  *     node = drm_vma_offset_lookup_locked(mgr);
  *     if (node)
index 3d4f56df8359e6ae81a77c574e9e9a42105b6963..ffd1b32caa8d5b1b3a00b1dfbdf065b375e46496 100644 (file)
@@ -91,10 +91,8 @@ static void load_gpu(struct drm_device *dev)
                        int ret;
 
                        ret = etnaviv_gpu_init(g);
-                       if (ret) {
-                               dev_err(g->dev, "hw init failed: %d\n", ret);
+                       if (ret)
                                priv->gpu[i] = NULL;
-                       }
                }
        }
 }
@@ -496,7 +494,6 @@ static struct drm_driver etnaviv_drm_driver = {
                                DRIVER_RENDER,
        .open               = etnaviv_open,
        .preclose           = etnaviv_preclose,
-       .set_busid          = drm_platform_set_busid,
        .gem_free_object_unlocked = etnaviv_gem_free_object,
        .gem_vm_ops         = &vm_ops,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
index ff6aa5dfb2d7574556f9f8d97310c4c127f1c212..87ef34150d466903ea128eddf599718eb5836dcb 100644 (file)
@@ -487,6 +487,47 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
        return 0;
 }
 
+static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
+{
+       u32 pmc, ppc;
+
+       /* enable clock gating */
+       ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+       ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+
+       /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
+       if (gpu->identity.revision == 0x4301 ||
+           gpu->identity.revision == 0x4302)
+               ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
+
+       gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
+
+       pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
+
+       /* Disable PA clock gating for GC400+ except for GC420 */
+       if (gpu->identity.model >= chipModel_GC400 &&
+           gpu->identity.model != chipModel_GC420)
+               pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
+
+       /*
+        * Disable PE clock gating on revs < 5.0.0.0 when HZ is
+        * present without a bug fix.
+        */
+       if (gpu->identity.revision < 0x5000 &&
+           gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
+           !(gpu->identity.minor_features1 &
+             chipMinorFeatures1_DISABLE_PE_GATING))
+               pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
+
+       if (gpu->identity.revision < 0x5422)
+               pmc |= BIT(15); /* Unknown bit */
+
+       pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
+       pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
+
+       gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
+}
+
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
        u16 prefetch;
@@ -506,6 +547,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
                gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
        }
 
+       /* enable module-level clock gating */
+       etnaviv_gpu_enable_mlcg(gpu);
+
        /*
         * Update GPU AXI cache attribute to "cacheable, no allocate".
         * This is necessary to prevent the iMX6 SoC locking up.
@@ -553,8 +597,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        bool mmuv2;
 
        ret = pm_runtime_get_sync(gpu->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               dev_err(gpu->dev, "Failed to enable GPU power domain\n");
                return ret;
+       }
 
        etnaviv_hw_identify(gpu);
 
@@ -591,8 +637,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        }
 
        ret = etnaviv_hw_reset(gpu);
-       if (ret)
+       if (ret) {
+               dev_err(gpu->dev, "GPU reset failed\n");
                goto fail;
+       }
 
        /* Set up IOMMU. Eventually we will (I think) do this once per context
         * and have separate page tables per context.  For now, to keep things
@@ -610,12 +658,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        }
 
        if (!iommu) {
+               dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
                ret = -ENOMEM;
                goto fail;
        }
 
        gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
        if (!gpu->mmu) {
+               dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
                iommu_domain_free(iommu);
                ret = -ENOMEM;
                goto fail;
index f5321e2f25ffd293caa19a6fca32672adfe9a939..a69cdd526bf83149af1ee486c34d96a6e1913123 100644 (file)
@@ -125,7 +125,7 @@ struct etnaviv_gpu {
        u32 completed_fence;
        u32 retired_fence;
        wait_queue_head_t fence_event;
-       unsigned int fence_context;
+       u64 fence_context;
        spinlock_t fence_spinlock;
 
        /* worker for handling active-list retiring: */
index 522cfd4478922d2ccde1f4b667ca39db761232e9..16353ee816516ef33348832ef31a5a089a95829b 100644 (file)
@@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
 
        etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
        etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+       etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
        etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
        etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
 
index 6a7de5f1454a51532c0809e2e741c53db4fba4e9..807a3d9e0dd5af0a8e0f3944fa748a41ac16c021 100644 (file)
@@ -218,6 +218,13 @@ Copyright (C) 2015
 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SH 0x00000008
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA 0x00000010
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE 0x00000020
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA 0x00000040
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX 0x00000080
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ      0x00010000
+#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ      0x00020000
 
 #define VIVS_PM_MODULE_STATUS                                  0x00000108
 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE            0x00000001
index d814b3048ee5bfe1b0ee08cccfcfd8ff9e6ad1de..83f61c513b7e84f49e13eb037abda0a141854ed5 100644 (file)
@@ -2,10 +2,6 @@ config DRM_EXYNOS
        tristate "DRM Support for Samsung SoC EXYNOS Series"
        depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select VIDEOMODE_HELPERS
        help
          Choose this option if you have a Samsung SoC EXYNOS chipset.
@@ -15,7 +11,7 @@ if DRM_EXYNOS
 
 config DRM_EXYNOS_IOMMU
        bool
-       depends on EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+       depends on EXYNOS_IOMMU
        default y
 
 comment "CRTCs"
index f6223f907c158fbdfcf176da6aef13536966c5bb..7f9901b7777b36786380c447c784418f4d7e3b07 100644 (file)
@@ -31,7 +31,6 @@
 #include "exynos_drm_plane.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
 #include "exynos_drm_iommu.h"
 
 /*
index 468498e3fec15923f25476b163a3f82535a34a80..4f0850585b8e0312344757104dc35c0e793234ad 100644 (file)
@@ -34,7 +34,7 @@
 
 struct exynos_dp_device {
        struct drm_encoder         encoder;
-       struct drm_connector       connector;
+       struct drm_connector       *connector;
        struct drm_bridge          *ptn_bridge;
        struct drm_device          *drm_dev;
        struct device              *dev;
@@ -67,10 +67,10 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
        return exynos_dp_crtc_clock_enable(plat_data, false);
 }
 
-static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
+static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
+                              struct drm_connector *connector)
 {
        struct exynos_dp_device *dp = to_dp(plat_data);
-       struct drm_connector *connector = &dp->connector;
        struct drm_display_mode *mode;
        int num_modes = 0;
 
@@ -103,6 +103,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
        int ret;
 
        drm_connector_register(connector);
+       dp->connector = connector;
 
        /* Pre-empt DP connector creation if there's a bridge */
        if (dp->ptn_bridge) {
index 011211e4167d41dce17d56e8baee0b168b37e7b6..edbd98ff293ec913008e7ec60cee20d85c54fa56 100644 (file)
@@ -15,7 +15,6 @@
 #include <drm/drmP.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_fbdev.h"
 
 static LIST_HEAD(exynos_drm_subdrv_list);
 
index 5e38e749ac17e17898d4bcc54ad90d4b08301047..ad6b73c7fc59725e4aa3a3e77246ad348e993a1a 100644 (file)
@@ -93,17 +93,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
        return 0;
 }
 
-static struct drm_encoder *
-exynos_dpi_best_encoder(struct drm_connector *connector)
-{
-       struct exynos_dpi *ctx = connector_to_dpi(connector);
-
-       return &ctx->encoder;
-}
-
 static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
        .get_modes = exynos_dpi_get_modes,
-       .best_encoder = exynos_dpi_best_encoder,
 };
 
 static int exynos_dpi_create_connector(struct drm_encoder *encoder)
index 2dd820e23b0cb43b45f047798d2e5e69b141b4a1..877d2efa28e2171a1d2e07f4feef9a9fdd1eac7c 100644 (file)
@@ -159,12 +159,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
        DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
                 dev_name(private->dma_dev));
 
-       /*
-        * create mapping to manage iommu table and set a pointer to iommu
-        * mapping structure to iommu_mapping of private data.
-        * also this iommu_mapping can be used to check if iommu is supported
-        * or not.
-        */
+       /* create common IOMMU mapping for all devices attached to Exynos DRM */
        ret = drm_create_iommu_mapping(dev);
        if (ret < 0) {
                DRM_ERROR("failed to create iommu mapping.\n");
@@ -267,6 +262,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
 {
        struct exynos_drm_private *priv = dev->dev_private;
        struct exynos_atomic_commit *commit;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
        int i, ret;
 
        commit = kzalloc(sizeof(*commit), GFP_KERNEL);
@@ -288,10 +285,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
        /* Wait until all affected CRTCs have completed previous commits and
         * mark them as pending.
         */
-       for (i = 0; i < dev->mode_config.num_crtc; ++i) {
-               if (state->crtcs[i])
-                       commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
-       }
+       for_each_crtc_in_state(state, crtc, crtc_state, i)
+               commit->crtcs |= drm_crtc_mask(crtc);
 
        wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
 
@@ -299,7 +294,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
        priv->pending |= commit->crtcs;
        spin_unlock(&priv->lock);
 
-       drm_atomic_helper_swap_state(dev, state);
+       drm_atomic_helper_swap_state(state, true);
 
        if (nonblock)
                schedule_work(&commit->work);
@@ -407,7 +402,6 @@ static struct drm_driver exynos_drm_driver = {
        .preclose               = exynos_drm_preclose,
        .lastclose              = exynos_drm_lastclose,
        .postclose              = exynos_drm_postclose,
-       .set_busid              = drm_platform_set_busid,
        .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = exynos_drm_crtc_enable_vblank,
        .disable_vblank         = exynos_drm_crtc_disable_vblank,
index cc33ec9296e7e15b80cf110cc3f6f860a2e226de..b39d521f093d71d344427a32f120d0a83235b46b 100644 (file)
@@ -224,8 +224,6 @@ struct exynos_drm_private {
        struct drm_property *plane_zpos_property;
 
        struct device *dma_dev;
-       unsigned long da_start;
-       unsigned long da_space_size;
        void *mapping;
 
        unsigned int pipe;
index 601ecf8006a7be748b93fcd47036ee64b3336e53..e07cb1fe4860440596debe0636877afe968d4470 100644 (file)
@@ -1566,17 +1566,8 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
        return 0;
 }
 
-static struct drm_encoder *
-exynos_dsi_best_encoder(struct drm_connector *connector)
-{
-       struct exynos_dsi *dsi = connector_to_dsi(connector);
-
-       return &dsi->encoder;
-}
-
 static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
        .get_modes = exynos_dsi_get_modes,
-       .best_encoder = exynos_dsi_best_encoder,
 };
 
 static int exynos_dsi_create_connector(struct drm_encoder *encoder)
index 67dcd6831291fac5f96f26d29ecd9dae8476d8e9..fb49443bfd323be91c91993ad83ef7e053b568d8 100644 (file)
@@ -269,8 +269,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
        struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
        struct drm_framebuffer *fb;
 
-       if (exynos_gem->kvaddr)
-               vunmap(exynos_gem->kvaddr);
+       vunmap(exynos_gem->kvaddr);
 
        /* release drm framebuffer and real buffer */
        if (fb_helper->fb && fb_helper->fb->funcs) {
index 3efe1aa89416701223745cec1ec587fb3ef0e2db..d4721648898524e9dc76ee6476dd9a068c52520a 100644 (file)
@@ -30,7 +30,6 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
 #include "exynos_drm_iommu.h"
@@ -120,7 +119,6 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
        .timing_base = 0x0,
        .has_clksel = 1,
        .has_limited_fmt = 1,
-       .has_hw_trigger = 1,
 };
 
 static struct fimd_driver_data exynos3_fimd_driver_data = {
@@ -171,14 +169,11 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
        .lcdblk_vt_shift = 24,
        .lcdblk_bypass_shift = 15,
        .lcdblk_mic_bypass_shift = 11,
-       .trg_type = I80_HW_TRG,
        .has_shadowcon = 1,
        .has_vidoutcon = 1,
        .has_vtsel = 1,
        .has_mic_bypass = 1,
        .has_dp_clk = 1,
-       .has_hw_trigger = 1,
-       .has_trigger_per_te = 1,
 };
 
 struct fimd_context {
index 493552368295288c091dff811ab02c7939dac2e0..8564c3da0d222568bdd41cf8063d097dcf665081 100644 (file)
 
 /* registers for base address */
 #define G2D_SRC_BASE_ADDR              0x0304
-#define G2D_SRC_STRIDE_REG             0x0308
+#define G2D_SRC_STRIDE                 0x0308
 #define G2D_SRC_COLOR_MODE             0x030C
 #define G2D_SRC_LEFT_TOP               0x0310
 #define G2D_SRC_RIGHT_BOTTOM           0x0314
 #define G2D_SRC_PLANE2_BASE_ADDR       0x0318
 #define G2D_DST_BASE_ADDR              0x0404
-#define G2D_DST_STRIDE_REG             0x0408
+#define G2D_DST_STRIDE                 0x0408
 #define G2D_DST_COLOR_MODE             0x040C
 #define G2D_DST_LEFT_TOP               0x0410
 #define G2D_DST_RIGHT_BOTTOM           0x0414
@@ -563,7 +563,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
 
        switch (reg_offset) {
        case G2D_SRC_BASE_ADDR:
-       case G2D_SRC_STRIDE_REG:
+       case G2D_SRC_STRIDE:
        case G2D_SRC_COLOR_MODE:
        case G2D_SRC_LEFT_TOP:
        case G2D_SRC_RIGHT_BOTTOM:
@@ -573,7 +573,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
                reg_type = REG_TYPE_SRC_PLANE2;
                break;
        case G2D_DST_BASE_ADDR:
-       case G2D_DST_STRIDE_REG:
+       case G2D_DST_STRIDE:
        case G2D_DST_COLOR_MODE:
        case G2D_DST_LEFT_TOP:
        case G2D_DST_RIGHT_BOTTOM:
@@ -968,8 +968,8 @@ static int g2d_check_reg_offset(struct device *dev,
                        } else
                                buf_info->types[reg_type] = BUF_TYPE_GEM;
                        break;
-               case G2D_SRC_STRIDE_REG:
-               case G2D_DST_STRIDE_REG:
+               case G2D_SRC_STRIDE:
+               case G2D_DST_STRIDE:
                        if (for_addr)
                                goto err;
 
index 7ca09ee196560bf9c0361371d1f0aeb652dde38b..0f373702414e32342fa98d09ac597d3996f24c58 100644 (file)
 
 #include <linux/dma-mapping.h>
 #include <linux/iommu.h>
-#include <linux/kref.h>
-
-#include <asm/dma-iommu.h>
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_iommu.h"
 
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+       if (!dev->dma_parms)
+               dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+       if (!dev->dma_parms)
+               return -ENOMEM;
+
+       dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+       return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+       kfree(dev->dma_parms);
+       dev->dma_parms = NULL;
+}
+
 /*
  * drm_create_iommu_mapping - create a mapping structure
  *
  */
 int drm_create_iommu_mapping(struct drm_device *drm_dev)
 {
-       struct dma_iommu_mapping *mapping = NULL;
        struct exynos_drm_private *priv = drm_dev->dev_private;
 
-       if (!priv->da_start)
-               priv->da_start = EXYNOS_DEV_ADDR_START;
-       if (!priv->da_space_size)
-               priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
-
-       mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
-                                               priv->da_space_size);
-
-       if (IS_ERR(mapping))
-               return PTR_ERR(mapping);
-
-       priv->mapping = mapping;
-
-       return 0;
+       return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
+                                            EXYNOS_DEV_ADDR_SIZE);
 }
 
 /*
  * drm_release_iommu_mapping - release iommu mapping structure
  *
  * @drm_dev: DRM device
- *
- * if mapping->kref becomes 0 then all things related to iommu mapping
- * will be released
  */
 void drm_release_iommu_mapping(struct drm_device *drm_dev)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
 
-       arm_iommu_release_mapping(priv->mapping);
+       __exynos_iommu_release_mapping(priv);
 }
 
 /*
@@ -77,25 +75,19 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
        struct exynos_drm_private *priv = drm_dev->dev_private;
        int ret;
 
-       if (!priv->mapping)
-               return 0;
-
-       subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
-                                       sizeof(*subdrv_dev->dma_parms),
-                                       GFP_KERNEL);
-       if (!subdrv_dev->dma_parms)
-               return -ENOMEM;
-
-       dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
-
-       if (subdrv_dev->archdata.mapping)
-               arm_iommu_detach_device(subdrv_dev);
+       if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+               DRM_ERROR("Device %s lacks support for IOMMU\n",
+                         dev_name(subdrv_dev));
+               return -EINVAL;
+       }
 
-       ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
-       if (ret < 0) {
-               DRM_DEBUG_KMS("failed iommu attach.\n");
+       ret = configure_dma_max_seg_size(subdrv_dev);
+       if (ret)
                return ret;
-       }
+
+       ret = __exynos_iommu_attach(priv, subdrv_dev);
+       if (ret)
+               clear_dma_max_seg_size(subdrv_dev);
 
        return 0;
 }
@@ -113,10 +105,7 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
                                struct device *subdrv_dev)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
-       struct dma_iommu_mapping *mapping = priv->mapping;
-
-       if (!mapping || !mapping->domain)
-               return;
 
-       arm_iommu_detach_device(subdrv_dev);
+       __exynos_iommu_detach(priv, subdrv_dev);
+       clear_dma_max_seg_size(subdrv_dev);
 }
index 5ffebe02ee4d3ebd29dda3960307ea1ed0a5d2cb..c8de4913fdbe80a3962ebca2744236f523e768b0 100644 (file)
 
 #ifdef CONFIG_DRM_EXYNOS_IOMMU
 
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+
+static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
+                                       unsigned long start, unsigned long size)
+{
+       priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
+                                                size);
+       return PTR_ERR_OR_ZERO(priv->mapping);
+}
+
+static inline void
+__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
+{
+       arm_iommu_release_mapping(priv->mapping);
+}
+
+static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
+                                       struct device *dev)
+{
+       if (dev->archdata.mapping)
+               arm_iommu_detach_device(dev);
+
+       return arm_iommu_attach_device(dev, priv->mapping);
+}
+
+static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
+                                        struct device *dev)
+{
+       arm_iommu_detach_device(dev);
+}
+
+#elif defined(CONFIG_IOMMU_DMA)
+#include <linux/dma-iommu.h>
+
+static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
+                                       unsigned long start, unsigned long size)
+{
+       struct iommu_domain *domain;
+       int ret;
+
+       domain = iommu_domain_alloc(priv->dma_dev->bus);
+       if (!domain)
+               return -ENOMEM;
+
+       ret = iommu_get_dma_cookie(domain);
+       if (ret)
+               goto free_domain;
+
+       ret = iommu_dma_init_domain(domain, start, size);
+       if (ret)
+               goto put_cookie;
+
+       priv->mapping = domain;
+       return 0;
+
+put_cookie:
+       iommu_put_dma_cookie(domain);
+free_domain:
+       iommu_domain_free(domain);
+       return ret;
+}
+
+static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
+{
+       struct iommu_domain *domain = priv->mapping;
+
+       iommu_put_dma_cookie(domain);
+       iommu_domain_free(domain);
+       priv->mapping = NULL;
+}
+
+static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
+                                       struct device *dev)
+{
+       struct iommu_domain *domain = priv->mapping;
+
+       return iommu_attach_device(domain, dev);
+}
+
+static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
+                                        struct device *dev)
+{
+       struct iommu_domain *domain = priv->mapping;
+
+       iommu_detach_device(domain, dev);
+}
+#else
+#error Unsupported architecture and IOMMU/DMA-mapping glue code
+#endif
+
 int drm_create_iommu_mapping(struct drm_device *drm_dev);
 
 void drm_release_iommu_mapping(struct drm_device *drm_dev);
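
Both backends export the same four static inline helpers, so the wrappers above compile unchanged whether the ARM dma-iommu glue or the generic IOMMU-DMA layer is selected at build time. A hedged sketch of the intended call pattern from a consumer's point of view (hypothetical function, error handling condensed):

static int example_bind(struct drm_device *drm, struct device *subdev)
{
        int ret;

        ret = drm_create_iommu_mapping(drm);    /* backend chosen at build time */
        if (ret)
                return ret;

        ret = drm_iommu_attach_device(drm, subdev);
        if (ret)
                drm_release_iommu_mapping(drm);

        return ret;
}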
index 55f1d37c666a9cde7d2fd9d92322fc31f3294a5c..77f12c00abf94263ab58de839ddaf3349f29ac87 100644 (file)
@@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
            state->v_ratio == (1 << 15))
                height_ok = true;
 
-       if (width_ok & height_ok)
+       if (width_ok && height_ok)
                return 0;
 
        DRM_DEBUG_KMS("scaling mode is not supported");
index 608b0afa337f5429e5d6eaf6466c66693ece6134..e8f6c92b2a360632ed1f293cb00cafa0e5e4e490 100644 (file)
@@ -378,16 +378,8 @@ static int vidi_get_modes(struct drm_connector *connector)
        return drm_add_edid_modes(connector, edid);
 }
 
-static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
-{
-       struct vidi_context *ctx = ctx_from_connector(connector);
-
-       return &ctx->encoder;
-}
-
 static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
        .get_modes = vidi_get_modes,
-       .best_encoder = vidi_best_encoder,
 };
 
 static int vidi_create_connector(struct drm_encoder *encoder)
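
Dropping .best_encoder here (and in the hdmi and fsl-dcu connectors below) is safe because these are atomic drivers with a single encoder per connector: when the hook is NULL, the atomic helpers fall back to drm_atomic_helper_best_encoder(), which resolves to the one attached encoder. Roughly equivalent to the removed boilerplate (sketch, assuming a one-encoder connector):

static struct drm_encoder *default_best_encoder(struct drm_connector *connector)
{
        /* first (and only) encoder registered for this connector */
        return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
}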
index 58de5a4305080bfe3ec15b02c6684b4b2ee19848..2275efe41acd3b73af9bb80bc377666039740fce 100644 (file)
@@ -937,17 +937,9 @@ static int hdmi_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
-{
-       struct hdmi_context *hdata = connector_to_hdmi(connector);
-
-       return &hdata->encoder;
-}
-
 static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
        .get_modes = hdmi_get_modes,
        .mode_valid = hdmi_mode_valid,
-       .best_encoder = hdmi_best_encoder,
 };
 
 static int hdmi_create_connector(struct drm_encoder *encoder)
@@ -1828,6 +1820,7 @@ static int hdmi_probe(struct platform_device *pdev)
                DRM_ERROR("Failed to find ddc node in device tree\n");
                return -ENODEV;
        }
+       of_node_put(dev->of_node);
 
 out_get_ddc_adpt:
        hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
@@ -1846,6 +1839,7 @@ out_get_ddc_adpt:
                ret = -ENODEV;
                goto err_ddc;
        }
+       of_node_put(dev->of_node);
 
 out_get_phy_port:
        if (hdata->drv_data->is_apb_phy) {
index b9c714de6e40cdfd2dbb48b0e5a5311076a286ca..14a72c4c496d2469eccee871f22eb4ccb52eb17a 100644 (file)
@@ -5,12 +5,7 @@ config DRM_FSL_DCU
        select BACKLIGHT_LCD_SUPPORT
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_PANEL
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_SYS_FOPS
        select REGMAP_MMIO
        select VIDEOMODE_HELPERS
        help
index 89c0084c28140091c129a72304ef129186c91a4b..3371635cd4d707192e39104ed1bc13671fc19c69 100644 (file)
 #include "fsl_dcu_drm_drv.h"
 #include "fsl_dcu_drm_plane.h"
 
-static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
                                          struct drm_crtc_state *old_crtc_state)
 {
-}
+       struct drm_pending_vblank_event *event = crtc->state->event;
 
-static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc,
-                                        struct drm_crtc_state *state)
-{
-       return 0;
-}
+       if (event) {
+               crtc->state->event = NULL;
 
-static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
-                                         struct drm_crtc_state *old_crtc_state)
-{
+               spin_lock_irq(&crtc->dev->event_lock);
+               if (drm_crtc_vblank_get(crtc) == 0)
+                       drm_crtc_arm_vblank_event(crtc, event);
+               else
+                       drm_crtc_send_vblank_event(crtc, event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+       }
 }
 
 static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
@@ -43,6 +44,8 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 
+       drm_crtc_vblank_off(crtc);
+
        regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
                           DCU_MODE_DCU_MODE_MASK,
                           DCU_MODE_DCU_MODE(DCU_MODE_OFF));
@@ -60,6 +63,8 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
                           DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
        regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
                     DCU_UPDATE_MODE_READREG);
+
+       drm_crtc_vblank_on(crtc);
 }
 
 static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
@@ -117,8 +122,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }
 
 static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
-       .atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
-       .atomic_check = fsl_dcu_drm_crtc_atomic_check,
        .atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
        .disable = fsl_dcu_drm_disable_crtc,
        .enable = fsl_dcu_drm_crtc_enable,
@@ -138,9 +141,10 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
 {
        struct drm_plane *primary;
        struct drm_crtc *crtc = &fsl_dev->crtc;
-       unsigned int i, j, reg_num;
        int ret;
 
+       fsl_dcu_drm_init_planes(fsl_dev->drm);
+
        primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
        if (!primary)
                return -ENOMEM;
@@ -154,19 +158,5 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
 
        drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
 
-       if (!strcmp(fsl_dev->soc->name, "ls1021a"))
-               reg_num = LS1021A_LAYER_REG_NUM;
-       else
-               reg_num = VF610_LAYER_REG_NUM;
-       for (i = 0; i < fsl_dev->soc->total_layer; i++) {
-               for (j = 1; j <= reg_num; j++)
-                       regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
-       }
-       regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-                          DCU_MODE_DCU_MODE_MASK,
-                          DCU_MODE_DCU_MODE(DCU_MODE_OFF));
-       regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-                    DCU_UPDATE_MODE_READREG);
-
        return 0;
 }
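
The new atomic_flush hook completes the page-flip event using the standard vblank idiom, which the kirin ADE driver later in this diff adopts verbatim. The idea, as a commented sketch of the exact sequence added above:

        spin_lock_irq(&crtc->dev->event_lock);
        if (drm_crtc_vblank_get(crtc) == 0)
                /* vblank irq available: deliver the event at the next
                 * vblank; the vblank code drops the reference for us */
                drm_crtc_arm_vblank_event(crtc, event);
        else
                /* vblank off (e.g. CRTC disabled): complete immediately so
                 * user space never blocks on a flip that cannot signal */
                drm_crtc_send_vblank_event(crtc, event);
        spin_unlock_irq(&crtc->dev->event_lock);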
index 0ec1ad961e0dbee6bf50de70cf05f23592241f51..7882387f9bffd5c661f30bc5800e58672a0a3bd8 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/console.h>
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mm.h>
@@ -22,6 +23,7 @@
 #include <linux/regmap.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -42,7 +44,6 @@ static const struct regmap_config fsl_dcu_regmap_config = {
        .reg_bits = 32,
        .reg_stride = 4,
        .val_bits = 32,
-       .cache_type = REGCACHE_RBTREE,
 
        .volatile_reg = fsl_dcu_drm_is_volatile_reg,
 };
@@ -198,7 +199,7 @@ static struct drm_driver fsl_dcu_drm_driver = {
        .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = fsl_dcu_drm_enable_vblank,
        .disable_vblank         = fsl_dcu_drm_disable_vblank,
-       .gem_free_object        = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
@@ -228,11 +229,26 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
        if (!fsl_dev)
                return 0;
 
+       disable_irq(fsl_dev->irq);
        drm_kms_helper_poll_disable(fsl_dev->drm);
-       regcache_cache_only(fsl_dev->regmap, true);
-       regcache_mark_dirty(fsl_dev->regmap);
-       clk_disable(fsl_dev->clk);
-       clk_unprepare(fsl_dev->clk);
+
+       console_lock();
+       drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
+       console_unlock();
+
+       fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
+       if (IS_ERR(fsl_dev->state)) {
+               console_lock();
+               drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
+               console_unlock();
+
+               drm_kms_helper_poll_enable(fsl_dev->drm);
+               enable_irq(fsl_dev->irq);
+               return PTR_ERR(fsl_dev->state);
+       }
+
+       clk_disable_unprepare(fsl_dev->pix_clk);
+       clk_disable_unprepare(fsl_dev->clk);
 
        return 0;
 }
@@ -245,21 +261,27 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
        if (!fsl_dev)
                return 0;
 
-       ret = clk_enable(fsl_dev->clk);
+       ret = clk_prepare_enable(fsl_dev->clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable dcu clk\n");
-               clk_unprepare(fsl_dev->clk);
                return ret;
        }
-       ret = clk_prepare(fsl_dev->clk);
+
+       ret = clk_prepare_enable(fsl_dev->pix_clk);
        if (ret < 0) {
-               dev_err(dev, "failed to prepare dcu clk\n");
+               dev_err(dev, "failed to enable pix clk\n");
                return ret;
        }
 
+       fsl_dcu_drm_init_planes(fsl_dev->drm);
+       drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
+
+       console_lock();
+       drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
+       console_unlock();
+
        drm_kms_helper_poll_enable(fsl_dev->drm);
-       regcache_cache_only(fsl_dev->regmap, false);
-       regcache_sync(fsl_dev->regmap);
+       enable_irq(fsl_dev->irq);
 
        return 0;
 }
@@ -273,12 +295,14 @@ static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
        .name = "ls1021a",
        .total_layer = 16,
        .max_layer = 4,
+       .layer_regs = LS1021A_LAYER_REG_NUM,
 };
 
 static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
        .name = "vf610",
        .total_layer = 64,
        .max_layer = 6,
+       .layer_regs = VF610_LAYER_REG_NUM,
 };
 
 static const struct of_device_id fsl_dcu_of_match[] = {
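
The reworked suspend/resume pairs drm_atomic_helper_suspend() with drm_atomic_helper_resume(): suspend returns a drm_atomic_state snapshot (stored in fsl_dev->state) that resume replays after the clocks and plane registers are brought back up, which is why fsl_dcu_drm_init_planes() runs first. A condensed skeleton of the suspend ordering (error paths trimmed, names as in this driver):

static int example_suspend(struct fsl_dcu_drm_device *fsl_dev)
{
        disable_irq(fsl_dev->irq);
        drm_kms_helper_poll_disable(fsl_dev->drm);

        fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
        if (IS_ERR(fsl_dev->state))
                return PTR_ERR(fsl_dev->state); /* caller re-enables poll/irq */

        clk_disable_unprepare(fsl_dev->pix_clk);
        clk_disable_unprepare(fsl_dev->clk);
        return 0;
}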
index c275f900ff231a564256bc84c2b1252b90dde6f1..3b371fe7491e164e2f3507f7d8283a677cbdfa11 100644 (file)
@@ -175,6 +175,7 @@ struct fsl_dcu_soc_data {
        unsigned int total_layer;
        /* maximum number of layers supported by the DCU */
        unsigned int max_layer;
+       unsigned int layer_regs;
 };
 
 struct fsl_dcu_drm_device {
@@ -193,6 +194,7 @@ struct fsl_dcu_drm_device {
        struct drm_encoder encoder;
        struct fsl_dcu_drm_connector connector;
        const struct fsl_dcu_soc_data *soc;
+       struct drm_atomic_state *state;
 };
 
 void fsl_dcu_fbdev_init(struct drm_device *dev);
index c564ec612b59bd16a561897f3ce12e3dc5016a66..d9d6cc1c8e39080fc1ecc7ec9a1b47cf22ff3b1e 100644 (file)
@@ -37,23 +37,22 @@ int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
 
        ret = fsl_dcu_drm_crtc_create(fsl_dev);
        if (ret)
-               return ret;
+               goto err;
 
        ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
        if (ret)
-               goto fail_encoder;
+               goto err;
 
-       ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder);
+       ret = fsl_dcu_create_outputs(fsl_dev);
        if (ret)
-               goto fail_connector;
+               goto err;
 
        drm_mode_config_reset(fsl_dev->drm);
        drm_kms_helper_poll_init(fsl_dev->drm);
 
        return 0;
-fail_encoder:
-       fsl_dev->crtc.funcs->destroy(&fsl_dev->crtc);
-fail_connector:
-       fsl_dev->encoder.funcs->destroy(&fsl_dev->encoder);
+
+err:
+       drm_mode_config_cleanup(fsl_dev->drm);
        return ret;
 }
index 7093109fbc21279636273f49624a7f06b602f21b..5a7b88e19e44f1fc1438914462200e1240cbb267 100644 (file)
@@ -25,9 +25,8 @@ to_fsl_dcu_connector(struct drm_connector *con)
                     : NULL;
 }
 
-int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
-                                struct drm_encoder *encoder);
 int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
                               struct drm_crtc *crtc);
+int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev);
 
 #endif /* __FSL_DCU_DRM_CONNECTOR_H__ */
index 274558b3b32b2804c60695fe08ca0e4753c1bfad..e50467a0deb0451aa9c5723f8c97806f21add3d2 100644 (file)
@@ -217,6 +217,22 @@ static const u32 fsl_dcu_drm_plane_formats[] = {
        DRM_FORMAT_YUV422,
 };
 
+void fsl_dcu_drm_init_planes(struct drm_device *dev)
+{
+       struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+       int i, j;
+
+       for (i = 0; i < fsl_dev->soc->total_layer; i++) {
+               for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
+                       regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
+       }
+       regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
+                          DCU_MODE_DCU_MODE_MASK,
+                          DCU_MODE_DCU_MODE(DCU_MODE_OFF));
+       regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
+                    DCU_UPDATE_MODE_READREG);
+}
+
 struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
 {
        struct drm_plane *primary;
index d657f088d859369fddc797e7c7b166210f32dc25..8ee45f813ee87cecfa60b270677ab4bfd977af73 100644 (file)
@@ -12,6 +12,7 @@
 #ifndef __FSL_DCU_DRM_PLANE_H__
 #define __FSL_DCU_DRM_PLANE_H__
 
+void fsl_dcu_drm_init_planes(struct drm_device *dev);
 struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
 
 #endif /* __FSL_DCU_DRM_PLANE_H__ */
index 98c998da91eb04f55824aa55b3ffb6cdbb030b8c..26edcc899712d16db8959cbb2a5b01c932431c0b 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/backlight.h>
+#include <linux/of_graph.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
@@ -102,14 +103,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
        .reset = drm_atomic_helper_connector_reset,
 };
 
-static struct drm_encoder *
-fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
-{
-       struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
-
-       return fsl_con->encoder;
-}
-
 static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
 {
        struct fsl_dcu_drm_connector *fsl_connector;
@@ -136,17 +129,16 @@ static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
 }
 
 static const struct drm_connector_helper_funcs connector_helper_funcs = {
-       .best_encoder = fsl_dcu_drm_connector_best_encoder,
        .get_modes = fsl_dcu_drm_connector_get_modes,
        .mode_valid = fsl_dcu_drm_connector_mode_valid,
 };
 
-int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
-                                struct drm_encoder *encoder)
+static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
+                                struct drm_panel *panel)
 {
+       struct drm_encoder *encoder = &fsl_dev->encoder;
        struct drm_connector *connector = &fsl_dev->connector.base;
        struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
-       struct device_node *panel_node;
        int ret;
 
        fsl_dev->connector.encoder = encoder;
@@ -170,21 +162,7 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
                                      mode_config->dpms_property,
                                      DRM_MODE_DPMS_OFF);
 
-       panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
-       if (!panel_node) {
-               dev_err(fsl_dev->dev, "fsl,panel property not found\n");
-               ret = -ENODEV;
-               goto err_sysfs;
-       }
-
-       fsl_dev->connector.panel = of_drm_find_panel(panel_node);
-       if (!fsl_dev->connector.panel) {
-               ret = -EPROBE_DEFER;
-               goto err_panel;
-       }
-       of_node_put(panel_node);
-
-       ret = drm_panel_attach(fsl_dev->connector.panel, connector);
+       ret = drm_panel_attach(panel, connector);
        if (ret) {
                dev_err(fsl_dev->dev, "failed to attach panel\n");
                goto err_sysfs;
@@ -192,11 +170,62 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
 
        return 0;
 
-err_panel:
-       of_node_put(panel_node);
 err_sysfs:
        drm_connector_unregister(connector);
 err_cleanup:
        drm_connector_cleanup(connector);
        return ret;
 }
+
+static int fsl_dcu_attach_endpoint(struct fsl_dcu_drm_device *fsl_dev,
+                                  const struct of_endpoint *ep)
+{
+       struct drm_bridge *bridge;
+       struct device_node *np;
+
+       np = of_graph_get_remote_port_parent(ep->local_node);
+
+       fsl_dev->connector.panel = of_drm_find_panel(np);
+       if (fsl_dev->connector.panel) {
+               of_node_put(np);
+               return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
+       }
+
+       bridge = of_drm_find_bridge(np);
+       of_node_put(np);
+       if (!bridge)
+               return -ENODEV;
+
+       fsl_dev->encoder.bridge = bridge;
+       bridge->encoder = &fsl_dev->encoder;
+
+       return drm_bridge_attach(fsl_dev->drm, bridge);
+}
+
+int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
+{
+       struct of_endpoint ep;
+       struct device_node *ep_node, *panel_node;
+       int ret;
+
+       /* This is for backward compatibility */
+       panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
+       if (panel_node) {
+               fsl_dev->connector.panel = of_drm_find_panel(panel_node);
+               of_node_put(panel_node);
+               if (!fsl_dev->connector.panel)
+                       return -EPROBE_DEFER;
+               return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
+       }
+
+       ep_node = of_graph_get_next_endpoint(fsl_dev->np, NULL);
+       if (!ep_node)
+               return -ENODEV;
+
+       ret = of_graph_parse_endpoint(ep_node, &ep);
+       of_node_put(ep_node);
+       if (ret)
+               return -ENODEV;
+
+       return fsl_dcu_attach_endpoint(fsl_dev, &ep);
+}
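
fsl_dcu_create_outputs() keeps the legacy "fsl,panel" phandle working while preferring the OF-graph endpoint; the remote node is then tried first as a panel, then as a bridge. The node resolution condenses to the usual OF-graph pattern (sketch; the caller owns the returned reference):

static struct device_node *first_remote_node(struct device_node *np)
{
        struct device_node *ep, *remote;

        ep = of_graph_get_next_endpoint(np, NULL);      /* first endpoint */
        if (!ep)
                return NULL;

        remote = of_graph_get_remote_port_parent(ep);
        of_node_put(ep);
        return remote;  /* of_node_put() when done */
}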
index bbe34f1c0505f7470b900831636d15e1b8d5ac95..bca09ea24632424f6899ff5c9ac9b4a1017a2300 100644 (file)
@@ -92,6 +92,7 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
                goto err_node_put;
        }
 
+       of_node_put(np);
        clk_prepare_enable(tcon->ipg_clk);
 
        dev_info(dev, "Using TCON in bypass mode\n");
index 17f928ec84ea77790e4fef756bee55757f42d8e1..8906d67494fc47d165ec33714ffa62c5b29294d5 100644 (file)
@@ -1,11 +1,7 @@
 config DRM_GMA500
        tristate "Intel GMA5/600 KMS Framebuffer"
        depends on DRM && PCI && X86
-       select FB_CFB_COPYAREA
-       select FB_CFB_FILLRECT
-       select FB_CFB_IMAGEBLIT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
        # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
        select ACPI_VIDEO if ACPI
index c95406e6f44de21f23d6e9093f59151e0924b176..1a1cf7a3b5ef00f5131682f5e709deb9cbf97f47 100644 (file)
@@ -175,20 +175,21 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
        }
 }
 
-void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
-                       u32 start, u32 size)
+int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
+                      u32 size)
 {
        struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
        int i;
-       int end = (start + size > 256) ? 256 : start + size;
 
-       for (i = start; i < end; i++) {
+       for (i = 0; i < size; i++) {
                gma_crtc->lut_r[i] = red[i] >> 8;
                gma_crtc->lut_g[i] = green[i] >> 8;
                gma_crtc->lut_b[i] = blue[i] >> 8;
        }
 
        gma_crtc_load_lut(crtc);
+
+       return 0;
 }
 
 /**
@@ -281,7 +282,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
                REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
 
                /* Turn off vblank interrupts */
-               drm_vblank_off(dev, pipe);
+               drm_crtc_vblank_off(crtc);
 
                /* Wait for vblank for the disable to take effect */
                gma_wait_for_vblank(dev);
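
The .gamma_set change tracks the core API update: the hook lost its start offset and now returns int, since the legacy gamma ioctl always rewrites the complete LUT from index 0. The new calling convention, roughly (hypothetical caller, table sizes illustrative):

        u16 r[256], g[256], b[256];
        int ret;

        /* ... fill the full lookup table ... */
        ret = crtc->funcs->gamma_set(crtc, r, g, b, 256);       /* size, no start */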
index b2491c65f0535e938772dfcf9e886f98e4b8d6e4..e72dd08b701bb7830005e371cbc424b5b3ab37cc 100644 (file)
@@ -72,8 +72,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
                               uint32_t width, uint32_t height);
 extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
 extern void gma_crtc_load_lut(struct drm_crtc *crtc);
-extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-                              u16 *blue, u32 start, u32 size);
+extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                             u16 *blue, u32 size);
 extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
 extern void gma_crtc_prepare(struct drm_crtc *crtc);
 extern void gma_crtc_commit(struct drm_crtc *crtc);
index 398015be87e443885387342c752edd6dd74fcd9d..7b6c849250984149f871ea9075b995def2291021 100644 (file)
@@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct gma_crtc *gma_crtc;
        int i;
-       uint16_t *r_base, *g_base, *b_base;
 
        /* We allocate an extra array of drm_connector pointers
         * for fbdev after the crtc */
@@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
        gma_crtc->pipe = pipe;
        gma_crtc->plane = pipe;
 
-       r_base = gma_crtc->base.gamma_store;
-       g_base = r_base + 256;
-       b_base = g_base + 256;
        for (i = 0; i < 256; i++) {
                gma_crtc->lut_r[i] = i;
                gma_crtc->lut_g[i] = i;
                gma_crtc->lut_b[i] = i;
-               r_base[i] = i << 8;
-               g_base[i] = i << 8;
-               b_base[i] = i << 8;
 
                gma_crtc->lut_adj[i] = 0;
        }
index ea0df6115f7e96fa52a8e295b21deb4a6b30100e..499f64405daca9729b3e00da06f4ec06e7dedc00 100644 (file)
@@ -4,6 +4,7 @@ config DRM_HISI_KIRIN
        select DRM_KMS_HELPER
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
+       select HISI_KIRIN_DW_DSI
        help
          Choose this option if you have a HiSilicon Kirin chipset (hi6220).
          If M is selected the module will be called kirin-drm.
index fba6372d060e2124dfa9cdf9208858ccf044ff90..c3707d47cd89762c592f7d4f644c004bea27ac74 100644 (file)
@@ -487,6 +487,7 @@ static void ade_crtc_enable(struct drm_crtc *crtc)
        ade_set_medianoc_qos(acrtc);
        ade_display_enable(acrtc);
        ade_dump_regs(ctx->base);
+       drm_crtc_vblank_on(crtc);
        acrtc->enable = true;
 }
 
@@ -498,17 +499,11 @@ static void ade_crtc_disable(struct drm_crtc *crtc)
        if (!acrtc->enable)
                return;
 
+       drm_crtc_vblank_off(crtc);
        ade_power_down(ctx);
        acrtc->enable = false;
 }
 
-static int ade_crtc_atomic_check(struct drm_crtc *crtc,
-                                struct drm_crtc_state *state)
-{
-       /* do nothing */
-       return 0;
-}
-
 static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
        struct ade_crtc *acrtc = to_ade_crtc(crtc);
@@ -537,6 +532,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
 {
        struct ade_crtc *acrtc = to_ade_crtc(crtc);
        struct ade_hw_ctx *ctx = acrtc->ctx;
+       struct drm_pending_vblank_event *event = crtc->state->event;
        void __iomem *base = ctx->base;
 
        /* registers only take effect while the CRTC is enabled */
@@ -545,12 +541,22 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
                /* flush ade registers */
                writel(ADE_ENABLE, base + ADE_EN);
        }
+
+       if (event) {
+               crtc->state->event = NULL;
+
+               spin_lock_irq(&crtc->dev->event_lock);
+               if (drm_crtc_vblank_get(crtc) == 0)
+                       drm_crtc_arm_vblank_event(crtc, event);
+               else
+                       drm_crtc_send_vblank_event(crtc, event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+       }
 }
 
 static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
        .enable         = ade_crtc_enable,
        .disable        = ade_crtc_disable,
-       .atomic_check   = ade_crtc_atomic_check,
        .mode_set_nofb  = ade_crtc_mode_set_nofb,
        .atomic_begin   = ade_crtc_atomic_begin,
        .atomic_flush   = ade_crtc_atomic_flush,
@@ -961,21 +967,21 @@ static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
        }
 
        ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
-       if (!ctx->ade_core_clk) {
+       if (IS_ERR(ctx->ade_core_clk)) {
                DRM_ERROR("failed to parse clk ADE_CORE\n");
-               return -ENODEV;
+               return PTR_ERR(ctx->ade_core_clk);
        }
 
        ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
-       if (!ctx->media_noc_clk) {
+       if (IS_ERR(ctx->media_noc_clk)) {
                DRM_ERROR("failed to parse clk CODEC_JPEG\n");
-           return -ENODEV;
+               return PTR_ERR(ctx->media_noc_clk);
        }
 
        ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
-       if (!ctx->ade_pix_clk) {
+       if (IS_ERR(ctx->ade_pix_clk)) {
                DRM_ERROR("failed to parse clk ADE_PIX\n");
-           return -ENODEV;
+               return PTR_ERR(ctx->ade_pix_clk);
        }
 
        return 0;
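
The clk fixes above correct a common mistake: on failure devm_clk_get() returns an ERR_PTR-encoded errno, not NULL, so the old `!clk` checks could never fire. The canonical pattern:

        struct clk *clk;

        clk = devm_clk_get(dev, "clk_ade_core");
        if (IS_ERR(clk))
                return PTR_ERR(clk);    /* may be -EPROBE_DEFER, propagate as-is */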
index 3f94785fbcca6fe6dde61c77815f7dcc3a22acc7..1edd9bc802947f00cc1868e1e82a13f463c5b537 100644 (file)
@@ -171,9 +171,8 @@ static struct drm_driver kirin_drm_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
                                  DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
        .fops                   = &kirin_drm_fops,
-       .set_busid              = drm_platform_set_busid,
 
-       .gem_free_object        = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .dumb_create            = kirin_gem_cma_dumb_create,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
@@ -221,19 +220,12 @@ static int kirin_drm_bind(struct device *dev)
        if (ret)
                goto err_kms_cleanup;
 
-       /* connectors should be registered after drm device register */
-       ret = drm_connector_register_all(drm_dev);
-       if (ret)
-               goto err_drm_dev_unregister;
-
        DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
                 driver->name, driver->major, driver->minor, driver->patchlevel,
                 driver->date, drm_dev->primary->index);
 
        return 0;
 
-err_drm_dev_unregister:
-       drm_dev_unregister(drm_dev);
 err_kms_cleanup:
        kirin_drm_kms_cleanup(drm_dev);
 err_drm_dev_unref:
@@ -246,7 +238,6 @@ static void kirin_drm_unbind(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
 
-       drm_connector_unregister_all(drm_dev);
        drm_dev_unregister(drm_dev);
        kirin_drm_kms_cleanup(drm_dev);
        drm_dev_unref(drm_dev);
index 22c7ed63a001df43190a26ffc070a69c183f729c..4d341db462a2442878991e45a3438d770f1afe64 100644 (file)
@@ -1,12 +1,6 @@
 menu "I2C encoder or helper chips"
      depends on DRM && DRM_KMS_HELPER && I2C
 
-config DRM_I2C_ADV7511
-       tristate "AV7511 encoder"
-       select REGMAP_I2C
-       help
-         Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
-
 config DRM_I2C_CH7006
        tristate "Chrontel ch7006 TV encoder"
        default m if DRM_NOUVEAU
index 2c72eb584ab7c628254ecfec90d5de70773fab4e..43aa33baebed8e7888c07c0f818848cc1adb80f3 100644 (file)
@@ -1,7 +1,5 @@
 ccflags-y := -Iinclude/drm
 
-obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
-
 ch7006-y := ch7006_drv.o ch7006_mode.o
 obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
 
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
deleted file mode 100644 (file)
index a02112b..0000000
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- * Analog Devices ADV7511 HDMI transmitter driver
- *
- * Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#include <linux/device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/slab.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
-
-#include "adv7511.h"
-
-struct adv7511 {
-       struct i2c_client *i2c_main;
-       struct i2c_client *i2c_edid;
-
-       struct regmap *regmap;
-       struct regmap *packet_memory_regmap;
-       enum drm_connector_status status;
-       bool powered;
-
-       unsigned int f_tmds;
-
-       unsigned int current_edid_segment;
-       uint8_t edid_buf[256];
-       bool edid_read;
-
-       wait_queue_head_t wq;
-       struct drm_encoder *encoder;
-
-       bool embedded_sync;
-       enum adv7511_sync_polarity vsync_polarity;
-       enum adv7511_sync_polarity hsync_polarity;
-       bool rgb;
-
-       struct edid *edid;
-
-       struct gpio_desc *gpio_pd;
-};
-
-static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
-{
-       return to_encoder_slave(encoder)->slave_priv;
-}
-
-/* ADI recommended values for proper operation. */
-static const struct reg_sequence adv7511_fixed_registers[] = {
-       { 0x98, 0x03 },
-       { 0x9a, 0xe0 },
-       { 0x9c, 0x30 },
-       { 0x9d, 0x61 },
-       { 0xa2, 0xa4 },
-       { 0xa3, 0xa4 },
-       { 0xe0, 0xd0 },
-       { 0xf9, 0x00 },
-       { 0x55, 0x02 },
-};
-
-/* -----------------------------------------------------------------------------
- * Register access
- */
-
-static const uint8_t adv7511_register_defaults[] = {
-       0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
-       0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
-       0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
-       0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
-       0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
-       0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
-       0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
-       0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
-       0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
-       0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
-       0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
-       0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
-       0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
-       0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
-       0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
-       0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
-{
-       switch (reg) {
-       case ADV7511_REG_CHIP_REVISION:
-       case ADV7511_REG_SPDIF_FREQ:
-       case ADV7511_REG_CTS_AUTOMATIC1:
-       case ADV7511_REG_CTS_AUTOMATIC2:
-       case ADV7511_REG_VIC_DETECTED:
-       case ADV7511_REG_VIC_SEND:
-       case ADV7511_REG_AUX_VIC_DETECTED:
-       case ADV7511_REG_STATUS:
-       case ADV7511_REG_GC(1):
-       case ADV7511_REG_INT(0):
-       case ADV7511_REG_INT(1):
-       case ADV7511_REG_PLL_STATUS:
-       case ADV7511_REG_AN(0):
-       case ADV7511_REG_AN(1):
-       case ADV7511_REG_AN(2):
-       case ADV7511_REG_AN(3):
-       case ADV7511_REG_AN(4):
-       case ADV7511_REG_AN(5):
-       case ADV7511_REG_AN(6):
-       case ADV7511_REG_AN(7):
-       case ADV7511_REG_HDCP_STATUS:
-       case ADV7511_REG_BCAPS:
-       case ADV7511_REG_BKSV(0):
-       case ADV7511_REG_BKSV(1):
-       case ADV7511_REG_BKSV(2):
-       case ADV7511_REG_BKSV(3):
-       case ADV7511_REG_BKSV(4):
-       case ADV7511_REG_DDC_STATUS:
-       case ADV7511_REG_EDID_READ_CTRL:
-       case ADV7511_REG_BSTATUS(0):
-       case ADV7511_REG_BSTATUS(1):
-       case ADV7511_REG_CHIP_ID_HIGH:
-       case ADV7511_REG_CHIP_ID_LOW:
-               return true;
-       }
-
-       return false;
-}
-
-static const struct regmap_config adv7511_regmap_config = {
-       .reg_bits = 8,
-       .val_bits = 8,
-
-       .max_register = 0xff,
-       .cache_type = REGCACHE_RBTREE,
-       .reg_defaults_raw = adv7511_register_defaults,
-       .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),
-
-       .volatile_reg = adv7511_register_volatile,
-};
-
-/* -----------------------------------------------------------------------------
- * Hardware configuration
- */
-
-static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
-                                const uint16_t *coeff,
-                                unsigned int scaling_factor)
-{
-       unsigned int i;
-
-       regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
-                          ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);
-
-       if (enable) {
-               for (i = 0; i < 12; ++i) {
-                       regmap_update_bits(adv7511->regmap,
-                                          ADV7511_REG_CSC_UPPER(i),
-                                          0x1f, coeff[i] >> 8);
-                       regmap_write(adv7511->regmap,
-                                    ADV7511_REG_CSC_LOWER(i),
-                                    coeff[i] & 0xff);
-               }
-       }
-
-       if (enable)
-               regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
-                                  0xe0, 0x80 | (scaling_factor << 5));
-       else
-               regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
-                                  0x80, 0x00);
-
-       regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
-                          ADV7511_CSC_UPDATE_MODE, 0);
-}
-
-static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
-{
-       if (packet & 0xff)
-               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
-                                  packet, 0xff);
-
-       if (packet & 0xff00) {
-               packet >>= 8;
-               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
-                                  packet, 0xff);
-       }
-
-       return 0;
-}
-
-static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
-{
-       if (packet & 0xff)
-               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
-                                  packet, 0x00);
-
-       if (packet & 0xff00) {
-               packet >>= 8;
-               regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
-                                  packet, 0x00);
-       }
-
-       return 0;
-}
-
-/* Coefficients for adv7511 color space conversion */
-static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
-       0x0734, 0x04ad, 0x0000, 0x1c1b,
-       0x1ddc, 0x04ad, 0x1f24, 0x0135,
-       0x0000, 0x04ad, 0x087c, 0x1b77,
-};
-
-static void adv7511_set_config_csc(struct adv7511 *adv7511,
-                                  struct drm_connector *connector,
-                                  bool rgb)
-{
-       struct adv7511_video_config config;
-       bool output_format_422, output_format_ycbcr;
-       unsigned int mode;
-       uint8_t infoframe[17];
-
-       if (adv7511->edid)
-               config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
-       else
-               config.hdmi_mode = false;
-
-       hdmi_avi_infoframe_init(&config.avi_infoframe);
-
-       config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
-
-       if (rgb) {
-               config.csc_enable = false;
-               config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
-       } else {
-               config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
-               config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
-
-               if ((connector->display_info.color_formats &
-                    DRM_COLOR_FORMAT_YCRCB422) &&
-                   config.hdmi_mode) {
-                       config.csc_enable = false;
-                       config.avi_infoframe.colorspace =
-                               HDMI_COLORSPACE_YUV422;
-               } else {
-                       config.csc_enable = true;
-                       config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
-               }
-       }
-
-       if (config.hdmi_mode) {
-               mode = ADV7511_HDMI_CFG_MODE_HDMI;
-
-               switch (config.avi_infoframe.colorspace) {
-               case HDMI_COLORSPACE_YUV444:
-                       output_format_422 = false;
-                       output_format_ycbcr = true;
-                       break;
-               case HDMI_COLORSPACE_YUV422:
-                       output_format_422 = true;
-                       output_format_ycbcr = true;
-                       break;
-               default:
-                       output_format_422 = false;
-                       output_format_ycbcr = false;
-                       break;
-               }
-       } else {
-               mode = ADV7511_HDMI_CFG_MODE_DVI;
-               output_format_422 = false;
-               output_format_ycbcr = false;
-       }
-
-       adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
-
-       adv7511_set_colormap(adv7511, config.csc_enable,
-                            config.csc_coefficents,
-                            config.csc_scaling_factor);
-
-       regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
-                          (output_format_422 << 7) | output_format_ycbcr);
-
-       regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
-                          ADV7511_HDMI_CFG_MODE_MASK, mode);
-
-       hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
-                               sizeof(infoframe));
-
-       /* The AVI infoframe id is not configurable */
-       regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
-                         infoframe + 1, sizeof(infoframe) - 1);
-
-       adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
-}
-
-static void adv7511_set_link_config(struct adv7511 *adv7511,
-                                   const struct adv7511_link_config *config)
-{
-       /*
-        * The input style values documented in the datasheet don't match the
-        * hardware register field values :-(
-        */
-       static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
-
-       unsigned int clock_delay;
-       unsigned int color_depth;
-       unsigned int input_id;
-
-       clock_delay = (config->clock_delay + 1200) / 400;
-       color_depth = config->input_color_depth == 8 ? 3
-                   : (config->input_color_depth == 10 ? 1 : 2);
-
-       /* TODO Support input ID 6 */
-       if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
-               input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
-                        ? 5 : 0;
-       else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
-               input_id = config->embedded_sync ? 8 : 7;
-       else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
-               input_id = config->embedded_sync ? 4 : 3;
-       else
-               input_id = config->embedded_sync ? 2 : 1;
-
-       regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
-                          input_id);
-       regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
-                          (color_depth << 4) |
-                          (input_styles[config->input_style] << 2));
-       regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
-                    config->input_justification << 3);
-       regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
-                    config->sync_pulse << 2);
-
-       regmap_write(adv7511->regmap, 0xba, clock_delay << 5);
-
-       adv7511->embedded_sync = config->embedded_sync;
-       adv7511->hsync_polarity = config->hsync_polarity;
-       adv7511->vsync_polarity = config->vsync_polarity;
-       adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
-}
-
-static void adv7511_power_on(struct adv7511 *adv7511)
-{
-       adv7511->current_edid_segment = -1;
-
-       regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
-                          ADV7511_POWER_POWER_DOWN, 0);
-       if (adv7511->i2c_main->irq) {
-               /*
-                * Documentation says the INT_ENABLE registers are reset in
-                * POWER_DOWN mode. My 7511w preserved the bits, however.
-                * Still, let's be safe and stick to the documentation.
-                */
-               regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
-                            ADV7511_INT0_EDID_READY);
-               regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
-                            ADV7511_INT1_DDC_ERROR);
-       }
-
-       /*
-        * Per spec it is allowed to pulse the HPD signal to indicate that the
-        * EDID information has changed. Some monitors do this when they wakeup
-        * from standby or are enabled. When the HPD goes low the adv7511 is
-        * reset and the outputs are disabled which might cause the monitor to
-        * go to standby again. To avoid this we ignore the HPD pin for the
-        * first few seconds after enabling the output.
-        */
-       regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-                          ADV7511_REG_POWER2_HPD_SRC_MASK,
-                          ADV7511_REG_POWER2_HPD_SRC_NONE);
-
-       /*
-        * Most of the registers are reset during power down or when HPD is low.
-        */
-       regcache_sync(adv7511->regmap);
-
-       adv7511->powered = true;
-}
-
-static void adv7511_power_off(struct adv7511 *adv7511)
-{
-       /* TODO: setup additional power down modes */
-       regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
-                          ADV7511_POWER_POWER_DOWN,
-                          ADV7511_POWER_POWER_DOWN);
-       regcache_mark_dirty(adv7511->regmap);
-
-       adv7511->powered = false;
-}
-
-/* -----------------------------------------------------------------------------
- * Interrupt and hotplug detection
- */
-
-static bool adv7511_hpd(struct adv7511 *adv7511)
-{
-       unsigned int irq0;
-       int ret;
-
-       ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
-       if (ret < 0)
-               return false;
-
-       if (irq0 & ADV7511_INT0_HPD) {
-               regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-                            ADV7511_INT0_HPD);
-               return true;
-       }
-
-       return false;
-}
-
-static int adv7511_irq_process(struct adv7511 *adv7511)
-{
-       unsigned int irq0, irq1;
-       int ret;
-
-       ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
-       if (ret < 0)
-               return ret;
-
-       ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
-       if (ret < 0)
-               return ret;
-
-       regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
-       regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
-
-       if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
-               drm_helper_hpd_irq_event(adv7511->encoder->dev);
-
-       if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
-               adv7511->edid_read = true;
-
-               if (adv7511->i2c_main->irq)
-                       wake_up_all(&adv7511->wq);
-       }
-
-       return 0;
-}
-
-static irqreturn_t adv7511_irq_handler(int irq, void *devid)
-{
-       struct adv7511 *adv7511 = devid;
-       int ret;
-
-       ret = adv7511_irq_process(adv7511);
-       return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
-}
-
-/* -----------------------------------------------------------------------------
- * EDID retrieval
- */
-
-static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
-{
-       int ret;
-
-       if (adv7511->i2c_main->irq) {
-               ret = wait_event_interruptible_timeout(adv7511->wq,
-                               adv7511->edid_read, msecs_to_jiffies(timeout));
-       } else {
-               for (; timeout > 0; timeout -= 25) {
-                       ret = adv7511_irq_process(adv7511);
-                       if (ret < 0)
-                               break;
-
-                       if (adv7511->edid_read)
-                               break;
-
-                       msleep(25);
-               }
-       }
-
-       return adv7511->edid_read ? 0 : -EIO;
-}
-
-static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
-                                 size_t len)
-{
-       struct adv7511 *adv7511 = data;
-       struct i2c_msg xfer[2];
-       uint8_t offset;
-       unsigned int i;
-       int ret;
-
-       if (len > 128)
-               return -EINVAL;
-
-       if (adv7511->current_edid_segment != block / 2) {
-               unsigned int status;
-
-               ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
-                                 &status);
-               if (ret < 0)
-                       return ret;
-
-               if (status != 2) {
-                       adv7511->edid_read = false;
-                       regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
-                                    block);
-                       ret = adv7511_wait_for_edid(adv7511, 200);
-                       if (ret < 0)
-                               return ret;
-               }
-
-               /* Break this apart, hopefully more I2C controllers will
-                * support 64 byte transfers than 256 byte transfers
-                */
-
-               xfer[0].addr = adv7511->i2c_edid->addr;
-               xfer[0].flags = 0;
-               xfer[0].len = 1;
-               xfer[0].buf = &offset;
-               xfer[1].addr = adv7511->i2c_edid->addr;
-               xfer[1].flags = I2C_M_RD;
-               xfer[1].len = 64;
-               xfer[1].buf = adv7511->edid_buf;
-
-               offset = 0;
-
-               for (i = 0; i < 4; ++i) {
-                       ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
-                                          ARRAY_SIZE(xfer));
-                       if (ret < 0)
-                               return ret;
-                       else if (ret != 2)
-                               return -EIO;
-
-                       xfer[1].buf += 64;
-                       offset += 64;
-               }
-
-               adv7511->current_edid_segment = block / 2;
-       }
-
-       if (block % 2 == 0)
-               memcpy(buf, adv7511->edid_buf, len);
-       else
-               memcpy(buf, adv7511->edid_buf + 128, len);
-
-       return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * Encoder operations
- */
-
-static int adv7511_get_modes(struct drm_encoder *encoder,
-                            struct drm_connector *connector)
-{
-       struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
-       struct edid *edid;
-       unsigned int count;
-
-       /* Reading the EDID only works if the device is powered */
-       if (!adv7511->powered) {
-               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
-                                  ADV7511_POWER_POWER_DOWN, 0);
-               if (adv7511->i2c_main->irq) {
-                       regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
-                                    ADV7511_INT0_EDID_READY);
-                       regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
-                                    ADV7511_INT1_DDC_ERROR);
-               }
-               adv7511->current_edid_segment = -1;
-       }
-
-       edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
-
-       if (!adv7511->powered)
-               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
-                                  ADV7511_POWER_POWER_DOWN,
-                                  ADV7511_POWER_POWER_DOWN);
-
-       kfree(adv7511->edid);
-       adv7511->edid = edid;
-       if (!edid)
-               return 0;
-
-       drm_mode_connector_update_edid_property(connector, edid);
-       count = drm_add_edid_modes(connector, edid);
-
-       adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
-
-       return count;
-}
-
-static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
-
-       if (mode == DRM_MODE_DPMS_ON)
-               adv7511_power_on(adv7511);
-       else
-               adv7511_power_off(adv7511);
-}
-
-static enum drm_connector_status
-adv7511_encoder_detect(struct drm_encoder *encoder,
-                      struct drm_connector *connector)
-{
-       struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
-       enum drm_connector_status status;
-       unsigned int val;
-       bool hpd;
-       int ret;
-
-       ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
-       if (ret < 0)
-               return connector_status_disconnected;
-
-       if (val & ADV7511_STATUS_HPD)
-               status = connector_status_connected;
-       else
-               status = connector_status_disconnected;
-
-       hpd = adv7511_hpd(adv7511);
-
-       /* The chip resets itself when the cable is disconnected, so in case
-        * there is a pending HPD interrupt and the cable is connected there was
-        * at least one transition from disconnected to connected and the chip
-        * has to be reinitialized. */
-       if (status == connector_status_connected && hpd && adv7511->powered) {
-               regcache_mark_dirty(adv7511->regmap);
-               adv7511_power_on(adv7511);
-               adv7511_get_modes(encoder, connector);
-               if (adv7511->status == connector_status_connected)
-                       status = connector_status_disconnected;
-       } else {
-               /* Renable HPD sensing */
-               regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-                                  ADV7511_REG_POWER2_HPD_SRC_MASK,
-                                  ADV7511_REG_POWER2_HPD_SRC_BOTH);
-       }
-
-       adv7511->status = status;
-       return status;
-}
-
-static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
-                                     struct drm_display_mode *mode)
-{
-       if (mode->clock > 165000)
-               return MODE_CLOCK_HIGH;
-
-       return MODE_OK;
-}
-
-static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
-                                    struct drm_display_mode *mode,
-                                    struct drm_display_mode *adj_mode)
-{
-       struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
-       unsigned int low_refresh_rate;
-       unsigned int hsync_polarity = 0;
-       unsigned int vsync_polarity = 0;
-
-       if (adv7511->embedded_sync) {
-               unsigned int hsync_offset, hsync_len;
-               unsigned int vsync_offset, vsync_len;
-
-               hsync_offset = adj_mode->crtc_hsync_start -
-                              adj_mode->crtc_hdisplay;
-               vsync_offset = adj_mode->crtc_vsync_start -
-                              adj_mode->crtc_vdisplay;
-               hsync_len = adj_mode->crtc_hsync_end -
-                           adj_mode->crtc_hsync_start;
-               vsync_len = adj_mode->crtc_vsync_end -
-                           adj_mode->crtc_vsync_start;
-
-               /* The hardware vsync generator has a off-by-one bug */
-               vsync_offset += 1;
-
-               regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
-                            ((hsync_offset >> 10) & 0x7) << 5);
-               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
-                            (hsync_offset >> 2) & 0xff);
-               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
-                            ((hsync_offset & 0x3) << 6) |
-                            ((hsync_len >> 4) & 0x3f));
-               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
-                            ((hsync_len & 0xf) << 4) |
-                            ((vsync_offset >> 6) & 0xf));
-               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
-                            ((vsync_offset & 0x3f) << 2) |
-                            ((vsync_len >> 8) & 0x3));
-               regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
-                            vsync_len & 0xff);
-
-               hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
-               vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
-       } else {
-               enum adv7511_sync_polarity mode_hsync_polarity;
-               enum adv7511_sync_polarity mode_vsync_polarity;
-
-               /*
-                * If the input signal is always low or always high we want to
-                * invert it or let it pass through, depending on the polarity
-                * of the current mode.
-                */
-               if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
-                       mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
-               else
-                       mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
-
-               if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
-                       mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
-               else
-                       mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
-
-               if (adv7511->hsync_polarity != mode_hsync_polarity &&
-                   adv7511->hsync_polarity !=
-                   ADV7511_SYNC_POLARITY_PASSTHROUGH)
-                       hsync_polarity = 1;
-
-               if (adv7511->vsync_polarity != mode_vsync_polarity &&
-                   adv7511->vsync_polarity !=
-                   ADV7511_SYNC_POLARITY_PASSTHROUGH)
-                       vsync_polarity = 1;
-       }
-
-       /* Note: mode->vrefresh is in Hz, not mHz */
-       if (mode->vrefresh <= 24)
-               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
-       else if (mode->vrefresh <= 25)
-               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
-       else if (mode->vrefresh <= 30)
-               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
-       else
-               low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
-
-       regmap_update_bits(adv7511->regmap, 0xfb,
-               0x6, low_refresh_rate << 1);
-       regmap_update_bits(adv7511->regmap, 0x17,
-               0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
-
-       /*
-        * TODO: Test the first-order 4:2:2 to 4:4:4 up-conversion method,
-        * which is supposed to give better results.
-        */
-
-       adv7511->f_tmds = mode->clock;
-}
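For concreteness, here is what this path computes for a standard CEA-861 1920x1080@60 mode (crtc_hdisplay 1920, crtc_hsync_start 2008, crtc_hsync_end 2052, crtc_vdisplay 1080, crtc_vsync_start 1084, crtc_vsync_end 1089); a minimal sketch, not driver code:

    unsigned int hsync_offset = 2008 - 1920;      /* 88 pixels */
    unsigned int hsync_len    = 2052 - 2008;      /* 44 pixels */
    unsigned int vsync_offset = 1084 - 1080 + 1;  /* 5 lines, off-by-one applied */
    unsigned int vsync_len    = 1089 - 1084;      /* 5 lines */

    /*
     * hsync_offset is up to 13 bits wide and is split across three registers:
     * bits [12:10] go to HSYNC_PLACEMENT_MSB[7:5], bits [9:2] to
     * SYNC_DECODER(0), and bits [1:0] to SYNC_DECODER(1)[7:6], exactly as the
     * regmap_write() calls above do.
     */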
-
-static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
-       .dpms = adv7511_encoder_dpms,
-       .mode_valid = adv7511_encoder_mode_valid,
-       .mode_set = adv7511_encoder_mode_set,
-       .detect = adv7511_encoder_detect,
-       .get_modes = adv7511_get_modes,
-};
-
-/* -----------------------------------------------------------------------------
- * Probe & remove
- */
-
-static int adv7511_parse_dt(struct device_node *np,
-                           struct adv7511_link_config *config)
-{
-       const char *str;
-       int ret;
-
-       memset(config, 0, sizeof(*config));
-
-       of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
-       if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
-           config->input_color_depth != 12)
-               return -EINVAL;
-
-       ret = of_property_read_string(np, "adi,input-colorspace", &str);
-       if (ret < 0)
-               return ret;
-
-       if (!strcmp(str, "rgb"))
-               config->input_colorspace = HDMI_COLORSPACE_RGB;
-       else if (!strcmp(str, "yuv422"))
-               config->input_colorspace = HDMI_COLORSPACE_YUV422;
-       else if (!strcmp(str, "yuv444"))
-               config->input_colorspace = HDMI_COLORSPACE_YUV444;
-       else
-               return -EINVAL;
-
-       ret = of_property_read_string(np, "adi,input-clock", &str);
-       if (ret < 0)
-               return ret;
-
-       if (!strcmp(str, "1x"))
-               config->input_clock = ADV7511_INPUT_CLOCK_1X;
-       else if (!strcmp(str, "2x"))
-               config->input_clock = ADV7511_INPUT_CLOCK_2X;
-       else if (!strcmp(str, "ddr"))
-               config->input_clock = ADV7511_INPUT_CLOCK_DDR;
-       else
-               return -EINVAL;
-
-       if (config->input_colorspace == HDMI_COLORSPACE_YUV422 ||
-           config->input_clock != ADV7511_INPUT_CLOCK_1X) {
-               ret = of_property_read_u32(np, "adi,input-style",
-                                          &config->input_style);
-               if (ret)
-                       return ret;
-
-               if (config->input_style < 1 || config->input_style > 3)
-                       return -EINVAL;
-
-               ret = of_property_read_string(np, "adi,input-justification",
-                                             &str);
-               if (ret < 0)
-                       return ret;
-
-               if (!strcmp(str, "left"))
-                       config->input_justification =
-                               ADV7511_INPUT_JUSTIFICATION_LEFT;
-               else if (!strcmp(str, "evenly"))
-                       config->input_justification =
-                               ADV7511_INPUT_JUSTIFICATION_EVENLY;
-               else if (!strcmp(str, "right"))
-                       config->input_justification =
-                               ADV7511_INPUT_JUSTIFICATION_RIGHT;
-               else
-                       return -EINVAL;
-
-       } else {
-               config->input_style = 1;
-               config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT;
-       }
-
-       of_property_read_u32(np, "adi,clock-delay", &config->clock_delay);
-       if (config->clock_delay < -1200 || config->clock_delay > 1600)
-               return -EINVAL;
-
-       config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync");
-
-       /* Hardcode the sync pulse configurations for now. */
-       config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE;
-       config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
-       config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
-
-       return 0;
-}
-
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
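These are the 8-bit bus addresses the ADV7511 registers expect. The Linux I2C core addresses clients with 7-bit addresses, which is why the probe path below shifts the value right by one before registering the dummy EDID client:

    /* 0x7e is the 8-bit form; i2c_new_dummy() takes the 7-bit address 0x3f. */
    adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);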
-
-static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
-{
-       struct adv7511_link_config link_config;
-       struct adv7511 *adv7511;
-       struct device *dev = &i2c->dev;
-       unsigned int val;
-       int ret;
-
-       if (!dev->of_node)
-               return -EINVAL;
-
-       adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
-       if (!adv7511)
-               return -ENOMEM;
-
-       adv7511->powered = false;
-       adv7511->status = connector_status_disconnected;
-
-       ret = adv7511_parse_dt(dev->of_node, &link_config);
-       if (ret)
-               return ret;
-
-       /*
-        * The power down GPIO is optional. If present, toggle it from active to
-        * inactive to wake up the encoder.
-        */
-       adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
-       if (IS_ERR(adv7511->gpio_pd))
-               return PTR_ERR(adv7511->gpio_pd);
-
-       if (adv7511->gpio_pd) {
-               mdelay(5);
-               gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
-       }
-
-       adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
-       if (IS_ERR(adv7511->regmap))
-               return PTR_ERR(adv7511->regmap);
-
-       ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
-       if (ret)
-               return ret;
-       dev_dbg(dev, "Rev. %d\n", val);
-
-       ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
-                                   ARRAY_SIZE(adv7511_fixed_registers));
-       if (ret)
-               return ret;
-
-       regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
-       regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
-                    packet_i2c_addr);
-       regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
-       adv7511_packet_disable(adv7511, 0xffff);
-
-       adv7511->i2c_main = i2c;
-       adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
-       if (!adv7511->i2c_edid)
-               return -ENOMEM;
-
-       if (i2c->irq) {
-               init_waitqueue_head(&adv7511->wq);
-
-               ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
-                                               adv7511_irq_handler,
-                                               IRQF_ONESHOT, dev_name(dev),
-                                               adv7511);
-               if (ret)
-                       goto err_i2c_unregister_device;
-       }
-
-       /* CEC is unused for now */
-       regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
-                    ADV7511_CEC_CTRL_POWER_DOWN);
-
-       adv7511_power_off(adv7511);
-
-       i2c_set_clientdata(i2c, adv7511);
-
-       adv7511_set_link_config(adv7511, &link_config);
-
-       return 0;
-
-err_i2c_unregister_device:
-       i2c_unregister_device(adv7511->i2c_edid);
-
-       return ret;
-}
-
-static int adv7511_remove(struct i2c_client *i2c)
-{
-       struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
-
-       i2c_unregister_device(adv7511->i2c_edid);
-
-       kfree(adv7511->edid);
-
-       return 0;
-}
-
-static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
-                               struct drm_encoder_slave *encoder)
-{
-       struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
-
-       encoder->slave_priv = adv7511;
-       encoder->slave_funcs = &adv7511_encoder_funcs;
-
-       adv7511->encoder = &encoder->base;
-
-       return 0;
-}
-
-static const struct i2c_device_id adv7511_i2c_ids[] = {
-       { "adv7511", 0 },
-       { "adv7511w", 0 },
-       { "adv7513", 0 },
-       { }
-};
-MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
-
-static const struct of_device_id adv7511_of_ids[] = {
-       { .compatible = "adi,adv7511", },
-       { .compatible = "adi,adv7511w", },
-       { .compatible = "adi,adv7513", },
-       { }
-};
-MODULE_DEVICE_TABLE(of, adv7511_of_ids);
-
-static struct drm_i2c_encoder_driver adv7511_driver = {
-       .i2c_driver = {
-               .driver = {
-                       .name = "adv7511",
-                       .of_match_table = adv7511_of_ids,
-               },
-               .id_table = adv7511_i2c_ids,
-               .probe = adv7511_probe,
-               .remove = adv7511_remove,
-       },
-
-       .encoder_init = adv7511_encoder_init,
-};
-
-static int __init adv7511_init(void)
-{
-       return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver);
-}
-module_init(adv7511_init);
-
-static void __exit adv7511_exit(void)
-{
-       drm_i2c_encoder_unregister(&adv7511_driver);
-}
-module_exit(adv7511_exit);
-
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
deleted file mode 100644 (file)
index 38515b3..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- * Analog Devices ADV7511 HDMI transmitter driver
- *
- * Copyright 2012 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef __DRM_I2C_ADV7511_H__
-#define __DRM_I2C_ADV7511_H__
-
-#include <linux/hdmi.h>
-
-#define ADV7511_REG_CHIP_REVISION              0x00
-#define ADV7511_REG_N0                         0x01
-#define ADV7511_REG_N1                         0x02
-#define ADV7511_REG_N2                         0x03
-#define ADV7511_REG_SPDIF_FREQ                 0x04
-#define ADV7511_REG_CTS_AUTOMATIC1             0x05
-#define ADV7511_REG_CTS_AUTOMATIC2             0x06
-#define ADV7511_REG_CTS_MANUAL0                        0x07
-#define ADV7511_REG_CTS_MANUAL1                        0x08
-#define ADV7511_REG_CTS_MANUAL2                        0x09
-#define ADV7511_REG_AUDIO_SOURCE               0x0a
-#define ADV7511_REG_AUDIO_CONFIG               0x0b
-#define ADV7511_REG_I2S_CONFIG                 0x0c
-#define ADV7511_REG_I2S_WIDTH                  0x0d
-#define ADV7511_REG_AUDIO_SUB_SRC0             0x0e
-#define ADV7511_REG_AUDIO_SUB_SRC1             0x0f
-#define ADV7511_REG_AUDIO_SUB_SRC2             0x10
-#define ADV7511_REG_AUDIO_SUB_SRC3             0x11
-#define ADV7511_REG_AUDIO_CFG1                 0x12
-#define ADV7511_REG_AUDIO_CFG2                 0x13
-#define ADV7511_REG_AUDIO_CFG3                 0x14
-#define ADV7511_REG_I2C_FREQ_ID_CFG            0x15
-#define ADV7511_REG_VIDEO_INPUT_CFG1           0x16
-#define ADV7511_REG_CSC_UPPER(x)               (0x18 + (x) * 2)
-#define ADV7511_REG_CSC_LOWER(x)               (0x19 + (x) * 2)
-#define ADV7511_REG_SYNC_DECODER(x)            (0x30 + (x))
-#define ADV7511_REG_DE_GENERATOR(x)            (0x35 + (x))
-#define ADV7511_REG_PIXEL_REPETITION           0x3b
-#define ADV7511_REG_VIC_MANUAL                 0x3c
-#define ADV7511_REG_VIC_SEND                   0x3d
-#define ADV7511_REG_VIC_DETECTED               0x3e
-#define ADV7511_REG_AUX_VIC_DETECTED           0x3f
-#define ADV7511_REG_PACKET_ENABLE0             0x40
-#define ADV7511_REG_POWER                      0x41
-#define ADV7511_REG_STATUS                     0x42
-#define ADV7511_REG_EDID_I2C_ADDR              0x43
-#define ADV7511_REG_PACKET_ENABLE1             0x44
-#define ADV7511_REG_PACKET_I2C_ADDR            0x45
-#define ADV7511_REG_DSD_ENABLE                 0x46
-#define ADV7511_REG_VIDEO_INPUT_CFG2           0x48
-#define ADV7511_REG_INFOFRAME_UPDATE           0x4a
-#define ADV7511_REG_GC(x)                      (0x4b + (x)) /* 0x4b - 0x51 */
-#define ADV7511_REG_AVI_INFOFRAME_VERSION      0x52
-#define ADV7511_REG_AVI_INFOFRAME_LENGTH       0x53
-#define ADV7511_REG_AVI_INFOFRAME_CHECKSUM     0x54
-#define ADV7511_REG_AVI_INFOFRAME(x)           (0x55 + (x)) /* 0x55 - 0x6f */
-#define ADV7511_REG_AUDIO_INFOFRAME_VERSION    0x70
-#define ADV7511_REG_AUDIO_INFOFRAME_LENGTH     0x71
-#define ADV7511_REG_AUDIO_INFOFRAME_CHECKSUM   0x72
-#define ADV7511_REG_AUDIO_INFOFRAME(x)         (0x73 + (x)) /* 0x73 - 0x7c */
-#define ADV7511_REG_INT_ENABLE(x)              (0x94 + (x))
-#define ADV7511_REG_INT(x)                     (0x96 + (x))
-#define ADV7511_REG_INPUT_CLK_DIV              0x9d
-#define ADV7511_REG_PLL_STATUS                 0x9e
-#define ADV7511_REG_HDMI_POWER                 0xa1
-#define ADV7511_REG_HDCP_HDMI_CFG              0xaf
-#define ADV7511_REG_AN(x)                      (0xb0 + (x)) /* 0xb0 - 0xb7 */
-#define ADV7511_REG_HDCP_STATUS                        0xb8
-#define ADV7511_REG_BCAPS                      0xbe
-#define ADV7511_REG_BKSV(x)                    (0xc0 + (x)) /* 0xc0 - 0xc3 */
-#define ADV7511_REG_EDID_SEGMENT               0xc4
-#define ADV7511_REG_DDC_STATUS                 0xc8
-#define ADV7511_REG_EDID_READ_CTRL             0xc9
-#define ADV7511_REG_BSTATUS(x)                 (0xca + (x)) /* 0xca - 0xcb */
-#define ADV7511_REG_TIMING_GEN_SEQ             0xd0
-#define ADV7511_REG_POWER2                     0xd6
-#define ADV7511_REG_HSYNC_PLACEMENT_MSB                0xfa
-
-#define ADV7511_REG_SYNC_ADJUSTMENT(x)         (0xd7 + (x)) /* 0xd7 - 0xdc */
-#define ADV7511_REG_TMDS_CLOCK_INV             0xde
-#define ADV7511_REG_ARC_CTRL                   0xdf
-#define ADV7511_REG_CEC_I2C_ADDR               0xe1
-#define ADV7511_REG_CEC_CTRL                   0xe2
-#define ADV7511_REG_CHIP_ID_HIGH               0xf5
-#define ADV7511_REG_CHIP_ID_LOW                        0xf6
-
-#define ADV7511_CSC_ENABLE                     BIT(7)
-#define ADV7511_CSC_UPDATE_MODE                        BIT(5)
-
-#define ADV7511_INT0_HPD                       BIT(7)
-#define ADV7511_INT0_VSYNC                     BIT(5)
-#define ADV7511_INT0_AUDIO_FIFO_FULL           BIT(4)
-#define ADV7511_INT0_EDID_READY                        BIT(2)
-#define ADV7511_INT0_HDCP_AUTHENTICATED                BIT(1)
-
-#define ADV7511_INT1_DDC_ERROR                 BIT(7)
-#define ADV7511_INT1_BKSV                      BIT(6)
-#define ADV7511_INT1_CEC_TX_READY              BIT(5)
-#define ADV7511_INT1_CEC_TX_ARBIT_LOST         BIT(4)
-#define ADV7511_INT1_CEC_TX_RETRY_TIMEOUT      BIT(3)
-#define ADV7511_INT1_CEC_RX_READY3             BIT(2)
-#define ADV7511_INT1_CEC_RX_READY2             BIT(1)
-#define ADV7511_INT1_CEC_RX_READY1             BIT(0)
-
-#define ADV7511_ARC_CTRL_POWER_DOWN            BIT(0)
-
-#define ADV7511_CEC_CTRL_POWER_DOWN            BIT(0)
-
-#define ADV7511_POWER_POWER_DOWN               BIT(6)
-
-#define ADV7511_HDMI_CFG_MODE_MASK             0x2
-#define ADV7511_HDMI_CFG_MODE_DVI              0x0
-#define ADV7511_HDMI_CFG_MODE_HDMI             0x2
-
-#define ADV7511_AUDIO_SELECT_I2C               0x0
-#define ADV7511_AUDIO_SELECT_SPDIF             0x1
-#define ADV7511_AUDIO_SELECT_DSD               0x2
-#define ADV7511_AUDIO_SELECT_HBR               0x3
-#define ADV7511_AUDIO_SELECT_DST               0x4
-
-#define ADV7511_I2S_SAMPLE_LEN_16              0x2
-#define ADV7511_I2S_SAMPLE_LEN_20              0x3
-#define ADV7511_I2S_SAMPLE_LEN_18              0x4
-#define ADV7511_I2S_SAMPLE_LEN_22              0x5
-#define ADV7511_I2S_SAMPLE_LEN_19              0x8
-#define ADV7511_I2S_SAMPLE_LEN_23              0x9
-#define ADV7511_I2S_SAMPLE_LEN_24              0xb
-#define ADV7511_I2S_SAMPLE_LEN_17              0xc
-#define ADV7511_I2S_SAMPLE_LEN_21              0xd
-
-#define ADV7511_SAMPLE_FREQ_44100              0x0
-#define ADV7511_SAMPLE_FREQ_48000              0x2
-#define ADV7511_SAMPLE_FREQ_32000              0x3
-#define ADV7511_SAMPLE_FREQ_88200              0x8
-#define ADV7511_SAMPLE_FREQ_96000              0xa
-#define ADV7511_SAMPLE_FREQ_176400             0xc
-#define ADV7511_SAMPLE_FREQ_192000             0xe
-
-#define ADV7511_STATUS_POWER_DOWN_POLARITY     BIT(7)
-#define ADV7511_STATUS_HPD                     BIT(6)
-#define ADV7511_STATUS_MONITOR_SENSE           BIT(5)
-#define ADV7511_STATUS_I2S_32BIT_MODE          BIT(3)
-
-#define ADV7511_PACKET_ENABLE_N_CTS            BIT(8+6)
-#define ADV7511_PACKET_ENABLE_AUDIO_SAMPLE     BIT(8+5)
-#define ADV7511_PACKET_ENABLE_AVI_INFOFRAME    BIT(8+4)
-#define ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME  BIT(8+3)
-#define ADV7511_PACKET_ENABLE_GC               BIT(7)
-#define ADV7511_PACKET_ENABLE_SPD              BIT(6)
-#define ADV7511_PACKET_ENABLE_MPEG             BIT(5)
-#define ADV7511_PACKET_ENABLE_ACP              BIT(4)
-#define ADV7511_PACKET_ENABLE_ISRC             BIT(3)
-#define ADV7511_PACKET_ENABLE_GM               BIT(2)
-#define ADV7511_PACKET_ENABLE_SPARE2           BIT(1)
-#define ADV7511_PACKET_ENABLE_SPARE1           BIT(0)
-
-#define ADV7511_REG_POWER2_HPD_SRC_MASK                0xc0
-#define ADV7511_REG_POWER2_HPD_SRC_BOTH                0x00
-#define ADV7511_REG_POWER2_HPD_SRC_HPD         0x40
-#define ADV7511_REG_POWER2_HPD_SRC_CEC         0x80
-#define ADV7511_REG_POWER2_HPD_SRC_NONE                0xc0
-#define ADV7511_REG_POWER2_TDMS_ENABLE         BIT(4)
-#define ADV7511_REG_POWER2_GATE_INPUT_CLK      BIT(0)
-
-#define ADV7511_LOW_REFRESH_RATE_NONE          0x0
-#define ADV7511_LOW_REFRESH_RATE_24HZ          0x1
-#define ADV7511_LOW_REFRESH_RATE_25HZ          0x2
-#define ADV7511_LOW_REFRESH_RATE_30HZ          0x3
-
-#define ADV7511_AUDIO_CFG3_LEN_MASK            0x0f
-#define ADV7511_I2C_FREQ_ID_CFG_RATE_MASK      0xf0
-
-#define ADV7511_AUDIO_SOURCE_I2S               0
-#define ADV7511_AUDIO_SOURCE_SPDIF             1
-
-#define ADV7511_I2S_FORMAT_I2S                 0
-#define ADV7511_I2S_FORMAT_RIGHT_J             1
-#define ADV7511_I2S_FORMAT_LEFT_J              2
-
-#define ADV7511_PACKET(p, x)       ((p) * 0x20 + (x))
-#define ADV7511_PACKET_SDP(x)      ADV7511_PACKET(0, x)
-#define ADV7511_PACKET_MPEG(x)     ADV7511_PACKET(1, x)
-#define ADV7511_PACKET_ACP(x)      ADV7511_PACKET(2, x)
-#define ADV7511_PACKET_ISRC1(x)            ADV7511_PACKET(3, x)
-#define ADV7511_PACKET_ISRC2(x)            ADV7511_PACKET(4, x)
-#define ADV7511_PACKET_GM(x)       ADV7511_PACKET(5, x)
-#define ADV7511_PACKET_SPARE(x)            ADV7511_PACKET(6, x)
-
-enum adv7511_input_clock {
-       ADV7511_INPUT_CLOCK_1X,
-       ADV7511_INPUT_CLOCK_2X,
-       ADV7511_INPUT_CLOCK_DDR,
-};
-
-enum adv7511_input_justification {
-       ADV7511_INPUT_JUSTIFICATION_EVENLY = 0,
-       ADV7511_INPUT_JUSTIFICATION_RIGHT = 1,
-       ADV7511_INPUT_JUSTIFICATION_LEFT = 2,
-};
-
-enum adv7511_input_sync_pulse {
-       ADV7511_INPUT_SYNC_PULSE_DE = 0,
-       ADV7511_INPUT_SYNC_PULSE_HSYNC = 1,
-       ADV7511_INPUT_SYNC_PULSE_VSYNC = 2,
-       ADV7511_INPUT_SYNC_PULSE_NONE = 3,
-};
-
-/**
- * enum adv7511_sync_polarity - Polarity for the input sync signals
- * @ADV7511_SYNC_POLARITY_PASSTHROUGH:  Sync polarity matches that of
- *                                    the currently configured mode.
- * @ADV7511_SYNC_POLARITY_LOW:     Sync polarity is low
- * @ADV7511_SYNC_POLARITY_HIGH:            Sync polarity is high
- *
- * If the polarity is set to either LOW or HIGH the driver will configure the
- * ADV7511 to internally invert the sync signal if required to match the sync
- * polarity setting for the currently selected output mode.
- *
- * If the polarity is set to PASSTHROUGH, the ADV7511 will route the signal
- * unchanged. This is used when the upstream graphics core already generates
- * the sync signals with the correct polarity.
- */
-enum adv7511_sync_polarity {
-       ADV7511_SYNC_POLARITY_PASSTHROUGH,
-       ADV7511_SYNC_POLARITY_LOW,
-       ADV7511_SYNC_POLARITY_HIGH,
-};
-
-/**
- * struct adv7511_link_config - Describes adv7511 hardware configuration
- * @input_color_depth:         Number of bits per color component (8, 10 or 12)
- * @input_colorspace:          The input colorspace (RGB, YUV444, YUV422)
- * @input_clock:               The input video clock style (1x, 2x, DDR)
- * @input_style:               The input component arrangement variant
- * @input_justification:       Video input format bit justification
- * @clock_delay:               Clock delay for the input clock (in ps)
- * @embedded_sync:             Video input uses BT.656-style embedded sync
- * @sync_pulse:                        Select the sync pulse
- * @vsync_polarity:            vsync input signal configuration
- * @hsync_polarity:            hsync input signal configuration
- */
-struct adv7511_link_config {
-       unsigned int input_color_depth;
-       enum hdmi_colorspace input_colorspace;
-       enum adv7511_input_clock input_clock;
-       unsigned int input_style;
-       enum adv7511_input_justification input_justification;
-
-       int clock_delay;
-
-       bool embedded_sync;
-       enum adv7511_input_sync_pulse sync_pulse;
-       enum adv7511_sync_polarity vsync_polarity;
-       enum adv7511_sync_polarity hsync_polarity;
-};
-
-/**
- * enum adv7511_csc_scaling - Scaling factor for the ADV7511 CSC
- * @ADV7511_CSC_SCALING_1: CSC results are not scaled
- * @ADV7511_CSC_SCALING_2: CSC results are scaled by a factor of two
- * @ADV7511_CSC_SCALING_4: CSC results are scaled by a factor of four
- */
-enum adv7511_csc_scaling {
-       ADV7511_CSC_SCALING_1 = 0,
-       ADV7511_CSC_SCALING_2 = 1,
-       ADV7511_CSC_SCALING_4 = 2,
-};
-
-/**
- * struct adv7511_video_config - Describes adv7511 hardware configuration
- * @csc_enable:                        Whether to enable color space conversion
- * @csc_scaling_factor:                Color space conversion scaling factor
- * @csc_coefficents:           Color space conversion coefficients
- * @hdmi_mode:                 Whether to use HDMI or DVI output mode
- * @avi_infoframe:             HDMI infoframe
- */
-struct adv7511_video_config {
-       bool csc_enable;
-       enum adv7511_csc_scaling csc_scaling_factor;
-       const uint16_t *csc_coefficents;
-
-       bool hdmi_mode;
-       struct hdmi_avi_infoframe avi_infoframe;
-};
-
-#endif /* __DRM_I2C_ADV7511_H__ */
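The ADV7511_PACKET_ENABLE_* flags form a 16-bit mask spanning both packet-enable registers, with BIT(8 + x) selecting a bit in PACKET_ENABLE1. A minimal sketch of how a helper such as adv7511_packet_disable() (called from the probe path above; its body is not part of this diff) could split that mask:

    /* Sketch only; the real helper lives elsewhere in the driver. */
    static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
    {
            if (packet & 0xff)
                    regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
                                       packet & 0xff, 0);

            if (packet & 0xff00)
                    regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
                                       packet >> 8, 0);

            return 0;
    }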
index 0594c45f71646d7a512707d7f0b4f89a0875b30b..e9e8ae2ec06b2d52b404f10d96b8cebb7139cee0 100644 (file)
@@ -361,13 +361,8 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
 
                /* Disable the crtc to ensure a full modeset is
                 * performed whenever it's turned on again. */
-               if (crtc) {
-                       struct drm_mode_set modeset = {
-                               .crtc = crtc,
-                       };
-
-                       drm_mode_set_config_internal(&modeset);
-               }
+               if (crtc)
+                       drm_crtc_force_disable(crtc);
        }
 
        return 0;
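drm_crtc_force_disable() is a then-new core helper that factors out exactly the pattern removed here; conceptually it amounts to the following sketch (assuming the helper matches the deleted open-coded version):

    int drm_crtc_force_disable(struct drm_crtc *crtc)
    {
            struct drm_mode_set set = {
                    .crtc = crtc,
            };

            return drm_mode_set_config_internal(&set);
    }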
index 29a32b11953b9eddacec285ba13087b4bc27f9f5..7769e469118f2084f4ee84b70f7aa049cd07239c 100644 (file)
@@ -57,6 +57,28 @@ config DRM_I915_USERPTR
 
          If in doubt, say "Y".
 
+config DRM_I915_GVT
+        bool "Enable Intel GVT-g graphics virtualization host support"
+        depends on DRM_I915
+        default n
+        help
+         Choose this option if you want to enable Intel GVT-g graphics
+         virtualization technology host support with integrated graphics.
+         With GVT-g, it's possible to have one integrated graphics
+         device shared by multiple VMs under different hypervisors.
+
+         Note that at least one hypervisor, such as Xen or KVM, is required
+         for this driver to work, and it only supports newer devices
+         (Broadwell and later). For further information and a setup guide,
+         visit http://01.org/igvt-g.
+
+         For now this is just a stub supporting the i915 modifications for
+         the GVT device model. It requires at least one MPT module (for
+         Xen or KVM) and the other GVT device model components to work.
+         Use it at your own risk.
+
+         If in doubt, say "N".
+
 menu "drm/i915 Debugging"
 depends on DRM_I915
 depends on EXPERT
index 8f404103341d9e6a349e22de5e89d53d247c4daa..cee87bfd10c45f8e9bec618549e07591f81551ad 100644 (file)
@@ -18,6 +18,9 @@ config DRM_I915_WERROR
 config DRM_I915_DEBUG
         bool "Enable additional driver debugging"
         depends on DRM_I915
+        select PREEMPT_COUNT
+        select X86_MSR # used by igt/pm_rpm
+        select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
         default n
         help
           Choose this option to turn on extra driver debugging that may affect
index 0b88ba0f3c1f4977697603f6306c2fc0287e7b53..684fc1cd08fa90260529ca558b18118d118c8f9c 100644 (file)
@@ -10,9 +10,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
 i915-y := i915_drv.o \
          i915_irq.o \
          i915_params.o \
+         i915_pci.o \
           i915_suspend.o \
          i915_sysfs.o \
          intel_csr.o \
+         intel_device_info.o \
          intel_pm.o \
          intel_runtime_pm.o
 
@@ -37,6 +39,7 @@ i915-y += i915_cmd_parser.o \
          i915_gem_userptr.o \
          i915_gpu_error.o \
          i915_trace_points.o \
+         intel_breadcrumbs.o \
          intel_lrc.o \
          intel_mocs.o \
          intel_ringbuffer.o \
@@ -59,6 +62,7 @@ i915-y += intel_audio.o \
          intel_bios.o \
          intel_color.o \
          intel_display.o \
+         intel_dpio_phy.o \
          intel_dpll_mgr.o \
          intel_fbc.o \
          intel_fifo_underrun.o \
@@ -81,10 +85,12 @@ i915-y += dvo_ch7017.o \
          dvo_tfp410.o \
          intel_crt.o \
          intel_ddi.o \
+         intel_dp_aux_backlight.o \
          intel_dp_link_training.o \
          intel_dp_mst.o \
          intel_dp.o \
          intel_dsi.o \
+         intel_dsi_dcs_backlight.o \
          intel_dsi_panel_vbt.o \
          intel_dsi_pll.o \
          intel_dvo.o \
@@ -98,8 +104,10 @@ i915-y += dvo_ch7017.o \
 # virtual gpu code
 i915-y += i915_vgpu.o
 
-# legacy horrors
-i915-y += i915_dma.o
+ifeq ($(CONFIG_DRM_I915_GVT),y)
+i915-y += intel_gvt.o
+include $(src)/gvt/Makefile
+endif
 
 obj-$(CONFIG_DRM_I915)  += i915.o
 
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
new file mode 100644 (file)
index 0000000..d0f21a6
--- /dev/null
@@ -0,0 +1,5 @@
+GVT_DIR := gvt
+GVT_SOURCE := gvt.o
+
+ccflags-y                      += -I$(src) -I$(src)/$(GVT_DIR) -Wall
+i915-y                        += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
new file mode 100644 (file)
index 0000000..7ef412b
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __GVT_DEBUG_H__
+#define __GVT_DEBUG_H__
+
+#define gvt_dbg_core(fmt, args...) \
+       DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
+
+/*
+ * Other GVT debug stuff will be introduced in the GVT device model patches.
+ */
+
+#endif /* __GVT_DEBUG_H__ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
new file mode 100644 (file)
index 0000000..927f457
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <xen/xen.h>
+
+#include "i915_drv.h"
+
+struct intel_gvt_host intel_gvt_host;
+
+static const char * const supported_hypervisors[] = {
+       [INTEL_GVT_HYPERVISOR_XEN] = "XEN",
+       [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
+};
+
+/**
+ * intel_gvt_init_host - load an MPT module and detect if we're running in host
+ *
+ * This function is called at the driver loading stage. If no loadable MPT
+ * module is found, or we detect that we are currently running inside a VM,
+ * GVT-g will be disabled.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ *
+ */
+int intel_gvt_init_host(void)
+{
+       if (intel_gvt_host.initialized)
+               return 0;
+
+       /* Xen DOM U */
+       if (xen_domain() && !xen_initial_domain())
+               return -ENODEV;
+
+       /* Try to load MPT modules for hypervisors */
+       if (xen_initial_domain()) {
+               /* In Xen dom0 */
+               intel_gvt_host.mpt = try_then_request_module(
+                               symbol_get(xengt_mpt), "xengt");
+               intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
+       } else {
+               /* not in Xen. Try KVMGT */
+               intel_gvt_host.mpt = try_then_request_module(
+                               symbol_get(kvmgt_mpt), "kvm");
+               intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
+       }
+
+       /* Failed to load an MPT module - bail out */
+       if (!intel_gvt_host.mpt)
+               return -EINVAL;
+
+       /* Try to detect if we're running in host instead of VM. */
+       if (!intel_gvt_hypervisor_detect_host())
+               return -ENODEV;
+
+       gvt_dbg_core("Running with hypervisor %s in host mode\n",
+                       supported_hypervisors[intel_gvt_host.hypervisor_type]);
+
+       intel_gvt_host.initialized = true;
+       return 0;
+}
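try_then_request_module() (from <linux/kmod.h>) evaluates its first argument; if that yields NULL it loads the named module with request_module() and evaluates the expression once more. The Xen branch above therefore behaves roughly like this expansion:

    /* Rough expansion of the Xen branch; not literal kernel code. */
    intel_gvt_host.mpt = symbol_get(xengt_mpt);
    if (!intel_gvt_host.mpt) {
            request_module("xengt");
            intel_gvt_host.mpt = symbol_get(xengt_mpt);
    }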
+
+static void init_device_info(struct intel_gvt *gvt)
+{
+       if (IS_BROADWELL(gvt->dev_priv))
+               gvt->device_info.max_support_vgpus = 8;
+       /* This function will grow large in GVT device model patches. */
+}
+
+/**
+ * intel_gvt_clean_device - clean a GVT device
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the driver unloading stage, to free the
+ * resources owned by a GVT device.
+ *
+ */
+void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+{
+       struct intel_gvt *gvt = &dev_priv->gvt;
+
+       if (WARN_ON(!gvt->initialized))
+               return;
+
+       /* Other de-initialization of GVT components will be introduced. */
+
+       gvt->initialized = false;
+}
+
+/**
+ * intel_gvt_init_device - initialize a GVT device
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the initialization stage, to initialize
+ * necessary GVT components.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ *
+ */
+int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+{
+       struct intel_gvt *gvt = &dev_priv->gvt;
+       /*
+        * The GVT device cannot be initialized until intel_gvt_host has
+        * been initialized.
+        */
+       if (WARN_ON(!intel_gvt_host.initialized))
+               return -EINVAL;
+
+       if (WARN_ON(gvt->initialized))
+               return -EEXIST;
+
+       gvt_dbg_core("init gvt device\n");
+
+       init_device_info(gvt);
+       /*
+        * Other initialization of GVT components will be introduced here.
+        */
+       gvt_dbg_core("gvt device creation is done\n");
+       gvt->initialized = true;
+       return 0;
+}
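A plausible call order from the i915 load path, sketched; the actual call sites live in intel_gvt.c, which this series adds to the build but does not show in this diff:

    /* Hypothetical load-time flow. */
    if (intel_gvt_init_host() == 0 &&
        intel_gvt_init_device(dev_priv) == 0)
            DRM_DEBUG_DRIVER("GVT-g host support enabled\n");
    /* On any failure GVT-g stays disabled and i915 loads normally. */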
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
new file mode 100644 (file)
index 0000000..fb619a6
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_H_
+#define _GVT_H_
+
+#include "debug.h"
+#include "hypercall.h"
+
+#define GVT_MAX_VGPU 8
+
+enum {
+       INTEL_GVT_HYPERVISOR_XEN = 0,
+       INTEL_GVT_HYPERVISOR_KVM,
+};
+
+struct intel_gvt_host {
+       bool initialized;
+       int hypervisor_type;
+       struct intel_gvt_mpt *mpt;
+};
+
+extern struct intel_gvt_host intel_gvt_host;
+
+/* Describe per-platform limitations. */
+struct intel_gvt_device_info {
+       u32 max_support_vgpus;
+       /* This data structure will grow bigger in GVT device model patches */
+};
+
+struct intel_vgpu {
+       struct intel_gvt *gvt;
+       int id;
+       unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
+};
+
+struct intel_gvt {
+       struct mutex lock;
+       bool initialized;
+
+       struct drm_i915_private *dev_priv;
+       struct idr vgpu_idr;    /* vGPU IDR pool */
+
+       struct intel_gvt_device_info device_info;
+};
+
+#include "mpt.h"
+
+#endif /* _GVT_H_ */
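gvt.c above reaches the GVT state via &dev_priv->gvt, which implies struct intel_gvt is embedded directly in drm_i915_private rather than allocated separately; the corresponding i915_drv.h change is not in this hunk, so this is an inferred sketch:

    struct drm_i915_private {
            /* ... existing members ... */
            struct intel_gvt gvt;
            /* ... */
    };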
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
new file mode 100644 (file)
index 0000000..254df8b
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_HYPERCALL_H_
+#define _GVT_HYPERCALL_H_
+
+/*
+ * Hypervisor-specific GVT-g MPT module function collections. Currently GVT-g
+ * supports both Xen and KVM by providing dedicated hypervisor-related MPT
+ * modules.
+ */
+struct intel_gvt_mpt {
+       int (*detect_host)(void);
+};
+
+extern struct intel_gvt_mpt xengt_mpt;
+extern struct intel_gvt_mpt kvmgt_mpt;
+
+#endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
new file mode 100644 (file)
index 0000000..03601e3
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _GVT_MPT_H_
+#define _GVT_MPT_H_
+
+/**
+ * DOC: Hypervisor Service APIs for GVT-g Core Logic
+ *
+ * This is the glue layer between the hypervisor-specific MPT modules and the
+ * GVT-g core logic. Each hypervisor MPT module provides a collection of
+ * function callbacks and is attached to the GVT host when the driver loads.
+ * The GVT-g core logic calls these APIs to request specific services from
+ * the hypervisor.
+ */
+
+/**
+ * intel_gvt_hypervisor_detect_host - check if GVT-g is running within the
+ * hypervisor host/privileged domain
+ *
+ * Returns:
+ * Nonzero if the current kernel runs in the host/privileged domain, zero if
+ * it runs inside a VM (intel_gvt_init_host() maps a zero return to -ENODEV)
+ */
+static inline int intel_gvt_hypervisor_detect_host(void)
+{
+       return intel_gvt_host.mpt->detect_host();
+}
+
+#endif /* _GVT_MPT_H_ */
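A minimal, hypothetical MPT module satisfying this interface could look like the sketch below; xengt_detect_host() is an assumed name, and the nonzero-in-host return convention follows the call site in intel_gvt_init_host():

    #include <linux/export.h>
    #include <xen/xen.h>

    static int xengt_detect_host(void)
    {
            /* Nonzero means we are the privileged host (Dom0). */
            return xen_initial_domain();
    }

    struct intel_gvt_mpt xengt_mpt = {
            .detect_host = xengt_detect_host,
    };
    EXPORT_SYMBOL_GPL(xengt_mpt);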
index a337f33bec5b2eb92bae7ce8860d6e6a84042458..b0fd6a7b060380d5d2d8dfe94192a3f3f59dcb9f 100644 (file)
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
        CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
        CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
        CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
-       CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
+       CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   W,
+             .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),
        CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
        CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
        CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
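The new W entry whitelists MI_LOAD_REGISTER_REG but forces its register operands through the register whitelist: .offset = 1 marks the first dword carrying a register offset, .step = 1 means each following dword does too, and .mask strips the non-address bits. A sketch of the resulting check; the real loop lives in check_cmd(), and find_reg() here stands in for the driver's lookup helper:

    for (i = desc->reg.offset; i < length; i += desc->reg.step) {
            u32 reg_addr = cmd[i] & desc->reg.mask;

            if (!find_reg(engine, is_master, reg_addr))
                    return false; /* operand register not whitelisted */
    }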
@@ -736,7 +737,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
 
 /**
  * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
- * @ring: the ringbuffer to initialize
+ * @engine: the engine to initialize
  *
  * Optionally initializes fields related to batch buffer command parsing in the
  * struct intel_engine_cs based on whether the platform requires software
@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
        int cmd_table_count;
        int ret;
 
-       if (!IS_GEN7(engine->dev))
+       if (!IS_GEN7(engine->i915))
                return 0;
 
        switch (engine->id) {
        case RCS:
-               if (IS_HASWELL(engine->dev)) {
+               if (IS_HASWELL(engine->i915)) {
                        cmd_tables = hsw_render_ring_cmds;
                        cmd_table_count =
                                ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
                        cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
                }
 
-               if (IS_HASWELL(engine->dev)) {
+               if (IS_HASWELL(engine->i915)) {
                        engine->reg_tables = hsw_render_reg_tables;
                        engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
                } else {
@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
                engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        case BCS:
-               if (IS_HASWELL(engine->dev)) {
+               if (IS_HASWELL(engine->i915)) {
                        cmd_tables = hsw_blt_ring_cmds;
                        cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
                } else {
@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
                        cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
                }
 
-               if (IS_HASWELL(engine->dev)) {
+               if (IS_HASWELL(engine->i915)) {
                        engine->reg_tables = hsw_blt_reg_tables;
                        engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
                } else {
@@ -829,7 +830,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
 
 /**
  * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
- * @ring: the ringbuffer to clean up
+ * @engine: the engine to clean up
  *
  * Releases any resources related to command parsing that may have been
  * initialized for the specified ring.
@@ -1023,7 +1024,7 @@ unpin_src:
 
 /**
  * i915_needs_cmd_parser() - should a given ring use software command parsing?
- * @ring: the ring in question
+ * @engine: the engine in question
  *
  * Only certain platforms require software batch buffer command parsing, and
  * only when enabled via module parameter.
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
        if (!engine->needs_cmd_parser)
                return false;
 
-       if (!USES_PPGTT(engine->dev))
+       if (!USES_PPGTT(engine->i915))
                return false;
 
        return (i915.enable_cmd_parser == 1);
@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
                                        return false;
                                }
 
+                               if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+                                       DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
+                                       return false;
+                               }
+
                                if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
                                        *oacontrol_set = (cmd[offset + 1] != 0);
                        }
@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
                                        return false;
                                }
 
+                               if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
+                                       DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
+                                                        reg_addr);
+                                       return false;
+                               }
+
                                if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
                                    (offset + 2 > length ||
                                     (cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1164,7 +1176,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 
 /**
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
- * @ring: the ring on which the batch is to execute
+ * @engine: the engine on which the batch is to execute
  * @batch_obj: the batch buffer in question
  * @shadow_batch_obj: copy of the batch buffer in question
  * @batch_start_offset: byte offset in the batch at which execution starts
@@ -1269,14 +1281,28 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
 
 /**
  * i915_cmd_parser_get_version() - get the cmd parser version number
+ * @dev_priv: i915 device private
  *
  * The cmd parser maintains a simple increasing integer version number suitable
  * for passing to userspace clients to determine what operations are permitted.
  *
  * Return: the current version number of the cmd parser
  */
-int i915_cmd_parser_get_version(void)
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
 {
+       struct intel_engine_cs *engine;
+       bool active = false;
+
+       /* If the command parser is not enabled, report 0 - unsupported */
+       for_each_engine(engine, dev_priv) {
+               if (i915_needs_cmd_parser(engine)) {
+                       active = true;
+                       break;
+               }
+       }
+       if (!active)
+               return 0;
+
        /*
         * Command parser version history
         *
@@ -1288,6 +1314,7 @@ int i915_cmd_parser_get_version(void)
         * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
         * 5. GPGPU dispatch compute indirect registers.
         * 6. TIMESTAMP register and Haswell CS GPR registers
+        * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
         */
-       return 6;
+       return 7;
 }
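Userspace reads this number through the I915_PARAM_CMD_PARSER_VERSION getparam; a minimal sketch using libdrm's drmIoctl() wrapper:

    #include <errno.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    static int cmd_parser_version(int fd)
    {
            int version = 0;
            drm_i915_getparam_t gp = {
                    .param = I915_PARAM_CMD_PARSER_VERSION,
                    .value = &version,
            };

            if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return -errno;

            return version; /* 0 now means the parser is not active */
    }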
index 32690332d441dc16b7ba51bd641add5e984cc52e..844fea795bae1b860588fa22bc6ffc1621d12cb4 100644 (file)
@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
        return 0;
 }
 
-static const char get_active_flag(struct drm_i915_gem_object *obj)
+static char get_active_flag(struct drm_i915_gem_object *obj)
 {
        return obj->active ? '*' : ' ';
 }
 
-static const char get_pin_flag(struct drm_i915_gem_object *obj)
+static char get_pin_flag(struct drm_i915_gem_object *obj)
 {
        return obj->pin_display ? 'p' : ' ';
 }
 
-static const char get_tiling_flag(struct drm_i915_gem_object *obj)
+static char get_tiling_flag(struct drm_i915_gem_object *obj)
 {
        switch (obj->tiling_mode) {
        default:
@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
        }
 }
 
-static inline const char get_global_flag(struct drm_i915_gem_object *obj)
+static char get_global_flag(struct drm_i915_gem_object *obj)
 {
        return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
 }
 
-static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
+static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
 {
        return obj->mapping ? 'M' : ' ';
 }
@@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
 
-static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
-{
-       seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
-       seq_putc(m, ctx->remap_slice ? 'R' : 'r');
-       seq_putc(m, ' ');
-}
-
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
@@ -272,7 +265,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        LIST_HEAD(stolen);
@@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m,
        print_file_stats(m, "[k]batch pool", stats);
 }
 
+static int per_file_ctx_stats(int id, void *ptr, void *data)
+{
+       struct i915_gem_context *ctx = ptr;
+       int n;
+
+       for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
+               if (ctx->engine[n].state)
+                       per_file_stats(0, ctx->engine[n].state, data);
+               if (ctx->engine[n].ringbuf)
+                       per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
+       }
+
+       return 0;
+}
+
+static void print_context_stats(struct seq_file *m,
+                               struct drm_i915_private *dev_priv)
+{
+       struct file_stats stats;
+       struct drm_file *file;
+
+       memset(&stats, 0, sizeof(stats));
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       if (dev_priv->kernel_context)
+               per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+
+       list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
+               struct drm_i915_file_private *fpriv = file->driver_priv;
+               idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+       }
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       print_file_stats(m, "[k]contexts", stats);
+}
+
 #define count_vmas(list, member) do { \
        list_for_each_entry(vma, list, member) { \
                size += i915_gem_obj_total_ggtt_size(vma->obj); \
@@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 
        seq_putc(m, '\n');
        print_batch_pool_stats(m, dev_priv);
-
        mutex_unlock(&dev->struct_mutex);
 
        mutex_lock(&dev->filelist_mutex);
+       print_context_stats(m, dev_priv);
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
                struct task_struct *task;
@@ -562,7 +591,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        uintptr_t list = (uintptr_t) node->info_ent->data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        u64 total_obj_size, total_gtt_size;
        int count, ret;
@@ -596,7 +625,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc;
        int ret;
 
@@ -607,18 +636,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
        for_each_intel_crtc(dev, crtc) {
                const char pipe = pipe_name(crtc->pipe);
                const char plane = plane_name(crtc->plane);
-               struct intel_unpin_work *work;
+               struct intel_flip_work *work;
 
                spin_lock_irq(&dev->event_lock);
-               work = crtc->unpin_work;
+               work = crtc->flip_work;
                if (work == NULL) {
                        seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                   pipe, plane);
                } else {
+                       u32 pending;
                        u32 addr;
 
-                       if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
-                               seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+                       pending = atomic_read(&work->pending);
+                       if (pending) {
+                               seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
@@ -631,18 +662,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                           engine->name,
                                           i915_gem_request_get_seqno(work->flip_queued_req),
                                           dev_priv->next_seqno,
-                                          engine->get_seqno(engine),
-                                          i915_gem_request_completed(work->flip_queued_req, true));
+                                          intel_engine_get_seqno(engine),
+                                          i915_gem_request_completed(work->flip_queued_req));
                        } else
                                seq_printf(m, "Flip not associated with any ring\n");
                        seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
                                   work->flip_queued_vblank,
                                   work->flip_ready_vblank,
-                                  drm_crtc_vblank_count(&crtc->base));
-                       if (work->enable_stall_check)
-                               seq_puts(m, "Stall check enabled, ");
-                       else
-                               seq_puts(m, "Stall check waiting for page flip ioctl, ");
+                                  intel_crtc_get_vblank_counter(crtc));
                        seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
                        if (INTEL_INFO(dev)->gen >= 4)
@@ -668,7 +695,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *engine;
        int total = 0;
@@ -713,7 +740,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        struct drm_i915_gem_request *req;
        int ret, any;
@@ -761,17 +788,29 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 static void i915_ring_seqno_info(struct seq_file *m,
                                 struct intel_engine_cs *engine)
 {
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct rb_node *rb;
+
        seq_printf(m, "Current sequence (%s): %x\n",
-                  engine->name, engine->get_seqno(engine));
-       seq_printf(m, "Current user interrupts (%s): %x\n",
-                  engine->name, READ_ONCE(engine->user_interrupts));
+                  engine->name, intel_engine_get_seqno(engine));
+       seq_printf(m, "Current user interrupts (%s): %lx\n",
+                  engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups));
+
+       spin_lock(&b->lock);
+       for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+               struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+               seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
+                          engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
+       }
+       spin_unlock(&b->lock);
 }
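
The waiter dump added above walks the breadcrumbs rbtree in seqno order while holding b->lock. A self-contained sketch of that iteration idiom (rb_first/rb_next plus container_of); the waiter type and seqno field are hypothetical stand-ins for the real intel_wait:

#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct waiter {
	struct rb_node node;	/* linked into the tree of waiters */
	u32 seqno;
};

/* Print every waiter in tree order; holding the lock keeps rb_next()
 * safe against concurrent insert/erase, the same discipline as above. */
static void dump_waiters(struct rb_root *waiters, spinlock_t *lock)
{
	struct rb_node *rb;

	spin_lock(lock);
	for (rb = rb_first(waiters); rb; rb = rb_next(rb)) {
		struct waiter *w = container_of(rb, typeof(*w), node);

		pr_info("waiting on %x\n", w->seqno);
	}
	spin_unlock(lock);
}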
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        int ret;
 
@@ -794,7 +833,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        int ret, i, pipe;
 
@@ -985,7 +1024,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i, ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1013,7 +1052,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        const u32 *hws;
        int i;
@@ -1124,7 +1163,7 @@ static int
 i915_next_seqno_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1161,7 +1200,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
        intel_runtime_pm_get(dev_priv);
@@ -1281,6 +1320,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                }
                seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
                           pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
+               seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
                seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
                seq_printf(m, "Render p-state ratio: %d\n",
                           (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
@@ -1363,7 +1403,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        u64 acthd[I915_NUM_ENGINES];
        u32 seqno[I915_NUM_ENGINES];
@@ -1380,10 +1420,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
        for_each_engine_id(engine, dev_priv, id) {
                acthd[id] = intel_ring_get_active_head(engine);
-               seqno[id] = engine->get_seqno(engine);
+               seqno[id] = intel_engine_get_seqno(engine);
        }
 
-       i915_get_extra_instdone(dev, instdone);
+       i915_get_extra_instdone(dev_priv, instdone);
 
        intel_runtime_pm_put(dev_priv);
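
Both hunks above replace the open-coded engine->get_seqno(engine) call with the intel_engine_get_seqno() helper. A sketch of that refactor pattern with hypothetical types; the real helper reads the seqno from the engine's hardware status page, so call sites stay unchanged if the mechanism does not:

#include <linux/types.h>

/* Hypothetical minimal engine type, for illustration only. */
struct engine {
	u32 (*get_seqno)(struct engine *engine);
};

/* One named entry point: callers no longer care how the seqno is
 * fetched, so the implementation can change without touching them. */
static inline u32 engine_get_seqno(struct engine *engine)
{
	return engine->get_seqno(engine);
}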
 
@@ -1400,9 +1440,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                           engine->hangcheck.seqno,
                           seqno[id],
                           engine->last_submitted_seqno);
-               seq_printf(m, "\tuser interrupts = %x [current %x]\n",
+               seq_printf(m, "\twaiters? %d\n",
+                          intel_engine_has_waiter(engine));
+               seq_printf(m, "\tuser interrupts = %lx [current %lx]\n",
                           engine->hangcheck.user_interrupts,
-                          READ_ONCE(engine->user_interrupts));
+                          READ_ONCE(engine->breadcrumbs.irq_wakeups));
                seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
                           (long long)engine->hangcheck.acthd,
                           (long long)acthd[id]);
@@ -1432,7 +1474,7 @@ static int ironlake_drpc_info(struct seq_file *m)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 rgvmodectl, rstdbyctl;
        u16 crstandvid;
        int ret;
@@ -1500,7 +1542,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_uncore_forcewake_domain *fw_domain;
 
        spin_lock_irq(&dev_priv->uncore.lock);
@@ -1518,7 +1560,7 @@ static int vlv_drpc_info(struct seq_file *m)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 rpmodectl1, rcctl1, pw_status;
 
        intel_runtime_pm_get(dev_priv);
@@ -1558,7 +1600,7 @@ static int gen6_drpc_info(struct seq_file *m)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
        unsigned forcewake_count;
        int count = 0, ret;
@@ -1670,7 +1712,7 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        seq_printf(m, "FB tracking busy bits: 0x%08x\n",
                   dev_priv->fb_tracking.busy_bits);
@@ -1685,7 +1727,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!HAS_FBC(dev)) {
                seq_puts(m, "FBC unsupported on this chipset\n");
@@ -1715,7 +1757,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 static int i915_fbc_fc_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
                return -ENODEV;
@@ -1728,7 +1770,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
 static int i915_fbc_fc_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 reg;
 
        if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
@@ -1755,7 +1797,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!HAS_IPS(dev)) {
                seq_puts(m, "not supported\n");
@@ -1785,7 +1827,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool sr_enabled = false;
 
        intel_runtime_pm_get(dev_priv);
@@ -1814,7 +1856,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long temp, chipset, gfx;
        int ret;
 
@@ -1842,7 +1884,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
        int gpu_freq, ia_freq;
        unsigned int max_gpu_freq, min_gpu_freq;
@@ -1897,7 +1939,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_opregion *opregion = &dev_priv->opregion;
        int ret;
 
@@ -1918,7 +1960,7 @@ static int i915_vbt(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_opregion *opregion = &dev_priv->opregion;
 
        if (opregion->vbt)
@@ -1940,19 +1982,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                return ret;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-       if (to_i915(dev)->fbdev) {
-               fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
-
-               seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
-                         fbdev_fb->base.width,
-                         fbdev_fb->base.height,
-                         fbdev_fb->base.depth,
-                         fbdev_fb->base.bits_per_pixel,
-                         fbdev_fb->base.modifier[0],
-                         drm_framebuffer_read_refcount(&fbdev_fb->base));
-               describe_obj(m, fbdev_fb->obj);
-               seq_putc(m, '\n');
-       }
+       if (to_i915(dev)->fbdev) {
+               fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
+
+               seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+                          fbdev_fb->base.width,
+                          fbdev_fb->base.height,
+                          fbdev_fb->base.depth,
+                          fbdev_fb->base.bits_per_pixel,
+                          fbdev_fb->base.modifier[0],
+                          drm_framebuffer_read_refcount(&fbdev_fb->base));
+               describe_obj(m, fbdev_fb->obj);
+               seq_putc(m, '\n');
+       }
 #endif
 
        mutex_lock(&dev->mode_config.fb_lock);
@@ -1989,10 +2031,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
-       struct intel_context *ctx;
-       enum intel_engine_id id;
+       struct i915_gem_context *ctx;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2000,32 +2041,36 @@ static int i915_context_status(struct seq_file *m, void *unused)
                return ret;
 
        list_for_each_entry(ctx, &dev_priv->context_list, link) {
-               if (!i915.enable_execlists &&
-                   ctx->legacy_hw_ctx.rcs_state == NULL)
-                       continue;
-
-               seq_puts(m, "HW context ");
-               describe_ctx(m, ctx);
-               if (ctx == dev_priv->kernel_context)
-                       seq_printf(m, "(kernel context) ");
+               seq_printf(m, "HW context %u ", ctx->hw_id);
+               if (IS_ERR(ctx->file_priv)) {
+                       seq_puts(m, "(deleted) ");
+               } else if (ctx->file_priv) {
+                       struct pid *pid = ctx->file_priv->file->pid;
+                       struct task_struct *task;
 
-               if (i915.enable_execlists) {
-                       seq_putc(m, '\n');
-                       for_each_engine_id(engine, dev_priv, id) {
-                               struct drm_i915_gem_object *ctx_obj =
-                                       ctx->engine[id].state;
-                               struct intel_ringbuffer *ringbuf =
-                                       ctx->engine[id].ringbuf;
-
-                               seq_printf(m, "%s: ", engine->name);
-                               if (ctx_obj)
-                                       describe_obj(m, ctx_obj);
-                               if (ringbuf)
-                                       describe_ctx_ringbuf(m, ringbuf);
-                               seq_putc(m, '\n');
+                       task = get_pid_task(pid, PIDTYPE_PID);
+                       if (task) {
+                               seq_printf(m, "(%s [%d]) ",
+                                          task->comm, task->pid);
+                               put_task_struct(task);
                        }
                } else {
-                       describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
+                       seq_puts(m, "(kernel) ");
+               }
+
+               seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+               seq_putc(m, '\n');
+
+               for_each_engine(engine, dev_priv) {
+                       struct intel_context *ce = &ctx->engine[engine->id];
+
+                       seq_printf(m, "%s: ", engine->name);
+                       seq_putc(m, ce->initialised ? 'I' : 'i');
+                       if (ce->state)
+                               describe_obj(m, ce->state);
+                       if (ce->ringbuf)
+                               describe_ctx_ringbuf(m, ce->ringbuf);
+                       seq_putc(m, '\n');
                }
 
                seq_putc(m, '\n');
@@ -2037,24 +2082,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
 }
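
The pid-to-task lookup added in i915_context_status() must balance the reference that get_pid_task() takes. A minimal sketch of that pairing using only core kernel APIs; the function name is illustrative:

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

/* Resolve a struct pid to its task and print "(comm [pid]) ".
 * get_pid_task() returns the task with a reference held (or NULL),
 * so every success path must end in put_task_struct(). */
static void print_owner(struct seq_file *m, struct pid *pid)
{
	struct task_struct *task;

	task = get_pid_task(pid, PIDTYPE_PID);
	if (task) {
		seq_printf(m, "(%s [%d]) ", task->comm, task->pid);
		put_task_struct(task);
	}
}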
 
 static void i915_dump_lrc_obj(struct seq_file *m,
-                             struct intel_context *ctx,
+                             struct i915_gem_context *ctx,
                              struct intel_engine_cs *engine)
 {
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
        struct page *page;
        uint32_t *reg_state;
        int j;
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
        unsigned long ggtt_offset = 0;
 
+       seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
+
        if (ctx_obj == NULL) {
-               seq_printf(m, "Context on %s with no gem object\n",
-                          engine->name);
+               seq_puts(m, "\tNot allocated\n");
                return;
        }
 
-       seq_printf(m, "CONTEXT: %s %u\n", engine->name,
-                  intel_execlists_ctx_id(ctx, engine));
-
        if (!i915_gem_obj_ggtt_bound(ctx_obj))
                seq_puts(m, "\tNot bound in GGTT\n");
        else
@@ -2085,9 +2128,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret;
 
        if (!i915.enable_execlists) {
@@ -2100,9 +2143,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
                return ret;
 
        list_for_each_entry(ctx, &dev_priv->context_list, link)
-               if (ctx != dev_priv->kernel_context)
-                       for_each_engine(engine, dev_priv)
-                               i915_dump_lrc_obj(m, ctx, engine);
+               for_each_engine(engine, dev_priv)
+                       i915_dump_lrc_obj(m, ctx, engine);
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -2113,7 +2155,7 @@ static int i915_execlists(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        u32 status_pointer;
        u8 read_pointer;
@@ -2173,8 +2215,8 @@ static int i915_execlists(struct seq_file *m, void *data)
 
                seq_printf(m, "\t%d requests in queue\n", count);
                if (head_req) {
-                       seq_printf(m, "\tHead request id: %u\n",
-                                  intel_execlists_ctx_id(head_req->ctx, engine));
+                       seq_printf(m, "\tHead request context: %u\n",
+                                  head_req->ctx->hw_id);
                        seq_printf(m, "\tHead request tail: %u\n",
                                   head_req->tail);
                }
@@ -2216,7 +2258,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2268,7 +2310,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 
 static int per_file_ctx(int id, void *ptr, void *data)
 {
-       struct intel_context *ctx = ptr;
+       struct i915_gem_context *ctx = ptr;
        struct seq_file *m = data;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
 
@@ -2289,7 +2331,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
 
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        int i;
@@ -2310,15 +2352,15 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
-       if (INTEL_INFO(dev)->gen == 6)
+       if (IS_GEN6(dev_priv))
                seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
        for_each_engine(engine, dev_priv) {
                seq_printf(m, "%s\n", engine->name);
-               if (INTEL_INFO(dev)->gen == 7)
+               if (IS_GEN7(dev_priv))
                        seq_printf(m, "GFX_MODE: 0x%08x\n",
                                   I915_READ(RING_MODE_GEN7(engine)));
                seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
@@ -2344,7 +2386,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_file *file;
 
        int ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2365,16 +2407,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
                task = get_pid_task(file->pid, PIDTYPE_PID);
                if (!task) {
                        ret = -ESRCH;
-                       goto out_put;
+                       goto out_unlock;
                }
                seq_printf(m, "\nproc: %s\n", task->comm);
                put_task_struct(task);
                idr_for_each(&file_priv->context_idr, per_file_ctx,
                             (void *)(unsigned long)m);
        }
+out_unlock:
        mutex_unlock(&dev->filelist_mutex);
 
-out_put:
        intel_runtime_pm_put(dev_priv);
        mutex_unlock(&dev->struct_mutex);
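
The out_put to out_unlock change above fixes the unwind order: the old error path jumped past the filelist_mutex unlock. A sketch of the goto-ladder convention it restores, releasing locks in reverse order of acquisition (the function and lock names are hypothetical):

#include <linux/mutex.h>

static int do_work_locked(struct mutex *a, struct mutex *b)
{
	int ret;

	ret = mutex_lock_interruptible(a);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(b);
	if (ret)
		goto out_unlock_a;

	/* ... work that may fail sets ret ... */
	ret = 0;

	mutex_unlock(b);
out_unlock_a:
	mutex_unlock(a);
	return ret;
}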
 
@@ -2387,7 +2429,7 @@ static int count_irq_waiters(struct drm_i915_private *i915)
        int count = 0;
 
        for_each_engine(engine, i915)
-               count += engine->irq_refcount;
+               count += intel_engine_has_waiter(engine);
 
        return count;
 }
@@ -2396,11 +2438,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_file *file;
 
        seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
-       seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
+       seq_printf(m, "GPU busy? %s [%x]\n",
+                  yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
        seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
        seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
                   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
@@ -2441,7 +2484,7 @@ static int i915_llc(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const bool edram = INTEL_GEN(dev_priv) > 8;
 
        seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
@@ -2454,7 +2497,7 @@ static int i915_llc(struct seq_file *m, void *data)
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
-       struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
        u32 tmp, i;
 
@@ -2509,15 +2552,16 @@ static void i915_guc_client_info(struct seq_file *m,
        seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
                client->wq_size, client->wq_offset, client->wq_tail);
 
+       seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
        seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
        seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
        seq_printf(m, "\tLast submission result: %d\n", client->retcode);
 
        for_each_engine(engine, dev_priv) {
                seq_printf(m, "\tSubmissions: %llu %s\n",
-                               client->submissions[engine->guc_id],
+                               client->submissions[engine->id],
                                engine->name);
-               tot += client->submissions[engine->guc_id];
+               tot += client->submissions[engine->id];
        }
        seq_printf(m, "\tTotal: %llu\n", tot);
 }
@@ -2526,7 +2570,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc guc;
        struct i915_guc_client client = {};
        struct intel_engine_cs *engine;
@@ -2545,6 +2589,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
 
        mutex_unlock(&dev->struct_mutex);
 
+       seq_printf(m, "Doorbell map:\n");
+       seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
+       seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
+
        seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
        seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
        seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
@@ -2554,9 +2602,9 @@ static int i915_guc_info(struct seq_file *m, void *data)
        seq_printf(m, "\nGuC submissions:\n");
        for_each_engine(engine, dev_priv) {
                seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
-                       engine->name, guc.submissions[engine->guc_id],
-                       guc.last_seqno[engine->guc_id]);
-               total += guc.submissions[engine->guc_id];
+                       engine->name, guc.submissions[engine->id],
+                       guc.last_seqno[engine->id]);
+               total += guc.submissions[engine->id];
        }
        seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -2572,7 +2620,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
        u32 *log;
        int i = 0, pg;
@@ -2600,7 +2648,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 psrperf = 0;
        u32 stat[3];
        enum pipe pipe;
@@ -2668,7 +2716,6 @@ static int i915_sink_crc(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct intel_encoder *encoder;
        struct intel_connector *connector;
        struct intel_dp *intel_dp = NULL;
        int ret;
@@ -2676,18 +2723,19 @@ static int i915_sink_crc(struct seq_file *m, void *data)
 
        drm_modeset_lock_all(dev);
        for_each_intel_connector(dev, connector) {
+               struct drm_crtc *crtc;
 
-               if (connector->base.dpms != DRM_MODE_DPMS_ON)
+               if (!connector->base.state->best_encoder)
                        continue;
 
-               if (!connector->base.encoder)
+               crtc = connector->base.state->crtc;
+               if (!crtc->state->active)
                        continue;
 
-               encoder = to_intel_encoder(connector->base.encoder);
-               if (encoder->type != INTEL_OUTPUT_EDP)
+               if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
                        continue;
 
-               intel_dp = enc_to_intel_dp(&encoder->base);
+               intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
 
                ret = intel_dp_sink_crc(intel_dp, crc);
                if (ret)
@@ -2708,7 +2756,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u64 power;
        u32 units;
 
@@ -2734,12 +2782,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!HAS_RUNTIME_PM(dev_priv))
                seq_puts(m, "Runtime power management not supported\n");
 
-       seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
+       seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
        seq_printf(m, "IRQs disabled: %s\n",
                   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
@@ -2749,8 +2797,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
        seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
        seq_printf(m, "PCI device power state: %s [%d]\n",
-                  pci_power_name(dev_priv->dev->pdev->current_state),
-                  dev_priv->dev->pdev->current_state);
+                  pci_power_name(dev_priv->drm.pdev->current_state),
+                  dev_priv->drm.pdev->current_state);
 
        return 0;
 }
@@ -2759,7 +2807,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        int i;
 
@@ -2794,7 +2842,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_csr *csr;
 
        if (!HAS_CSR(dev)) {
@@ -2917,7 +2965,7 @@ static void intel_dp_info(struct seq_file *m,
 
        seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
        seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
-       if (intel_encoder->type == INTEL_OUTPUT_EDP)
+       if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
                intel_panel_info(m, &intel_connector->panel);
 }
 
@@ -2956,14 +3004,26 @@ static void intel_connector_info(struct seq_file *m,
                seq_printf(m, "\tCEA rev: %d\n",
                           connector->display_info.cea_rev);
        }
-       if (intel_encoder) {
-               if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-                   intel_encoder->type == INTEL_OUTPUT_EDP)
-                       intel_dp_info(m, intel_connector);
-               else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
-                       intel_hdmi_info(m, intel_connector);
-               else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+
+       if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+               return;
+
+       switch (connector->connector_type) {
+       case DRM_MODE_CONNECTOR_DisplayPort:
+       case DRM_MODE_CONNECTOR_eDP:
+               intel_dp_info(m, intel_connector);
+               break;
+       case DRM_MODE_CONNECTOR_LVDS:
+               if (intel_encoder->type == INTEL_OUTPUT_LVDS)
                        intel_lvds_info(m, intel_connector);
+               break;
+       case DRM_MODE_CONNECTOR_HDMIA:
+               if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
+                   intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
+                       intel_hdmi_info(m, intel_connector);
+               break;
+       default:
+               break;
        }
 
        seq_printf(m, "\tmodes:\n");
@@ -2973,7 +3033,7 @@ static void intel_connector_info(struct seq_file *m,
 
 static bool cursor_active(struct drm_device *dev, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 state;
 
        if (IS_845G(dev) || IS_I865G(dev))
@@ -2986,7 +3046,7 @@ static bool cursor_active(struct drm_device *dev, int pipe)
 
 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pos;
 
        pos = I915_READ(CURPOS(pipe));
@@ -3107,7 +3167,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc;
        struct drm_connector *connector;
 
@@ -3162,13 +3222,13 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
        enum intel_engine_id id;
        int j, ret;
 
-       if (!i915_semaphore_is_enabled(dev)) {
+       if (!i915_semaphore_is_enabled(dev_priv)) {
                seq_puts(m, "Semaphores are disabled\n");
                return 0;
        }
@@ -3235,7 +3295,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        drm_modeset_lock_all(dev);
@@ -3265,7 +3325,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
        struct intel_engine_cs *engine;
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_workarounds *workarounds = &dev_priv->workarounds;
        enum intel_engine_id id;
 
@@ -3303,7 +3363,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_ddb_allocation *ddb;
        struct skl_ddb_entry *entry;
        enum pipe pipe;
@@ -3341,31 +3401,16 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 static void drrs_status_per_crtc(struct seq_file *m,
                struct drm_device *dev, struct intel_crtc *intel_crtc)
 {
-       struct intel_encoder *intel_encoder;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
        int vrefresh = 0;
+       struct drm_connector *connector;
 
-       for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
-               /* Encoder connected on this CRTC */
-               switch (intel_encoder->type) {
-               case INTEL_OUTPUT_EDP:
-                       seq_puts(m, "eDP:\n");
-                       break;
-               case INTEL_OUTPUT_DSI:
-                       seq_puts(m, "DSI:\n");
-                       break;
-               case INTEL_OUTPUT_HDMI:
-                       seq_puts(m, "HDMI:\n");
-                       break;
-               case INTEL_OUTPUT_DISPLAYPORT:
-                       seq_puts(m, "DP:\n");
-                       break;
-               default:
-                       seq_printf(m, "Other encoder (id=%d).\n",
-                                               intel_encoder->type);
-                       return;
-               }
+       drm_for_each_connector(connector, dev) {
+               if (connector->state->crtc != &intel_crtc->base)
+                       continue;
+
+               seq_printf(m, "%s:\n", connector->name);
        }
 
        if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
@@ -3428,18 +3473,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
        struct intel_crtc *intel_crtc;
        int active_crtc_cnt = 0;
 
+       drm_modeset_lock_all(dev);
        for_each_intel_crtc(dev, intel_crtc) {
-               drm_modeset_lock(&intel_crtc->base.mutex, NULL);
-
                if (intel_crtc->base.state->active) {
                        active_crtc_cnt++;
                        seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
 
                        drrs_status_per_crtc(m, dev, intel_crtc);
                }
-
-               drm_modeset_unlock(&intel_crtc->base.mutex);
        }
+       drm_modeset_unlock_all(dev);
 
        if (!active_crtc_cnt)
                seq_puts(m, "No active crtc found\n");
@@ -3457,17 +3500,23 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       struct drm_encoder *encoder;
        struct intel_encoder *intel_encoder;
        struct intel_digital_port *intel_dig_port;
+       struct drm_connector *connector;
+
        drm_modeset_lock_all(dev);
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-               intel_encoder = to_intel_encoder(encoder);
-               if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+       drm_for_each_connector(connector, dev) {
+               if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                        continue;
-               intel_dig_port = enc_to_dig_port(encoder);
+
+               intel_encoder = intel_attached_encoder(connector);
+               if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+                       continue;
+
+               intel_dig_port = enc_to_dig_port(&intel_encoder->base);
                if (!intel_dig_port->dp.can_mst)
                        continue;
+
                seq_printf(m, "MST Source Port %c\n",
                           port_name(intel_dig_port->port));
                drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
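
Both this hunk and the DRRS one above move from walking encoder lists to iterating connectors under the big modeset lock. A sketch of that iteration idiom with the drmP.h-era API this diff targets; the filter and function name are illustrative:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <linux/seq_file.h>

/* List every DisplayPort connector; drm_modeset_lock_all() takes
 * mode_config.mutex, which drm_for_each_connector() requires. */
static void dump_dp_connectors(struct drm_device *dev, struct seq_file *m)
{
	struct drm_connector *connector;

	drm_modeset_lock_all(dev);
	drm_for_each_connector(connector, dev) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;
		seq_printf(m, "%s\n", connector->name);
	}
	drm_modeset_unlock_all(dev);
}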
@@ -3479,7 +3528,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 {
        struct pipe_crc_info *info = inode->i_private;
-       struct drm_i915_private *dev_priv = info->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(info->dev);
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
        if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
@@ -3503,7 +3552,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
 {
        struct pipe_crc_info *info = inode->i_private;
-       struct drm_i915_private *dev_priv = info->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(info->dev);
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
        spin_lock_irq(&pipe_crc->lock);
@@ -3531,7 +3580,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
 {
        struct pipe_crc_info *info = filep->private_data;
        struct drm_device *dev = info->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
        char buf[PIPE_CRC_BUFFER_LEN];
        int n_entries;
@@ -3664,7 +3713,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
 static int display_crc_ctl_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3725,7 +3774,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
                case INTEL_OUTPUT_TVOUT:
                        *source = INTEL_PIPE_CRC_SOURCE_TV;
                        break;
-               case INTEL_OUTPUT_DISPLAYPORT:
+               case INTEL_OUTPUT_DP:
                case INTEL_OUTPUT_EDP:
                        dig_port = enc_to_dig_port(&encoder->base);
                        switch (dig_port->port) {
@@ -3758,7 +3807,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
                                enum intel_pipe_crc_source *source,
                                uint32_t *val)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool need_stable_symbols = false;
 
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3829,7 +3878,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
                                 enum intel_pipe_crc_source *source,
                                 uint32_t *val)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool need_stable_symbols = false;
 
        if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3903,7 +3952,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
                                         enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
        switch (pipe) {
@@ -3928,7 +3977,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
                                         enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
        if (pipe == PIPE_A)
@@ -3971,7 +4020,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 
 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
        struct intel_crtc_state *pipe_config;
@@ -4039,7 +4088,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
                               enum intel_pipe_crc_source source)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
                                                                        pipe));
@@ -4546,7 +4595,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4562,7 +4611,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4578,7 +4627,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4669,7 +4718,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4685,7 +4734,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4701,7 +4750,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
 {
        struct seq_file *m = file->private_data;
        struct drm_device *dev = m->private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint16_t *latencies;
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4743,7 +4792,7 @@ static int
 i915_wedged_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        *val = i915_terminally_wedged(&dev_priv->gpu_error);
 
@@ -4754,7 +4803,7 @@ static int
 i915_wedged_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /*
         * There is no safeguard against this debugfs entry colliding
@@ -4769,7 +4818,7 @@ i915_wedged_set(void *data, u64 val)
 
        intel_runtime_pm_get(dev_priv);
 
-       i915_handle_error(dev, val,
+       i915_handle_error(dev_priv, val,
                          "Manually setting wedged to %llu", val);
 
        intel_runtime_pm_put(dev_priv);
@@ -4781,45 +4830,11 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
                        i915_wedged_get, i915_wedged_set,
                        "%llu\n");
 
-static int
-i915_ring_stop_get(void *data, u64 *val)
-{
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       *val = dev_priv->gpu_error.stop_rings;
-
-       return 0;
-}
-
-static int
-i915_ring_stop_set(void *data, u64 val)
-{
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       dev_priv->gpu_error.stop_rings = val;
-       mutex_unlock(&dev->struct_mutex);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
-                       i915_ring_stop_get, i915_ring_stop_set,
-                       "0x%08llx\n");
-
 static int
 i915_ring_missed_irq_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        *val = dev_priv->gpu_error.missed_irq_rings;
        return 0;
@@ -4829,7 +4844,7 @@ static int
 i915_ring_missed_irq_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        /* Lock against concurrent debugfs callers */
@@ -4850,7 +4865,7 @@ static int
 i915_ring_test_irq_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        *val = dev_priv->gpu_error.test_irq_rings;
 
@@ -4861,18 +4876,11 @@ static int
 i915_ring_test_irq_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
+       val &= INTEL_INFO(dev_priv)->ring_mask;
        DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
-
-       /* Lock against concurrent debugfs callers */
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
        dev_priv->gpu_error.test_irq_rings = val;
-       mutex_unlock(&dev->struct_mutex);
 
        return 0;
 }
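
These get/set pairs are wired up through DEFINE_SIMPLE_ATTRIBUTE, which generates the file_operations for a single u64 debugfs value from a getter, a setter, and a print format. A self-contained sketch with illustrative names:

#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 example_val;

static int example_get(void *data, u64 *val)
{
	*val = example_val;
	return 0;
}

static int example_set(void *data, u64 val)
{
	example_val = val;
	return 0;
}

/* Generates example_fops whose read/write handlers print and parse
 * the value using the format string below. */
DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "0x%08llx\n");

Such a pair is then exposed with debugfs_create_file("example", 0644, parent, NULL, &example_fops), which is roughly how the i915_debugfs_files[] table later in this file gets registered.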
@@ -4901,7 +4909,7 @@ static int
 i915_drop_caches_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4913,13 +4921,13 @@ i915_drop_caches_set(void *data, u64 val)
                return ret;
 
        if (val & DROP_ACTIVE) {
-               ret = i915_gpu_idle(dev);
+               ret = i915_gem_wait_for_idle(dev_priv);
                if (ret)
                        goto unlock;
        }
 
        if (val & (DROP_RETIRE | DROP_ACTIVE))
-               i915_gem_retire_requests(dev);
+               i915_gem_retire_requests(dev_priv);
 
        if (val & DROP_BOUND)
                i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@@ -4941,7 +4949,7 @@ static int
 i915_max_freq_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        if (INTEL_INFO(dev)->gen < 6)
@@ -4963,7 +4971,7 @@ static int
 i915_max_freq_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 hw_max, hw_min;
        int ret;
 
@@ -4993,7 +5001,7 @@ i915_max_freq_set(void *data, u64 val)
 
        dev_priv->rps.max_freq_softlimit = val;
 
-       intel_set_rps(dev, val);
+       intel_set_rps(dev_priv, val);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -5008,7 +5016,7 @@ static int
 i915_min_freq_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        if (INTEL_INFO(dev)->gen < 6)
@@ -5030,7 +5038,7 @@ static int
 i915_min_freq_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 hw_max, hw_min;
        int ret;
 
@@ -5060,7 +5068,7 @@ i915_min_freq_set(void *data, u64 val)
 
        dev_priv->rps.min_freq_softlimit = val;
 
-       intel_set_rps(dev, val);
+       intel_set_rps(dev_priv, val);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -5075,7 +5083,7 @@ static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 snpcr;
        int ret;
 
@@ -5090,7 +5098,7 @@ i915_cache_sharing_get(void *data, u64 *val)
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
        intel_runtime_pm_put(dev_priv);
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
        *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -5101,7 +5109,7 @@ static int
 i915_cache_sharing_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 snpcr;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -5138,7 +5146,7 @@ struct sseu_dev_status {
 static void cherryview_sseu_device_status(struct drm_device *dev,
                                          struct sseu_dev_status *stat)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ss_max = 2;
        int ss;
        u32 sig1[ss_max], sig2[ss_max];
@@ -5170,7 +5178,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
 static void gen9_sseu_device_status(struct drm_device *dev,
                                    struct sseu_dev_status *stat)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int s_max = 3, ss_max = 4;
        int s, ss;
        u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
@@ -5235,7 +5243,7 @@ static void gen9_sseu_device_status(struct drm_device *dev,
 static void broadwell_sseu_device_status(struct drm_device *dev,
                                         struct sseu_dev_status *stat)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int s;
        u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
 
@@ -5277,6 +5285,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
                   INTEL_INFO(dev)->eu_total);
        seq_printf(m, "  Available EU Per Subslice: %u\n",
                   INTEL_INFO(dev)->eu_per_subslice);
+       seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
+       if (HAS_POOLED_EU(dev))
+               seq_printf(m, "  Min EU in pool: %u\n",
+                          INTEL_INFO(dev)->min_eu_in_pool);
        seq_printf(m, "  Has Slice Power Gating: %s\n",
                   yesno(INTEL_INFO(dev)->has_slice_pg));
        seq_printf(m, "  Has Subslice Power Gating: %s\n",
@@ -5310,7 +5322,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
        struct drm_device *dev = inode->i_private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen < 6)
                return 0;
@@ -5324,7 +5336,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
        struct drm_device *dev = inode->i_private;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen < 6)
                return 0;
@@ -5440,7 +5452,6 @@ static const struct i915_debugfs_files {
        {"i915_max_freq", &i915_max_freq_fops},
        {"i915_min_freq", &i915_min_freq_fops},
        {"i915_cache_sharing", &i915_cache_sharing_fops},
-       {"i915_ring_stop", &i915_ring_stop_fops},
        {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
        {"i915_ring_test_irq", &i915_ring_test_irq_fops},
        {"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -5458,7 +5469,7 @@ static const struct i915_debugfs_files {
 
 void intel_display_crc_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
 
        for_each_pipe(dev_priv, pipe) {
@@ -5470,8 +5481,9 @@ void intel_display_crc_init(struct drm_device *dev)
        }
 }
 
-int i915_debugfs_init(struct drm_minor *minor)
+int i915_debugfs_register(struct drm_i915_private *dev_priv)
 {
+       struct drm_minor *minor = dev_priv->drm.primary;
        int ret, i;
 
        ret = i915_forcewake_create(minor->debugfs_root, minor);
@@ -5497,8 +5509,9 @@ int i915_debugfs_init(struct drm_minor *minor)
                                        minor->debugfs_root, minor);
 }
 
-void i915_debugfs_cleanup(struct drm_minor *minor)
+void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
 {
+       struct drm_minor *minor = dev_priv->drm.primary;
        int i;
 
        drm_debugfs_remove_files(i915_debugfs_list,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
deleted file mode 100644 (file)
index b3198fc..0000000
+++ /dev/null
@@ -1,1587 +0,0 @@
-/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
- */
-/*
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_legacy.h>
-#include "intel_drv.h"
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_vgpu.h"
-#include "i915_trace.h"
-#include <linux/pci.h>
-#include <linux/console.h>
-#include <linux/vt.h>
-#include <linux/vgaarb.h>
-#include <linux/acpi.h>
-#include <linux/pnp.h>
-#include <linux/vga_switcheroo.h>
-#include <linux/slab.h>
-#include <acpi/video.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/oom.h>
-
-static unsigned int i915_load_fail_count;
-
-bool __i915_inject_load_failure(const char *func, int line)
-{
-       if (i915_load_fail_count >= i915.inject_load_failure)
-               return false;
-
-       if (++i915_load_fail_count == i915.inject_load_failure) {
-               DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
-                        i915.inject_load_failure, func, line);
-               return true;
-       }
-
-       return false;
-}
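/*
 * A minimal sketch of how the checkpoint above is consumed: callers go
 * through the i915_inject_load_failure() wrapper (used throughout the init
 * phases below), so with the module parameter i915.inject_load_failure=N
 * the Nth checkpoint reached during load reports a failure:
 *
 *     static int some_init_phase(struct drm_i915_private *dev_priv)
 *     {
 *             if (i915_inject_load_failure())
 *                     return -ENODEV;
 *             return 0;
 *     }
 *
 * which lets each phase's error-unwind path be exercised on demand.
 */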
-
-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
-                   "providing the dmesg log by booting with drm.debug=0xf"
-
-void
-__i915_printk(struct drm_i915_private *dev_priv, const char *level,
-             const char *fmt, ...)
-{
-       static bool shown_bug_once;
-       struct device *dev = dev_priv->dev->dev;
-       bool is_error = level[1] <= KERN_ERR[1];
-       bool is_debug = level[1] == KERN_DEBUG[1];
-       struct va_format vaf;
-       va_list args;
-
-       if (is_debug && !(drm_debug & DRM_UT_DRIVER))
-               return;
-
-       va_start(args, fmt);
-
-       vaf.fmt = fmt;
-       vaf.va = &args;
-
-       dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
-                  __builtin_return_address(0), &vaf);
-
-       if (is_error && !shown_bug_once) {
-               dev_notice(dev, "%s", FDO_BUG_MSG);
-               shown_bug_once = true;
-       }
-
-       va_end(args);
-}
-
-static bool i915_error_injected(struct drm_i915_private *dev_priv)
-{
-       return i915.inject_load_failure &&
-              i915_load_fail_count == i915.inject_load_failure;
-}
-
-#define i915_load_error(dev_priv, fmt, ...)                                 \
-       __i915_printk(dev_priv,                                              \
-                     i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
-                     fmt, ##__VA_ARGS__)
-
-static int i915_getparam(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       drm_i915_getparam_t *param = data;
-       int value;
-
-       switch (param->param) {
-       case I915_PARAM_IRQ_ACTIVE:
-       case I915_PARAM_ALLOW_BATCHBUFFER:
-       case I915_PARAM_LAST_DISPATCH:
-               /* Reject all old ums/dri params. */
-               return -ENODEV;
-       case I915_PARAM_CHIPSET_ID:
-               value = dev->pdev->device;
-               break;
-       case I915_PARAM_REVISION:
-               value = dev->pdev->revision;
-               break;
-       case I915_PARAM_HAS_GEM:
-               value = 1;
-               break;
-       case I915_PARAM_NUM_FENCES_AVAIL:
-               value = dev_priv->num_fence_regs;
-               break;
-       case I915_PARAM_HAS_OVERLAY:
-               value = dev_priv->overlay ? 1 : 0;
-               break;
-       case I915_PARAM_HAS_PAGEFLIPPING:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXECBUF2:
-               /* depends on GEM */
-               value = 1;
-               break;
-       case I915_PARAM_HAS_BSD:
-               value = intel_engine_initialized(&dev_priv->engine[VCS]);
-               break;
-       case I915_PARAM_HAS_BLT:
-               value = intel_engine_initialized(&dev_priv->engine[BCS]);
-               break;
-       case I915_PARAM_HAS_VEBOX:
-               value = intel_engine_initialized(&dev_priv->engine[VECS]);
-               break;
-       case I915_PARAM_HAS_BSD2:
-               value = intel_engine_initialized(&dev_priv->engine[VCS2]);
-               break;
-       case I915_PARAM_HAS_RELAXED_FENCING:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_COHERENT_RINGS:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXEC_CONSTANTS:
-               value = INTEL_INFO(dev)->gen >= 4;
-               break;
-       case I915_PARAM_HAS_RELAXED_DELTA:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_GEN7_SOL_RESET:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_LLC:
-               value = HAS_LLC(dev);
-               break;
-       case I915_PARAM_HAS_WT:
-               value = HAS_WT(dev);
-               break;
-       case I915_PARAM_HAS_ALIASING_PPGTT:
-               value = USES_PPGTT(dev);
-               break;
-       case I915_PARAM_HAS_WAIT_TIMEOUT:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_SEMAPHORES:
-               value = i915_semaphore_is_enabled(dev);
-               break;
-       case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_SECURE_BATCHES:
-               value = capable(CAP_SYS_ADMIN);
-               break;
-       case I915_PARAM_HAS_PINNED_BATCHES:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXEC_NO_RELOC:
-               value = 1;
-               break;
-       case I915_PARAM_HAS_EXEC_HANDLE_LUT:
-               value = 1;
-               break;
-       case I915_PARAM_CMD_PARSER_VERSION:
-               value = i915_cmd_parser_get_version();
-               break;
-       case I915_PARAM_HAS_COHERENT_PHYS_GTT:
-               value = 1;
-               break;
-       case I915_PARAM_MMAP_VERSION:
-               value = 1;
-               break;
-       case I915_PARAM_SUBSLICE_TOTAL:
-               value = INTEL_INFO(dev)->subslice_total;
-               if (!value)
-                       return -ENODEV;
-               break;
-       case I915_PARAM_EU_TOTAL:
-               value = INTEL_INFO(dev)->eu_total;
-               if (!value)
-                       return -ENODEV;
-               break;
-       case I915_PARAM_HAS_GPU_RESET:
-               value = i915.enable_hangcheck &&
-                       intel_has_gpu_reset(dev);
-               break;
-       case I915_PARAM_HAS_RESOURCE_STREAMER:
-               value = HAS_RESOURCE_STREAMER(dev);
-               break;
-       case I915_PARAM_HAS_EXEC_SOFTPIN:
-               value = 1;
-               break;
-       default:
-               DRM_DEBUG("Unknown parameter %d\n", param->param);
-               return -EINVAL;
-       }
-
-       if (copy_to_user(param->value, &value, sizeof(int))) {
-               DRM_ERROR("copy_to_user failed\n");
-               return -EFAULT;
-       }
-
-       return 0;
-}
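/*
 * A hedged userspace sketch (assuming the usual /dev/dri/card0 node and the
 * uapi header): i915_getparam() services the DRM_IOCTL_I915_GETPARAM ioctl,
 * so a minimal caller looks like:
 *
 *     #include <fcntl.h>
 *     #include <sys/ioctl.h>
 *     #include <drm/i915_drm.h>
 *
 *     static int i915_has_bsd(void)
 *     {
 *             int fd = open("/dev/dri/card0", O_RDWR);
 *             int value = 0;
 *             drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_BSD,
 *                                        .value = &value };
 *
 *             if (fd < 0 || ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
 *                     return -1;
 *             return value;
 *     }
 *
 * exercising the copy_to_user() path at the end of the function above.
 */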
-
-static int i915_get_bridge_dev(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
-       if (!dev_priv->bridge_dev) {
-               DRM_ERROR("bridge device not found\n");
-               return -1;
-       }
-       return 0;
-}
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-       u32 temp_lo, temp_hi = 0;
-       u64 mchbar_addr;
-       int ret;
-
-       if (INTEL_INFO(dev)->gen >= 4)
-               pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
-       pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
-       mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
-       /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
-       if (mchbar_addr &&
-           pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
-               return 0;
-#endif
-
-       /* Get some space for it */
-       dev_priv->mch_res.name = "i915 MCHBAR";
-       dev_priv->mch_res.flags = IORESOURCE_MEM;
-       ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
-                                    &dev_priv->mch_res,
-                                    MCHBAR_SIZE, MCHBAR_SIZE,
-                                    PCIBIOS_MIN_MEM,
-                                    0, pcibios_align_resource,
-                                    dev_priv->bridge_dev);
-       if (ret) {
-               DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
-               dev_priv->mch_res.start = 0;
-               return ret;
-       }
-
-       if (INTEL_INFO(dev)->gen >= 4)
-               pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
-                                      upper_32_bits(dev_priv->mch_res.start));
-
-       pci_write_config_dword(dev_priv->bridge_dev, reg,
-                              lower_32_bits(dev_priv->mch_res.start));
-       return 0;
-}
-
-/* Setup MCHBAR if possible; remember whether we should disable it again */
-static void
-intel_setup_mchbar(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-       u32 temp;
-       bool enabled;
-
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               return;
-
-       dev_priv->mchbar_need_disable = false;
-
-       if (IS_I915G(dev) || IS_I915GM(dev)) {
-               pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
-               enabled = !!(temp & DEVEN_MCHBAR_EN);
-       } else {
-               pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-               enabled = temp & 1;
-       }
-
-       /* If it's already enabled, don't have to do anything */
-       if (enabled)
-               return;
-
-       if (intel_alloc_mchbar_resource(dev))
-               return;
-
-       dev_priv->mchbar_need_disable = true;
-
-       /* Space is allocated or reserved, so enable it. */
-       if (IS_I915G(dev) || IS_I915GM(dev)) {
-               pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
-                                      temp | DEVEN_MCHBAR_EN);
-       } else {
-               pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
-               pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
-       }
-}
-
-static void
-intel_teardown_mchbar(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-
-       if (dev_priv->mchbar_need_disable) {
-               if (IS_I915G(dev) || IS_I915GM(dev)) {
-                       u32 deven_val;
-
-                       pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
-                                             &deven_val);
-                       deven_val &= ~DEVEN_MCHBAR_EN;
-                       pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
-                                              deven_val);
-               } else {
-                       u32 mchbar_val;
-
-                       pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
-                                             &mchbar_val);
-                       mchbar_val &= ~1;
-                       pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
-                                              mchbar_val);
-               }
-       }
-
-       if (dev_priv->mch_res.start)
-               release_resource(&dev_priv->mch_res);
-}
-
-/* true = enable decode, false = disable decode */
-static unsigned int i915_vga_set_decode(void *cookie, bool state)
-{
-       struct drm_device *dev = cookie;
-
-       intel_modeset_vga_set_state(dev, state);
-       if (state)
-               return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
-                      VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-       else
-               return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
-}
-
-static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
-{
-       struct drm_device *dev = pci_get_drvdata(pdev);
-       pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
-
-       if (state == VGA_SWITCHEROO_ON) {
-               pr_info("switched on\n");
-               dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               /* i915 resume handler doesn't set to D0 */
-               pci_set_power_state(dev->pdev, PCI_D0);
-               i915_resume_switcheroo(dev);
-               dev->switch_power_state = DRM_SWITCH_POWER_ON;
-       } else {
-               pr_info("switched off\n");
-               dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               i915_suspend_switcheroo(dev, pmm);
-               dev->switch_power_state = DRM_SWITCH_POWER_OFF;
-       }
-}
-
-static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
-{
-       struct drm_device *dev = pci_get_drvdata(pdev);
-
-       /*
-        * FIXME: open_count is protected by drm_global_mutex but that would lead to
-        * locking inversion with the driver load path. And the access here is
-        * completely racy anyway. So don't bother with locking for now.
-        */
-       return dev->open_count == 0;
-}
-
-static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
-       .set_gpu_state = i915_switcheroo_set_state,
-       .reprobe = NULL,
-       .can_switch = i915_switcheroo_can_switch,
-};
-
-static int i915_load_modeset_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       if (i915_inject_load_failure())
-               return -ENODEV;
-
-       ret = intel_bios_init(dev_priv);
-       if (ret)
-               DRM_INFO("failed to find VBIOS tables\n");
-
-       /* If we have more than one VGA card, then we need to arbitrate access
-        * to the common VGA resources.
-        *
-        * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
-        * then we do not take part in VGA arbitration and
-        * vga_client_register() fails with -ENODEV.
-        */
-       ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-       if (ret && ret != -ENODEV)
-               goto out;
-
-       intel_register_dsm_handler();
-
-       ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
-       if (ret)
-               goto cleanup_vga_client;
-
-       intel_power_domains_init_hw(dev_priv, false);
-
-       intel_csr_ucode_init(dev_priv);
-
-       ret = intel_irq_install(dev_priv);
-       if (ret)
-               goto cleanup_csr;
-
-       intel_setup_gmbus(dev);
-
-       /* Important: The output setup functions called by modeset_init need
-        * working irqs for e.g. gmbus and dp aux transfers. */
-       intel_modeset_init(dev);
-
-       intel_guc_ucode_init(dev);
-
-       ret = i915_gem_init(dev);
-       if (ret)
-               goto cleanup_irq;
-
-       intel_modeset_gem_init(dev);
-
-       if (INTEL_INFO(dev)->num_pipes == 0)
-               return 0;
-
-       ret = intel_fbdev_init(dev);
-       if (ret)
-               goto cleanup_gem;
-
-       /* Only enable hotplug handling once the fbdev is fully set up. */
-       intel_hpd_init(dev_priv);
-
-       /*
-        * Some ports require correctly set-up hpd registers for detection to
-        * work properly (leading to ghost connected connector status), e.g. VGA
-        * on gm45.  Hence we can only set up the initial fbdev config after hpd
-        * irqs are fully enabled. Now we should scan for the initial config
-        * only once hotplug handling is enabled, but due to screwed-up locking
-        * around kms/fbdev init we can't protect the fbdev initial config
-        * scanning against hotplug events. Hence do this first and ignore the
-        * tiny window where we will lose hotplug notifications.
-        */
-       intel_fbdev_initial_config_async(dev);
-
-       drm_kms_helper_poll_init(dev);
-
-       return 0;
-
-cleanup_gem:
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_engines(dev);
-       i915_gem_context_fini(dev);
-       mutex_unlock(&dev->struct_mutex);
-cleanup_irq:
-       intel_guc_ucode_fini(dev);
-       drm_irq_uninstall(dev);
-       intel_teardown_gmbus(dev);
-cleanup_csr:
-       intel_csr_ucode_fini(dev_priv);
-       intel_power_domains_fini(dev_priv);
-       vga_switcheroo_unregister_client(dev->pdev);
-cleanup_vga_client:
-       vga_client_register(dev->pdev, NULL, NULL, NULL);
-out:
-       return ret;
-}
-
-#if IS_ENABLED(CONFIG_FB)
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
-{
-       struct apertures_struct *ap;
-       struct pci_dev *pdev = dev_priv->dev->pdev;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       bool primary;
-       int ret;
-
-       ap = alloc_apertures(1);
-       if (!ap)
-               return -ENOMEM;
-
-       ap->ranges[0].base = ggtt->mappable_base;
-       ap->ranges[0].size = ggtt->mappable_end;
-
-       primary =
-               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-
-       ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
-
-       kfree(ap);
-
-       return ret;
-}
-#else
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
-{
-       return 0;
-}
-#endif
-
-#if !defined(CONFIG_VGA_CONSOLE)
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
-       return 0;
-}
-#elif !defined(CONFIG_DUMMY_CONSOLE)
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
-       return -ENODEV;
-}
-#else
-static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
-{
-       int ret = 0;
-
-       DRM_INFO("Replacing VGA console driver\n");
-
-       console_lock();
-       if (con_is_bound(&vga_con))
-               ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
-       if (ret == 0) {
-               ret = do_unregister_con_driver(&vga_con);
-
-               /* Ignore "already unregistered". */
-               if (ret == -ENODEV)
-                       ret = 0;
-       }
-       console_unlock();
-
-       return ret;
-}
-#endif
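/*
 * The three preprocessor branches above encode the config matrix: no VGA
 * console built in (nothing to kick out, return 0), a VGA console but no
 * dummy console to hand over to (hard -ENODEV), or both (take over the
 * console under console_lock and unregister vga_con, treating "already
 * unregistered" as success).
 */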
-
-static void i915_dump_device_info(struct drm_i915_private *dev_priv)
-{
-       const struct intel_device_info *info = &dev_priv->info;
-
-#define PRINT_S(name) "%s"
-#define SEP_EMPTY
-#define PRINT_FLAG(name) info->name ? #name "," : ""
-#define SEP_COMMA ,
-       DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
-                        DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
-                        info->gen,
-                        dev_priv->dev->pdev->device,
-                        dev_priv->dev->pdev->revision,
-                        DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
-#undef PRINT_S
-#undef SEP_EMPTY
-#undef PRINT_FLAG
-#undef SEP_COMMA
-}
-
-static void cherryview_sseu_info_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_device_info *info;
-       u32 fuse, eu_dis;
-
-       info = (struct intel_device_info *)&dev_priv->info;
-       fuse = I915_READ(CHV_FUSE_GT);
-
-       info->slice_total = 1;
-
-       if (!(fuse & CHV_FGT_DISABLE_SS0)) {
-               info->subslice_per_slice++;
-               eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
-                                CHV_FGT_EU_DIS_SS0_R1_MASK);
-               info->eu_total += 8 - hweight32(eu_dis);
-       }
-
-       if (!(fuse & CHV_FGT_DISABLE_SS1)) {
-               info->subslice_per_slice++;
-               eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
-                                CHV_FGT_EU_DIS_SS1_R1_MASK);
-               info->eu_total += 8 - hweight32(eu_dis);
-       }
-
-       info->subslice_total = info->subslice_per_slice;
-       /*
-        * CHV is expected to always have a uniform distribution of EU
-        * across subslices.
-        */
-       info->eu_per_subslice = info->subslice_total ?
-                               info->eu_total / info->subslice_total :
-                               0;
-       /*
-        * CHV supports subslice power gating on devices with more than
-        * one subslice, and supports EU power gating on devices with
-        * more than one EU pair per subslice.
-        */
-       info->has_slice_pg = 0;
-       info->has_subslice_pg = (info->subslice_total > 1);
-       info->has_eu_pg = (info->eu_per_subslice > 2);
-}
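/*
 * A worked example with a hypothetical CHV_FUSE_GT value: with both
 * subslices enabled, two EUs fused off in SS0 and one in SS1, the
 * hweight32() arithmetic above gives
 *
 *     eu_total        = (8 - 2) + (8 - 1) = 13
 *     subslice_total  = 2
 *     eu_per_subslice = 13 / 2 = 6        (integer division)
 *     has_subslice_pg = true              (subslice_total > 1)
 *     has_eu_pg       = true              (eu_per_subslice > 2)
 */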
-
-static void gen9_sseu_info_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_device_info *info;
-       int s_max = 3, ss_max = 4, eu_max = 8;
-       int s, ss;
-       u32 fuse2, s_enable, ss_disable, eu_disable;
-       u8 eu_mask = 0xff;
-
-       info = (struct intel_device_info *)&dev_priv->info;
-       fuse2 = I915_READ(GEN8_FUSE2);
-       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
-                  GEN8_F2_S_ENA_SHIFT;
-       ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
-                    GEN9_F2_SS_DIS_SHIFT;
-
-       info->slice_total = hweight32(s_enable);
-       /*
-        * The subslice disable field is global, i.e. it applies
-        * to each of the enabled slices.
-        */
-       info->subslice_per_slice = ss_max - hweight32(ss_disable);
-       info->subslice_total = info->slice_total *
-                              info->subslice_per_slice;
-
-       /*
-        * Iterate through enabled slices and subslices to
-        * count the total enabled EU.
-        */
-       for (s = 0; s < s_max; s++) {
-               if (!(s_enable & (0x1 << s)))
-                       /* skip disabled slice */
-                       continue;
-
-               eu_disable = I915_READ(GEN9_EU_DISABLE(s));
-               for (ss = 0; ss < ss_max; ss++) {
-                       int eu_per_ss;
-
-                       if (ss_disable & (0x1 << ss))
-                               /* skip disabled subslice */
-                               continue;
-
-                       eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
-                                                     eu_mask);
-
-                       /*
-                        * Record which subslice(s) have 7 EUs. We
-                        * can tune the hash used to spread work among
-                        * subslices if they are unbalanced.
-                        */
-                       if (eu_per_ss == 7)
-                               info->subslice_7eu[s] |= 1 << ss;
-
-                       info->eu_total += eu_per_ss;
-               }
-       }
-
-       /*
-        * SKL is expected to always have a uniform distribution
-        * of EU across subslices with the exception that any one
-        * EU in any one subslice may be fused off for die
-        * recovery. BXT is expected to be perfectly uniform in EU
-        * distribution.
-        */
-       info->eu_per_subslice = info->subslice_total ?
-                               DIV_ROUND_UP(info->eu_total,
-                                            info->subslice_total) : 0;
-       /*
-        * SKL supports slice power gating on devices with more than
-        * one slice, and supports EU power gating on devices with
-        * more than one EU pair per subslice. BXT supports subslice
-        * power gating on devices with more than one subslice, and
-        * supports EU power gating on devices with more than one EU
-        * pair per subslice.
-        */
-       info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
-                              (info->slice_total > 1));
-       info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
-       info->has_eu_pg = (info->eu_per_subslice > 2);
-}
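/*
 * An illustrative bit layout: subslice_7eu[] is a per-slice bitmask of
 * subslices left with exactly 7 EUs after fusing. If only subslice 2 of
 * slice 0 lost a single EU, the loop above would yield
 * info->subslice_7eu[0] == BIT(2), which consumers can use to rebalance
 * the hash that spreads work across subslices.
 */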
-
-static void broadwell_sseu_info_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_device_info *info;
-       const int s_max = 3, ss_max = 3, eu_max = 8;
-       int s, ss;
-       u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
-
-       fuse2 = I915_READ(GEN8_FUSE2);
-       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
-       ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
-
-       eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
-       eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
-                       ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
-                        (32 - GEN8_EU_DIS0_S1_SHIFT));
-       eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
-                       ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
-                        (32 - GEN8_EU_DIS1_S2_SHIFT));
-
-
-       info = (struct intel_device_info *)&dev_priv->info;
-       info->slice_total = hweight32(s_enable);
-
-       /*
-        * The subslice disable field is global, i.e. it applies
-        * to each of the enabled slices.
-        */
-       info->subslice_per_slice = ss_max - hweight32(ss_disable);
-       info->subslice_total = info->slice_total * info->subslice_per_slice;
-
-       /*
-        * Iterate through enabled slices and subslices to
-        * count the total enabled EU.
-        */
-       for (s = 0; s < s_max; s++) {
-               if (!(s_enable & (0x1 << s)))
-                       /* skip disabled slice */
-                       continue;
-
-               for (ss = 0; ss < ss_max; ss++) {
-                       u32 n_disabled;
-
-                       if (ss_disable & (0x1 << ss))
-                               /* skip disabled subslice */
-                               continue;
-
-                       n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
-
-                       /*
-                        * Record which subslices have 7 EUs.
-                        */
-                       if (eu_max - n_disabled == 7)
-                               info->subslice_7eu[s] |= 1 << ss;
-
-                       info->eu_total += eu_max - n_disabled;
-               }
-       }
-
-       /*
-        * BDW is expected to always have a uniform distribution of EU across
-        * subslices with the exception that any one EU in any one subslice may
-        * be fused off for die recovery.
-        */
-       info->eu_per_subslice = info->subslice_total ?
-               DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
-
-       /*
-        * BDW supports slice power gating on devices with more than
-        * one slice.
-        */
-       info->has_slice_pg = (info->slice_total > 1);
-       info->has_subslice_pg = 0;
-       info->has_eu_pg = 0;
-}
-
-/*
- * Determine various intel_device_info fields at runtime.
- *
- * Use it when either:
- *   - it's judged too laborious to fill n static structures with the limit
- *     when a simple if statement does the job,
- *   - run-time checks (e.g. reading fuse/strap registers) are needed.
- *
- * This function needs to be called:
- *   - after the MMIO has been setup as we are reading registers,
- *   - after the PCH has been detected,
- *   - before the first usage of the fields it can tweak.
- */
-static void intel_device_info_runtime_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_device_info *info;
-       enum pipe pipe;
-
-       info = (struct intel_device_info *)&dev_priv->info;
-
-       /*
-        * Skylake and Broxton currently don't expose the topmost plane as its
-        * use is exclusive with the legacy cursor and we only want to expose
-        * one of those, not both. Until we can safely expose the topmost plane
-        * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
-        * we don't expose the topmost plane at all to prevent ABI breakage
-        * down the line.
-        */
-       if (IS_BROXTON(dev)) {
-               info->num_sprites[PIPE_A] = 2;
-               info->num_sprites[PIPE_B] = 2;
-               info->num_sprites[PIPE_C] = 1;
-       } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 2;
-       else
-               for_each_pipe(dev_priv, pipe)
-                       info->num_sprites[pipe] = 1;
-
-       if (i915.disable_display) {
-               DRM_INFO("Display disabled (module parameter)\n");
-               info->num_pipes = 0;
-       } else if (info->num_pipes > 0 &&
-                  (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
-                  HAS_PCH_SPLIT(dev)) {
-               u32 fuse_strap = I915_READ(FUSE_STRAP);
-               u32 sfuse_strap = I915_READ(SFUSE_STRAP);
-
-               /*
-                * SFUSE_STRAP is supposed to have a bit signalling the display
-                * is fused off. Unfortunately it seems that, at least in
-                * certain cases, fused off display means that PCH display
-                * reads don't land anywhere. In that case, we read 0s.
-                *
-                * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
-                * should be set when taking over after the firmware.
-                */
-               if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
-                   sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
-                   (dev_priv->pch_type == PCH_CPT &&
-                    !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
-                       DRM_INFO("Display fused off, disabling\n");
-                       info->num_pipes = 0;
-               } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
-                       DRM_INFO("PipeC fused off\n");
-                       info->num_pipes -= 1;
-               }
-       } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
-               u32 dfsm = I915_READ(SKL_DFSM);
-               u8 disabled_mask = 0;
-               bool invalid;
-               int num_bits;
-
-               if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
-                       disabled_mask |= BIT(PIPE_A);
-               if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
-                       disabled_mask |= BIT(PIPE_B);
-               if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
-                       disabled_mask |= BIT(PIPE_C);
-
-               num_bits = hweight8(disabled_mask);
-
-               switch (disabled_mask) {
-               case BIT(PIPE_A):
-               case BIT(PIPE_B):
-               case BIT(PIPE_A) | BIT(PIPE_B):
-               case BIT(PIPE_A) | BIT(PIPE_C):
-                       invalid = true;
-                       break;
-               default:
-                       invalid = false;
-               }
-
-               if (num_bits > info->num_pipes || invalid)
-                       DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
-                                 disabled_mask);
-               else
-                       info->num_pipes -= num_bits;
-       }
-
-       /* Initialize slice/subslice/EU info */
-       if (IS_CHERRYVIEW(dev))
-               cherryview_sseu_info_init(dev);
-       else if (IS_BROADWELL(dev))
-               broadwell_sseu_info_init(dev);
-       else if (INTEL_INFO(dev)->gen >= 9)
-               gen9_sseu_info_init(dev);
-
-       /* Snooping is broken on BXT A stepping. */
-       info->has_snoop = !info->has_llc;
-       info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
-
-       DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
-       DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
-       DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
-       DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
-       DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
-       DRM_DEBUG_DRIVER("has slice power gating: %s\n",
-                        info->has_slice_pg ? "y" : "n");
-       DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
-                        info->has_subslice_pg ? "y" : "n");
-       DRM_DEBUG_DRIVER("has EU power gating: %s\n",
-                        info->has_eu_pg ? "y" : "n");
-}
-
-static void intel_init_dpio(struct drm_i915_private *dev_priv)
-{
-       /*
-        * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
-        * CHV x1 PHY (DP/HDMI D)
-        * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
-        */
-       if (IS_CHERRYVIEW(dev_priv)) {
-               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
-               DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
-       } else if (IS_VALLEYVIEW(dev_priv)) {
-               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
-       }
-}
-
-static int i915_workqueues_init(struct drm_i915_private *dev_priv)
-{
-       /*
-        * The i915 workqueue is primarily used for batched retirement of
-        * requests (and thus managing bo) once the task has been completed
-        * by the GPU. i915_gem_retire_requests() is called directly when we
-        * need high-priority retirement, such as waiting for an explicit
-        * bo.
-        *
-        * It is also used for periodic low-priority events, such as
-        * idle-timers and recording error state.
-        *
-        * All tasks on the workqueue are expected to acquire the dev mutex
-        * so there is no point in running more than one instance of the
-        * workqueue at any time.  Use an ordered one.
-        */
-       dev_priv->wq = alloc_ordered_workqueue("i915", 0);
-       if (dev_priv->wq == NULL)
-               goto out_err;
-
-       dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-       if (dev_priv->hotplug.dp_wq == NULL)
-               goto out_free_wq;
-
-       dev_priv->gpu_error.hangcheck_wq =
-               alloc_ordered_workqueue("i915-hangcheck", 0);
-       if (dev_priv->gpu_error.hangcheck_wq == NULL)
-               goto out_free_dp_wq;
-
-       return 0;
-
-out_free_dp_wq:
-       destroy_workqueue(dev_priv->hotplug.dp_wq);
-out_free_wq:
-       destroy_workqueue(dev_priv->wq);
-out_err:
-       DRM_ERROR("Failed to allocate workqueues.\n");
-
-       return -ENOMEM;
-}
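/*
 * A minimal usage sketch (the retire_work producer is assumed here for
 * illustration): because dev_priv->wq is created with
 * alloc_ordered_workqueue(), items queued on it never run concurrently,
 * which is what lets every task assume it may take the dev mutex:
 *
 *     queue_delayed_work(dev_priv->wq,
 *                        &dev_priv->mm.retire_work,
 *                        round_jiffies_up_relative(HZ));
 *
 * with at most one handler instance in flight at any time.
 */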
-
-static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
-{
-       destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
-       destroy_workqueue(dev_priv->hotplug.dp_wq);
-       destroy_workqueue(dev_priv->wq);
-}
-
-/**
- * i915_driver_init_early - setup state not requiring device access
- * @dev_priv: device private
- *
- * Initialize everything that is "SW-only" state, i.e. state not
- * requiring accessing the device or exposing the driver via kernel internal
- * or userspace interfaces. Example steps belonging here: lock initialization,
- * system memory allocation, setting up device specific attributes and
- * function hooks not requiring accessing the device.
- */
-static int i915_driver_init_early(struct drm_i915_private *dev_priv,
-                                 struct drm_device *dev,
-                                 struct intel_device_info *info)
-{
-       struct intel_device_info *device_info;
-       int ret = 0;
-
-       if (i915_inject_load_failure())
-               return -ENODEV;
-
-       /* Setup the write-once "constant" device info */
-       device_info = (struct intel_device_info *)&dev_priv->info;
-       memcpy(device_info, info, sizeof(dev_priv->info));
-       device_info->device_id = dev->pdev->device;
-
-       spin_lock_init(&dev_priv->irq_lock);
-       spin_lock_init(&dev_priv->gpu_error.lock);
-       mutex_init(&dev_priv->backlight_lock);
-       spin_lock_init(&dev_priv->uncore.lock);
-       spin_lock_init(&dev_priv->mm.object_stat_lock);
-       spin_lock_init(&dev_priv->mmio_flip_lock);
-       mutex_init(&dev_priv->sb_lock);
-       mutex_init(&dev_priv->modeset_restore_lock);
-       mutex_init(&dev_priv->av_mutex);
-       mutex_init(&dev_priv->wm.wm_mutex);
-       mutex_init(&dev_priv->pps_mutex);
-
-       ret = i915_workqueues_init(dev_priv);
-       if (ret < 0)
-               return ret;
-
-       /* This must be called before any calls to HAS_PCH_* */
-       intel_detect_pch(dev);
-
-       intel_pm_setup(dev);
-       intel_init_dpio(dev_priv);
-       intel_power_domains_init(dev_priv);
-       intel_irq_init(dev_priv);
-       intel_init_display_hooks(dev_priv);
-       intel_init_clock_gating_hooks(dev_priv);
-       intel_init_audio_hooks(dev_priv);
-       i915_gem_load_init(dev);
-
-       intel_display_crc_init(dev);
-
-       i915_dump_device_info(dev_priv);
-
-       /* Not all pre-production machines fall into this category, only the
-        * very first ones. Almost everything should work, except for maybe
-        * suspend/resume. And we don't implement workarounds that affect only
-        * pre-production machines. */
-       if (IS_HSW_EARLY_SDV(dev))
-               DRM_INFO("This is an early pre-production Haswell machine. "
-                        "It may not be fully functional.\n");
-
-       return 0;
-}
-
-/**
- * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
- * @dev_priv: device private
- */
-static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
-{
-       i915_gem_load_cleanup(dev_priv->dev);
-       i915_workqueues_cleanup(dev_priv);
-}
-
-static int i915_mmio_setup(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int mmio_bar;
-       int mmio_size;
-
-       mmio_bar = IS_GEN2(dev) ? 1 : 0;
-       /*
-        * Before gen4, the registers and the GTT are behind different BARs.
-        * However, from gen4 onwards, the registers and the GTT are shared
-        * in the same BAR, so we want to restrict this ioremap from
-        * clobbering the GTT, for which we want ioremap_wc instead. Fortunately,
-        * the register BAR remains the same size for all the earlier
-        * generations up to Ironlake.
-        */
-       if (INTEL_INFO(dev)->gen < 5)
-               mmio_size = 512 * 1024;
-       else
-               mmio_size = 2 * 1024 * 1024;
-       dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
-       if (dev_priv->regs == NULL) {
-               DRM_ERROR("failed to map registers\n");
-
-               return -EIO;
-       }
-
-       /* Try to make sure MCHBAR is enabled before poking at it */
-       intel_setup_mchbar(dev);
-
-       return 0;
-}
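/*
 * In summary, the mapping above is:
 *
 *     gen 2    -> BAR 1, 512 KiB of registers
 *     gen 3-4  -> BAR 0, 512 KiB of registers
 *     gen 5+   -> BAR 0, capped at 2 MiB so the GTT half of the shared
 *                 BAR is left free for a later ioremap_wc()
 */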
-
-static void i915_mmio_cleanup(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
-       intel_teardown_mchbar(dev);
-       pci_iounmap(dev->pdev, dev_priv->regs);
-}
-
-/**
- * i915_driver_init_mmio - setup device MMIO
- * @dev_priv: device private
- *
- * Setup minimal device state necessary for MMIO accesses later in the
- * initialization sequence. The setup here should avoid any other device-wide
- * side effects or exposing the driver via kernel internal or user space
- * interfaces.
- */
-static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       int ret;
-
-       if (i915_inject_load_failure())
-               return -ENODEV;
-
-       if (i915_get_bridge_dev(dev))
-               return -EIO;
-
-       ret = i915_mmio_setup(dev);
-       if (ret < 0)
-               goto put_bridge;
-
-       intel_uncore_init(dev);
-
-       return 0;
-
-put_bridge:
-       pci_dev_put(dev_priv->bridge_dev);
-
-       return ret;
-}
-
-/**
- * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
- * @dev_priv: device private
- */
-static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-
-       intel_uncore_fini(dev);
-       i915_mmio_cleanup(dev);
-       pci_dev_put(dev_priv->bridge_dev);
-}
-
-/**
- * i915_driver_init_hw - setup state requiring device access
- * @dev_priv: device private
- *
- * Setup state that requires accessing the device, but doesn't require
- * exposing the driver via kernel internal or userspace interfaces.
- */
-static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       uint32_t aperture_size;
-       int ret;
-
-       if (i915_inject_load_failure())
-               return -ENODEV;
-
-       intel_device_info_runtime_init(dev);
-
-       ret = i915_ggtt_init_hw(dev);
-       if (ret)
-               return ret;
-
-       ret = i915_ggtt_enable_hw(dev);
-       if (ret) {
-               DRM_ERROR("failed to enable GGTT\n");
-               goto out_ggtt;
-       }
-
-       /* WARNING: Apparently we must kick fbdev drivers before vgacon,
-        * otherwise the vga fbdev driver falls over. */
-       ret = i915_kick_out_firmware_fb(dev_priv);
-       if (ret) {
-               DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-               goto out_ggtt;
-       }
-
-       ret = i915_kick_out_vgacon(dev_priv);
-       if (ret) {
-               DRM_ERROR("failed to remove conflicting VGA console\n");
-               goto out_ggtt;
-       }
-
-       pci_set_master(dev->pdev);
-
-       /* overlay on gen2 is broken and can't address above 1G */
-       if (IS_GEN2(dev))
-               dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
-
-       /* 965GM sometimes incorrectly writes to hardware status page (HWS)
-        * using 32bit addressing, overwriting memory if HWS is located
-        * above 4GB.
-        *
-        * The documentation also mentions an issue with undefined
-        * behaviour if any general state is accessed within a page above 4GB,
-        * which also needs to be handled carefully.
-        */
-       if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
-               dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
-
-       aperture_size = ggtt->mappable_end;
-
-       ggtt->mappable =
-               io_mapping_create_wc(ggtt->mappable_base,
-                                    aperture_size);
-       if (!ggtt->mappable) {
-               ret = -EIO;
-               goto out_ggtt;
-       }
-
-       ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
-                                             aperture_size);
-
-       pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
-                          PM_QOS_DEFAULT_VALUE);
-
-       intel_uncore_sanitize(dev);
-
-       intel_opregion_setup(dev);
-
-       i915_gem_load_init_fences(dev_priv);
-
-       /* On the 945G/GM, the chipset reports the MSI capability on the
-        * integrated graphics even though the support isn't actually there
-        * according to the published specs.  It doesn't appear to function
-        * correctly in testing on 945G.
-        * This may be a side effect of MSI having been made available for PEG
-        * and the registers being closely associated.
-        *
-        * According to chipset errata, on the 965GM, MSI interrupts may
-        * be lost or delayed, but we use them anyway to avoid
-        * stuck interrupts on some machines.
-        */
-       if (!IS_I945G(dev) && !IS_I945GM(dev)) {
-               if (pci_enable_msi(dev->pdev) < 0)
-                       DRM_DEBUG_DRIVER("can't enable MSI");
-       }
-
-       return 0;
-
-out_ggtt:
-       i915_ggtt_cleanup_hw(dev);
-
-       return ret;
-}
-
-/**
- * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
- * @dev_priv: device private
- */
-static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-       if (dev->pdev->msi_enabled)
-               pci_disable_msi(dev->pdev);
-
-       pm_qos_remove_request(&dev_priv->pm_qos);
-       arch_phys_wc_del(ggtt->mtrr);
-       io_mapping_free(ggtt->mappable);
-       i915_ggtt_cleanup_hw(dev);
-}
-
-/**
- * i915_driver_register - register the driver with the rest of the system
- * @dev_priv: device private
- *
- * Perform any steps necessary to make the driver available via kernel
- * internal or userspace interfaces.
- */
-static void i915_driver_register(struct drm_i915_private *dev_priv)
-{
-       struct drm_device *dev = dev_priv->dev;
-
-       i915_gem_shrinker_init(dev_priv);
-       /*
-        * Notify a valid surface after modesetting,
-        * when running inside a VM.
-        */
-       if (intel_vgpu_active(dev))
-               I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
-
-       i915_setup_sysfs(dev);
-
-       if (INTEL_INFO(dev_priv)->num_pipes) {
-               /* Must be done after probing outputs */
-               intel_opregion_init(dev);
-               acpi_video_register();
-       }
-
-       if (IS_GEN5(dev_priv))
-               intel_gpu_ips_init(dev_priv);
-
-       i915_audio_component_init(dev_priv);
-}
-
-/**
- * i915_driver_unregister - cleanup the registration done in i915_driver_register()
- * @dev_priv: device private
- */
-static void i915_driver_unregister(struct drm_i915_private *dev_priv)
-{
-       i915_audio_component_cleanup(dev_priv);
-       intel_gpu_ips_teardown();
-       acpi_video_unregister();
-       intel_opregion_fini(dev_priv->dev);
-       i915_teardown_sysfs(dev_priv->dev);
-       i915_gem_shrinker_cleanup(dev_priv);
-}
-
-/**
- * i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
- *
- * The driver load routine has to do several things:
- *   - drive output discovery via intel_modeset_init()
- *   - initialize the memory manager
- *   - allocate initial config memory
- *   - setup the DRM framebuffer with the allocated memory
- */
-int i915_driver_load(struct drm_device *dev, unsigned long flags)
-{
-       struct drm_i915_private *dev_priv;
-       int ret = 0;
-
-       dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-       if (dev_priv == NULL)
-               return -ENOMEM;
-
-       dev->dev_private = dev_priv;
-       /* Must be set before calling __i915_printk */
-       dev_priv->dev = dev;
-
-       ret = i915_driver_init_early(dev_priv, dev,
-                                    (struct intel_device_info *)flags);
-
-       if (ret < 0)
-               goto out_free_priv;
-
-       intel_runtime_pm_get(dev_priv);
-
-       ret = i915_driver_init_mmio(dev_priv);
-       if (ret < 0)
-               goto out_runtime_pm_put;
-
-       ret = i915_driver_init_hw(dev_priv);
-       if (ret < 0)
-               goto out_cleanup_mmio;
-
-       /*
-        * TODO: move the vblank init and parts of modeset init steps into one
-        * of the i915_driver_init_/i915_driver_register functions according
-        * to the role/effect of the given init step.
-        */
-       if (INTEL_INFO(dev)->num_pipes) {
-               ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
-               if (ret)
-                       goto out_cleanup_hw;
-       }
-
-       ret = i915_load_modeset_init(dev);
-       if (ret < 0)
-               goto out_cleanup_vblank;
-
-       i915_driver_register(dev_priv);
-
-       intel_runtime_pm_enable(dev_priv);
-
-       intel_runtime_pm_put(dev_priv);
-
-       return 0;
-
-out_cleanup_vblank:
-       drm_vblank_cleanup(dev);
-out_cleanup_hw:
-       i915_driver_cleanup_hw(dev_priv);
-out_cleanup_mmio:
-       i915_driver_cleanup_mmio(dev_priv);
-out_runtime_pm_put:
-       intel_runtime_pm_put(dev_priv);
-       i915_driver_cleanup_early(dev_priv);
-out_free_priv:
-       i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
-
-       kfree(dev_priv);
-
-       return ret;
-}
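/*
 * Note that the error labels above unwind in strict reverse order of the
 * init phases (vblank, hw, mmio, runtime-pm reference plus early state),
 * and i915_driver_unload() below walks essentially the same teardown on
 * the success path.
 */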
-
-int i915_driver_unload(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       intel_fbdev_fini(dev);
-
-       ret = i915_gem_suspend(dev);
-       if (ret) {
-               DRM_ERROR("failed to idle hardware: %d\n", ret);
-               return ret;
-       }
-
-       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
-       i915_driver_unregister(dev_priv);
-
-       drm_vblank_cleanup(dev);
-
-       intel_modeset_cleanup(dev);
-
-       /*
-        * free the memory space allocated for the child device
-        * config parsed from VBT
-        */
-       if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-               kfree(dev_priv->vbt.child_dev);
-               dev_priv->vbt.child_dev = NULL;
-               dev_priv->vbt.child_dev_num = 0;
-       }
-       kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-       kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-       dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
-
-       vga_switcheroo_unregister_client(dev->pdev);
-       vga_client_register(dev->pdev, NULL, NULL, NULL);
-
-       intel_csr_ucode_fini(dev_priv);
-
-       /* Free error state after interrupts are fully disabled. */
-       cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-       i915_destroy_error_state(dev);
-
-       /* Flush any outstanding unpin_work. */
-       flush_workqueue(dev_priv->wq);
-
-       intel_guc_ucode_fini(dev);
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_engines(dev);
-       i915_gem_context_fini(dev);
-       mutex_unlock(&dev->struct_mutex);
-       intel_fbc_cleanup_cfb(dev_priv);
-
-       intel_power_domains_fini(dev_priv);
-
-       i915_driver_cleanup_hw(dev_priv);
-       i915_driver_cleanup_mmio(dev_priv);
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
-       i915_driver_cleanup_early(dev_priv);
-       kfree(dev_priv);
-
-       return 0;
-}
-
-int i915_driver_open(struct drm_device *dev, struct drm_file *file)
-{
-       int ret;
-
-       ret = i915_gem_open(dev, file);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-/**
- * i915_driver_lastclose - clean up after all DRM clients have exited
- * @dev: DRM device
- *
- * Take care of cleaning up after all DRM clients have exited.  In the
- * mode setting case, we want to restore the kernel's initial mode (just
- * in case the last client left us in a bad state).
- *
- * Additionally, in the non-mode setting case, we'll tear down the GTT
- * and DMA structures, since the kernel won't be using them, and clean
- * up any GEM state.
- */
-void i915_driver_lastclose(struct drm_device *dev)
-{
-       intel_fbdev_restore_mode(dev);
-       vga_switcheroo_process_delayed_switch();
-}
-
-void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
-{
-       mutex_lock(&dev->struct_mutex);
-       i915_gem_context_close(dev, file);
-       i915_gem_release(dev, file);
-       mutex_unlock(&dev->struct_mutex);
-}
-
-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
-       struct drm_i915_file_private *file_priv = file->driver_priv;
-
-       kfree(file_priv);
-}
-
-static int
-i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
-                         struct drm_file *file)
-{
-       return -ENODEV;
-}
-
-const struct drm_ioctl_desc i915_ioctls[] = {
-       DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
-};
-
-int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
index f313b4d8344f4e6abf8f0784ae9bf98abdc5eb9e..95ddd56b89f08954a4c767e149dad2d427a5de27 100644 (file)
  *
  */
 
-#include <linux/device.h>
 #include <linux/acpi.h>
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
-
-#include <linux/apple-gmux.h>
-#include <linux/console.h>
+#include <linux/device.h>
+#include <linux/oom.h>
 #include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
 #include <linux/pm_runtime.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
+#include <linux/vt.h>
+#include <acpi/video.h>
+
+#include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+#include "intel_drv.h"
 
 static struct drm_driver driver;
 
-#define GEN_DEFAULT_PIPEOFFSETS \
-       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-                         PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
-       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-                          TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
-       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
-
-#define GEN_CHV_PIPEOFFSETS \
-       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
-                         CHV_PIPE_C_OFFSET }, \
-       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
-                          CHV_TRANSCODER_C_OFFSET, }, \
-       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
-                            CHV_PALETTE_C_OFFSET }
-
-#define CURSOR_OFFSETS \
-       .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
-
-#define IVB_CURSOR_OFFSETS \
-       .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
-
-#define BDW_COLORS \
-       .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
-#define CHV_COLORS \
-       .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
-
-static const struct intel_device_info intel_i830_info = {
-       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+static unsigned int i915_load_fail_count;
 
-static const struct intel_device_info intel_845g_info = {
-       .gen = 2, .num_pipes = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+bool __i915_inject_load_failure(const char *func, int line)
+{
+       if (i915_load_fail_count >= i915.inject_load_failure)
+               return false;
 
-static const struct intel_device_info intel_i85x_info = {
-       .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
-       .cursor_needs_physical = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       if (++i915_load_fail_count == i915.inject_load_failure) {
+               DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
+                        i915.inject_load_failure, func, line);
+               return true;
+       }
 
-static const struct intel_device_info intel_i865g_info = {
-       .gen = 2, .num_pipes = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       return false;
+}
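A minimal sketch of how an init phase consumes this checkpoint, assuming i915_inject_load_failure() is a wrapper feeding __func__ and __LINE__ into __i915_inject_load_failure() (the callers later in this hunk follow exactly this pattern); loading with i915.inject_load_failure=N then fails the N-th checkpoint:

        /* Hypothetical init phase mirroring i915_driver_init_early() below:
         * bail out early when this checkpoint is the one chosen to fail. */
        static int example_init_phase(struct drm_i915_private *dev_priv)
        {
                if (i915_inject_load_failure())
                        return -ENODEV;

                /* real initialization work would go here */
                return 0;
        }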
 
-static const struct intel_device_info intel_i915g_info = {
-       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i915gm_info = {
-       .gen = 3, .is_mobile = 1, .num_pipes = 2,
-       .cursor_needs_physical = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .supports_tv = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i945g_info = {
-       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
-static const struct intel_device_info intel_i945gm_info = {
-       .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
-       .has_hotplug = 1, .cursor_needs_physical = 1,
-       .has_overlay = 1, .overlay_needs_physical = 1,
-       .supports_tv = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+                   "providing the dmesg log by booting with drm.debug=0xf"
 
-static const struct intel_device_info intel_i965g_info = {
-       .gen = 4, .is_broadwater = 1, .num_pipes = 2,
-       .has_hotplug = 1,
-       .has_overlay = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+             const char *fmt, ...)
+{
+       static bool shown_bug_once;
+       struct device *dev = dev_priv->drm.dev;
+       bool is_error = level[1] <= KERN_ERR[1];
+       bool is_debug = level[1] == KERN_DEBUG[1];
+       struct va_format vaf;
+       va_list args;
+
+       if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+               return;
 
-static const struct intel_device_info intel_i965gm_info = {
-       .gen = 4, .is_crestline = 1, .num_pipes = 2,
-       .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
-       .has_overlay = 1,
-       .supports_tv = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       va_start(args, fmt);
 
-static const struct intel_device_info intel_g33_info = {
-       .gen = 3, .is_g33 = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_overlay = 1,
-       .ring_mask = RENDER_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       vaf.fmt = fmt;
+       vaf.va = &args;
 
-static const struct intel_device_info intel_g45_info = {
-       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
-       .has_pipe_cxsr = 1, .has_hotplug = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+                  __builtin_return_address(0), &vaf);
 
-static const struct intel_device_info intel_gm45_info = {
-       .gen = 4, .is_g4x = 1, .num_pipes = 2,
-       .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
-       .has_pipe_cxsr = 1, .has_hotplug = 1,
-       .supports_tv = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       if (is_error && !shown_bug_once) {
+               dev_notice(dev, "%s", FDO_BUG_MSG);
+               shown_bug_once = true;
+       }
 
-static const struct intel_device_info intel_pineview_info = {
-       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_overlay = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       va_end(args);
+}
 
-static const struct intel_device_info intel_ironlake_d_info = {
-       .gen = 5, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+static bool i915_error_injected(struct drm_i915_private *dev_priv)
+{
+       return i915.inject_load_failure &&
+              i915_load_fail_count == i915.inject_load_failure;
+}
+
+#define i915_load_error(dev_priv, fmt, ...)                                 \
+       __i915_printk(dev_priv,                                              \
+                     i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+                     fmt, ##__VA_ARGS__)
+
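A hedged usage note: routing load errors through this macro demotes a deliberately injected failure from KERN_ERR to KERN_DEBUG, so fault-injection runs stay quiet; the error path at the end of i915_driver_load() below calls it exactly like this:

        i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);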
+static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
+{
+       enum intel_pch ret = PCH_NOP;
+
+       /*
+        * In a virtualized passthrough environment we can be in a
+        * setup where the ISA bridge is not able to be passed through.
+        * In this case, a south bridge can be emulated and we have to
+        * make an educated guess as to which PCH is really there.
+        */
+
+       if (IS_GEN5(dev)) {
+               ret = PCH_IBX;
+               DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
+       } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+               ret = PCH_CPT;
+               DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
+       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+               ret = PCH_LPT;
+               DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
+       } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+               ret = PCH_SPT;
+               DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
+       }
+
+       return ret;
+}
+
+static void intel_detect_pch(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct pci_dev *pch = NULL;
+
+       /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
+        * (which really amounts to a PCH but no South Display).
+        */
+       if (INTEL_INFO(dev)->num_pipes == 0) {
+               dev_priv->pch_type = PCH_NOP;
+               return;
+       }
 
-static const struct intel_device_info intel_ironlake_m_info = {
-       .gen = 5, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
+       /*
+        * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
+        * make graphics device passthrough easy for the VMM, which then only
+        * needs to expose the ISA bridge to let the driver know the real
+        * hardware underneath. This is a requirement from the virtualization
+        * team.
+        *
+        * In some virtualized environments (e.g. XEN), there is an irrelevant
+        * ISA bridge in the system. To work reliably, we should scan through
+        * all the ISA bridge devices and check for the first match, instead
+        * of only checking the first one.
+        */
+       while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
+               if (pch->vendor == PCI_VENDOR_ID_INTEL) {
+                       unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+                       dev_priv->pch_id = id;
+
+                       if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_IBX;
+                               DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+                               WARN_ON(!IS_GEN5(dev));
+                       } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_CPT;
+                               DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+                       } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
+                               /* PantherPoint is CPT compatible */
+                               dev_priv->pch_type = PCH_CPT;
+                               DRM_DEBUG_KMS("Found PantherPoint PCH\n");
+                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
+                       } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_LPT;
+                               DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+                               WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
+                       } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_LPT;
+                               DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+                               WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
+                       } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_SPT;
+                               DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
+                               WARN_ON(!IS_SKYLAKE(dev) &&
+                                       !IS_KABYLAKE(dev));
+                       } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_SPT;
+                               DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
+                               WARN_ON(!IS_SKYLAKE(dev) &&
+                                       !IS_KABYLAKE(dev));
+                       } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_KBP;
+                               DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+                               WARN_ON(!IS_KABYLAKE(dev));
+                       } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+                                  (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
+                                  ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+                                   pch->subsystem_vendor ==
+                                           PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
+                                   pch->subsystem_device ==
+                                           PCI_SUBDEVICE_ID_QEMU)) {
+                               dev_priv->pch_type = intel_virt_detect_pch(dev);
+                       } else
+                               continue;
+
+                       break;
+               }
+       }
+       if (!pch)
+               DRM_DEBUG_KMS("No PCH found.\n");
+
+       pci_dev_put(pch);
+}
+
+bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_GEN(dev_priv) < 6)
+               return false;
+
+       if (i915.semaphores >= 0)
+               return i915.semaphores;
+
+       /* TODO: make semaphores and Execlists play nicely together */
+       if (i915.enable_execlists)
+               return false;
+
+#ifdef CONFIG_INTEL_IOMMU
+       /* Enable semaphores on SNB when IO remapping is off */
+       if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
+               return false;
+#endif
+
+       return true;
+}
+
+static int i915_getparam(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       drm_i915_getparam_t *param = data;
+       int value;
+
+       switch (param->param) {
+       case I915_PARAM_IRQ_ACTIVE:
+       case I915_PARAM_ALLOW_BATCHBUFFER:
+       case I915_PARAM_LAST_DISPATCH:
+               /* Reject all old ums/dri params. */
+               return -ENODEV;
+       case I915_PARAM_CHIPSET_ID:
+               value = dev->pdev->device;
+               break;
+       case I915_PARAM_REVISION:
+               value = dev->pdev->revision;
+               break;
+       case I915_PARAM_HAS_GEM:
+               value = 1;
+               break;
+       case I915_PARAM_NUM_FENCES_AVAIL:
+               value = dev_priv->num_fence_regs;
+               break;
+       case I915_PARAM_HAS_OVERLAY:
+               value = dev_priv->overlay ? 1 : 0;
+               break;
+       case I915_PARAM_HAS_PAGEFLIPPING:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_EXECBUF2:
+               /* depends on GEM */
+               value = 1;
+               break;
+       case I915_PARAM_HAS_BSD:
+               value = intel_engine_initialized(&dev_priv->engine[VCS]);
+               break;
+       case I915_PARAM_HAS_BLT:
+               value = intel_engine_initialized(&dev_priv->engine[BCS]);
+               break;
+       case I915_PARAM_HAS_VEBOX:
+               value = intel_engine_initialized(&dev_priv->engine[VECS]);
+               break;
+       case I915_PARAM_HAS_BSD2:
+               value = intel_engine_initialized(&dev_priv->engine[VCS2]);
+               break;
+       case I915_PARAM_HAS_RELAXED_FENCING:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_COHERENT_RINGS:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_EXEC_CONSTANTS:
+               value = INTEL_INFO(dev)->gen >= 4;
+               break;
+       case I915_PARAM_HAS_RELAXED_DELTA:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_GEN7_SOL_RESET:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_LLC:
+               value = HAS_LLC(dev);
+               break;
+       case I915_PARAM_HAS_WT:
+               value = HAS_WT(dev);
+               break;
+       case I915_PARAM_HAS_ALIASING_PPGTT:
+               value = USES_PPGTT(dev);
+               break;
+       case I915_PARAM_HAS_WAIT_TIMEOUT:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_SEMAPHORES:
+               value = i915_semaphore_is_enabled(dev_priv);
+               break;
+       case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_SECURE_BATCHES:
+               value = capable(CAP_SYS_ADMIN);
+               break;
+       case I915_PARAM_HAS_PINNED_BATCHES:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_EXEC_NO_RELOC:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_EXEC_HANDLE_LUT:
+               value = 1;
+               break;
+       case I915_PARAM_CMD_PARSER_VERSION:
+               value = i915_cmd_parser_get_version(dev_priv);
+               break;
+       case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+               value = 1;
+               break;
+       case I915_PARAM_MMAP_VERSION:
+               value = 1;
+               break;
+       case I915_PARAM_SUBSLICE_TOTAL:
+               value = INTEL_INFO(dev)->subslice_total;
+               if (!value)
+                       return -ENODEV;
+               break;
+       case I915_PARAM_EU_TOTAL:
+               value = INTEL_INFO(dev)->eu_total;
+               if (!value)
+                       return -ENODEV;
+               break;
+       case I915_PARAM_HAS_GPU_RESET:
+               value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+               break;
+       case I915_PARAM_HAS_RESOURCE_STREAMER:
+               value = HAS_RESOURCE_STREAMER(dev);
+               break;
+       case I915_PARAM_HAS_EXEC_SOFTPIN:
+               value = 1;
+               break;
+       case I915_PARAM_HAS_POOLED_EU:
+               value = HAS_POOLED_EU(dev);
+               break;
+       case I915_PARAM_MIN_EU_IN_POOL:
+               value = INTEL_INFO(dev)->min_eu_in_pool;
+               break;
+       default:
+               DRM_DEBUG("Unknown parameter %d\n", param->param);
+               return -EINVAL;
+       }
+
+       if (put_user(value, param->value))
+               return -EFAULT;
+
+       return 0;
+}
+
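For reference, a hedged userspace-side sketch of this interface (assumes an already-open i915 DRM file descriptor; drm_i915_getparam_t and DRM_IOCTL_I915_GETPARAM come from the uAPI header):

        #include <sys/ioctl.h>
        #include <drm/i915_drm.h>

        /* Query one of the parameters handled by i915_getparam() above;
         * returns 0 on success, -1 with errno set on failure. */
        static int query_chipset_id(int fd, int *chipset_id)
        {
                drm_i915_getparam_t gp = {
                        .param = I915_PARAM_CHIPSET_ID,
                        .value = chipset_id,
                };

                return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
        }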
+static int i915_get_bridge_dev(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+       if (!dev_priv->bridge_dev) {
+               DRM_ERROR("bridge device not found\n");
+               return -1;
+       }
+       return 0;
+}
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+       u32 temp_lo, temp_hi = 0;
+       u64 mchbar_addr;
+       int ret;
+
+       if (INTEL_INFO(dev)->gen >= 4)
+               pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+       pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+       mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+       /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+       if (mchbar_addr &&
+           pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+               return 0;
+#endif
+
+       /* Get some space for it */
+       dev_priv->mch_res.name = "i915 MCHBAR";
+       dev_priv->mch_res.flags = IORESOURCE_MEM;
+       ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+                                    &dev_priv->mch_res,
+                                    MCHBAR_SIZE, MCHBAR_SIZE,
+                                    PCIBIOS_MIN_MEM,
+                                    0, pcibios_align_resource,
+                                    dev_priv->bridge_dev);
+       if (ret) {
+               DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+               dev_priv->mch_res.start = 0;
+               return ret;
+       }
+
+       if (INTEL_INFO(dev)->gen >= 4)
+               pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+                                      upper_32_bits(dev_priv->mch_res.start));
+
+       pci_write_config_dword(dev_priv->bridge_dev, reg,
+                              lower_32_bits(dev_priv->mch_res.start));
+       return 0;
+}
+
+/* Setup MCHBAR if possible; note whether we need to disable it again on teardown */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+       u32 temp;
+       bool enabled;
+
+       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+               return;
+
+       dev_priv->mchbar_need_disable = false;
+
+       if (IS_I915G(dev) || IS_I915GM(dev)) {
+               pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
+               enabled = !!(temp & DEVEN_MCHBAR_EN);
+       } else {
+               pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+               enabled = temp & 1;
+       }
+
+       /* If it's already enabled, we don't have to do anything */
+       if (enabled)
+               return;
+
+       if (intel_alloc_mchbar_resource(dev))
+               return;
+
+       dev_priv->mchbar_need_disable = true;
+
+       /* Space is allocated or reserved, so enable it. */
+       if (IS_I915G(dev) || IS_I915GM(dev)) {
+               pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+                                      temp | DEVEN_MCHBAR_EN);
+       } else {
+               pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+               pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+       }
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+       if (dev_priv->mchbar_need_disable) {
+               if (IS_I915G(dev) || IS_I915GM(dev)) {
+                       u32 deven_val;
+
+                       pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
+                                             &deven_val);
+                       deven_val &= ~DEVEN_MCHBAR_EN;
+                       pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+                                              deven_val);
+               } else {
+                       u32 mchbar_val;
+
+                       pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
+                                             &mchbar_val);
+                       mchbar_val &= ~1;
+                       pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
+                                              mchbar_val);
+               }
+       }
+
+       if (dev_priv->mch_res.start)
+               release_resource(&dev_priv->mch_res);
+}
+
+/* true = enable decode, false = disable decode */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+       struct drm_device *dev = cookie;
+
+       intel_modeset_vga_set_state(dev, state);
+       if (state)
+               return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+                      VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+       else
+               return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+       pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
+       if (state == VGA_SWITCHEROO_ON) {
+               pr_info("switched on\n");
+               dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+               /* i915 resume handler doesn't set to D0 */
+               pci_set_power_state(dev->pdev, PCI_D0);
+               i915_resume_switcheroo(dev);
+               dev->switch_power_state = DRM_SWITCH_POWER_ON;
+       } else {
+               pr_info("switched off\n");
+               dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+               i915_suspend_switcheroo(dev, pmm);
+               dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+       }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       /*
+        * FIXME: open_count is protected by drm_global_mutex but that would lead to
+        * locking inversion with the driver load path. And the access here is
+        * completely racy anyway. So don't bother with locking for now.
+        */
+       return dev->open_count == 0;
+}
+
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+       .set_gpu_state = i915_switcheroo_set_state,
+       .reprobe = NULL,
+       .can_switch = i915_switcheroo_can_switch,
 };
 
-static const struct intel_device_info intel_sandybridge_d_info = {
-       .gen = 6, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-       .has_llc = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+static void i915_gem_fini(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       /*
+        * Neither the BIOS, ourselves, nor any other kernel
+        * expects the system to be in execlists mode on startup,
+        * so we need to reset the GPU back to legacy mode. And the only
+        * known way to disable logical contexts is through a GPU reset.
+        *
+        * So in order to leave the system in a known default configuration,
+        * always reset the GPU upon unload. Afterwards we then clean up the
+        * GEM state tracking, flushing off the requests and leaving the
+        * system in a known idle state.
+        *
+        * Note that it is of the utmost importance that the GPU is idle and
+        * all stray writes are flushed *before* we dismantle the backing
+        * storage for the pinned objects.
+        *
+        * However, since we are uncertain that resetting the GPU on older
+        * machines is a good idea, we don't - just in case it leaves the
+        * machine in an unusable condition.
+        */
+       if (HAS_HW_CONTEXTS(dev)) {
+               int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
+               WARN_ON(reset && reset != -ENODEV);
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_reset(dev);
+       i915_gem_cleanup_engines(dev);
+       i915_gem_context_fini(dev);
+       mutex_unlock(&dev->struct_mutex);
+
+       WARN_ON(!list_empty(&to_i915(dev)->context_list));
+}
+
+static int i915_load_modeset_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int ret;
+
+       if (i915_inject_load_failure())
+               return -ENODEV;
+
+       ret = intel_bios_init(dev_priv);
+       if (ret)
+               DRM_INFO("failed to find VBIOS tables\n");
+
+       /* If we have more than one VGA card, then we need to arbitrate access
+        * to the common VGA resources.
+        *
+        * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+        * then we do not take part in VGA arbitration and
+        * vga_client_register() fails with -ENODEV.
+        */
+       ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+       if (ret && ret != -ENODEV)
+               goto out;
+
+       intel_register_dsm_handler();
+
+       ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
+       if (ret)
+               goto cleanup_vga_client;
+
+       /* must happen before intel_power_domains_init_hw() on VLV/CHV */
+       intel_update_rawclk(dev_priv);
+
+       intel_power_domains_init_hw(dev_priv, false);
+
+       intel_csr_ucode_init(dev_priv);
+
+       ret = intel_irq_install(dev_priv);
+       if (ret)
+               goto cleanup_csr;
+
+       intel_setup_gmbus(dev);
+
+       /* Important: The output setup functions called by modeset_init need
+        * working irqs for e.g. gmbus and dp aux transfers. */
+       intel_modeset_init(dev);
+
+       intel_guc_init(dev);
+
+       ret = i915_gem_init(dev);
+       if (ret)
+               goto cleanup_irq;
+
+       intel_modeset_gem_init(dev);
+
+       if (INTEL_INFO(dev)->num_pipes == 0)
+               return 0;
+
+       ret = intel_fbdev_init(dev);
+       if (ret)
+               goto cleanup_gem;
+
+       /* Only enable hotplug handling once the fbdev is fully set up. */
+       intel_hpd_init(dev_priv);
+
+       drm_kms_helper_poll_init(dev);
+
+       return 0;
+
+cleanup_gem:
+       i915_gem_fini(dev);
+cleanup_irq:
+       intel_guc_fini(dev);
+       drm_irq_uninstall(dev);
+       intel_teardown_gmbus(dev);
+cleanup_csr:
+       intel_csr_ucode_fini(dev_priv);
+       intel_power_domains_fini(dev_priv);
+       vga_switcheroo_unregister_client(dev->pdev);
+cleanup_vga_client:
+       vga_client_register(dev->pdev, NULL, NULL, NULL);
+out:
+       return ret;
+}
+
+#if IS_ENABLED(CONFIG_FB)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       struct apertures_struct *ap;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       bool primary;
+       int ret;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return -ENOMEM;
+
+       ap->ranges[0].base = ggtt->mappable_base;
+       ap->ranges[0].size = ggtt->mappable_end;
+
+       primary =
+               pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+       ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+       kfree(ap);
+
+       return ret;
+}
+#else
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+       return 0;
+}
+#endif
+
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       int ret = 0;
+
+       DRM_INFO("Replacing VGA console driver\n");
+
+       console_lock();
+       if (con_is_bound(&vga_con))
+               ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+       if (ret == 0) {
+               ret = do_unregister_con_driver(&vga_con);
+
+               /* Ignore "already unregistered". */
+               if (ret == -ENODEV)
+                       ret = 0;
+       }
+       console_unlock();
+
+       return ret;
+}
+#endif
+
+static void intel_init_dpio(struct drm_i915_private *dev_priv)
+{
+       /*
+        * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
+        * CHV x1 PHY (DP/HDMI D).
+        * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C).
+        */
+       if (IS_CHERRYVIEW(dev_priv)) {
+               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
+               DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
+       } else if (IS_VALLEYVIEW(dev_priv)) {
+               DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
+       }
+}
+
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+       /*
+        * The i915 workqueue is primarily used for batched retirement of
+        * requests (and thus managing bo) once the task has been completed
+        * by the GPU. i915_gem_retire_requests() is called directly when we
+        * need high-priority retirement, such as waiting for an explicit
+        * bo.
+        *
+        * It is also used for periodic low-priority events, such as
+        * idle-timers and recording error state.
+        *
+        * All tasks on the workqueue are expected to acquire the dev mutex
+        * so there is no point in running more than one instance of the
+        * workqueue at any time.  Use an ordered one.
+        */
+       dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+       if (dev_priv->wq == NULL)
+               goto out_err;
+
+       dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+       if (dev_priv->hotplug.dp_wq == NULL)
+               goto out_free_wq;
+
+       return 0;
+
+out_free_wq:
+       destroy_workqueue(dev_priv->wq);
+out_err:
+       DRM_ERROR("Failed to allocate workqueues.\n");
+
+       return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+       destroy_workqueue(dev_priv->hotplug.dp_wq);
+       destroy_workqueue(dev_priv->wq);
+}
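For context, a hedged sketch of how work lands on the ordered queue allocated above; the retire_work name is an assumption based on the retirement role described in the comment, not code visible in this hunk:

        /* Periodic low-priority retirement: requeue onto the driver's
         * ordered workqueue roughly once per second. */
        queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
                           round_jiffies_up_relative(HZ));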
+
+/**
+ * i915_driver_init_early - setup state not requiring device access
+ * @dev_priv: device private
+ *
+ * Initialize everything that is a "SW-only" state, that is state not
+ * requiring accessing the device or exposing the driver via kernel internal
+ * or userspace interfaces. Example steps belonging here: lock initialization,
+ * system memory allocation, setting up device specific attributes and
+ * function hooks not requiring accessing the device.
+ */
+static int i915_driver_init_early(struct drm_i915_private *dev_priv,
+                                 const struct pci_device_id *ent)
+{
+       const struct intel_device_info *match_info =
+               (struct intel_device_info *)ent->driver_data;
+       struct intel_device_info *device_info;
+       int ret = 0;
+
+       if (i915_inject_load_failure())
+               return -ENODEV;
+
+       /* Setup the write-once "constant" device info */
+       device_info = mkwrite_device_info(dev_priv);
+       memcpy(device_info, match_info, sizeof(*device_info));
+       device_info->device_id = dev_priv->drm.pdev->device;
+
+       BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+       device_info->gen_mask = BIT(device_info->gen - 1);
+
+       spin_lock_init(&dev_priv->irq_lock);
+       spin_lock_init(&dev_priv->gpu_error.lock);
+       mutex_init(&dev_priv->backlight_lock);
+       spin_lock_init(&dev_priv->uncore.lock);
+       spin_lock_init(&dev_priv->mm.object_stat_lock);
+       spin_lock_init(&dev_priv->mmio_flip_lock);
+       mutex_init(&dev_priv->sb_lock);
+       mutex_init(&dev_priv->modeset_restore_lock);
+       mutex_init(&dev_priv->av_mutex);
+       mutex_init(&dev_priv->wm.wm_mutex);
+       mutex_init(&dev_priv->pps_mutex);
+
+       ret = i915_workqueues_init(dev_priv);
+       if (ret < 0)
+               return ret;
+
+       ret = intel_gvt_init(dev_priv);
+       if (ret < 0)
+               goto err_workqueues;
+
+       /* This must be called before any calls to HAS_PCH_* */
+       intel_detect_pch(&dev_priv->drm);
+
+       intel_pm_setup(&dev_priv->drm);
+       intel_init_dpio(dev_priv);
+       intel_power_domains_init(dev_priv);
+       intel_irq_init(dev_priv);
+       intel_init_display_hooks(dev_priv);
+       intel_init_clock_gating_hooks(dev_priv);
+       intel_init_audio_hooks(dev_priv);
+       i915_gem_load_init(&dev_priv->drm);
+
+       intel_display_crc_init(&dev_priv->drm);
+
+       intel_device_info_dump(dev_priv);
+
+       /* Not all pre-production machines fall into this category, only the
+        * very first ones. Almost everything should work, except for maybe
+        * suspend/resume. And we don't implement workarounds that affect only
+        * pre-production machines. */
+       if (IS_HSW_EARLY_SDV(dev_priv))
+               DRM_INFO("This is an early pre-production Haswell machine. "
+                        "It may not be fully functional.\n");
+
+       return 0;
+
+err_workqueues:
+       i915_workqueues_cleanup(dev_priv);
+       return ret;
+}
+
+/**
+ * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+{
+       i915_gem_load_cleanup(&dev_priv->drm);
+       i915_workqueues_cleanup(dev_priv);
+}
+
+static int i915_mmio_setup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int mmio_bar;
+       int mmio_size;
+
+       mmio_bar = IS_GEN2(dev) ? 1 : 0;
+       /*
+        * Before gen4, the registers and the GTT are behind different BARs.
+        * However, from gen4 onwards, the registers and the GTT are shared
+        * in the same BAR, so we want to restrict this ioremap from
+        * clobbering the GTT, which we want to map with ioremap_wc instead.
+        * Fortunately, the register BAR remains the same size for all the
+        * earlier generations up to Ironlake.
+        */
+       if (INTEL_INFO(dev)->gen < 5)
+               mmio_size = 512 * 1024;
+       else
+               mmio_size = 2 * 1024 * 1024;
+       dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+       if (dev_priv->regs == NULL) {
+               DRM_ERROR("failed to map registers\n");
+
+               return -EIO;
+       }
+
+       /* Try to make sure MCHBAR is enabled before poking at it */
+       intel_setup_mchbar(dev);
+
+       return 0;
+}
+
+static void i915_mmio_cleanup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       intel_teardown_mchbar(dev);
+       pci_iounmap(dev->pdev, dev_priv->regs);
+}
+
+/**
+ * i915_driver_init_mmio - setup device MMIO
+ * @dev_priv: device private
+ *
+ * Setup minimal device state necessary for MMIO accesses later in the
+ * initialization sequence. The setup here should avoid any other device-wide
+ * side effects or exposing the driver via kernel internal or user space
+ * interfaces.
+ */
+static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       int ret;
+
+       if (i915_inject_load_failure())
+               return -ENODEV;
+
+       if (i915_get_bridge_dev(dev))
+               return -EIO;
+
+       ret = i915_mmio_setup(dev);
+       if (ret < 0)
+               goto put_bridge;
+
+       intel_uncore_init(dev_priv);
+
+       return 0;
+
+put_bridge:
+       pci_dev_put(dev_priv->bridge_dev);
+
+       return ret;
+}
+
+/**
+ * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+
+       intel_uncore_fini(dev_priv);
+       i915_mmio_cleanup(dev);
+       pci_dev_put(dev_priv->bridge_dev);
+}
+
+static void intel_sanitize_options(struct drm_i915_private *dev_priv)
+{
+       i915.enable_execlists =
+               intel_sanitize_enable_execlists(dev_priv,
+                                               i915.enable_execlists);
+
+       /*
+        * i915.enable_ppgtt is read-only, so do an early pass to validate the
+        * user's requested state against the hardware/driver capabilities.  We
+        * do this now so that we can print out any log messages once rather
+        * than every time we check intel_enable_ppgtt().
+        */
+       i915.enable_ppgtt =
+               intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
+       DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+}
+
+/**
+ * i915_driver_init_hw - setup state requiring device access
+ * @dev_priv: device private
+ *
+ * Setup state that requires accessing the device, but doesn't require
+ * exposing the driver via kernel internal or userspace interfaces.
+ */
+static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       uint32_t aperture_size;
+       int ret;
+
+       if (i915_inject_load_failure())
+               return -ENODEV;
+
+       intel_device_info_runtime_init(dev_priv);
+
+       intel_sanitize_options(dev_priv);
+
+       ret = i915_ggtt_init_hw(dev);
+       if (ret)
+               return ret;
+
+       ret = i915_ggtt_enable_hw(dev);
+       if (ret) {
+               DRM_ERROR("failed to enable GGTT\n");
+               goto out_ggtt;
+       }
+
+       /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+        * otherwise the vga fbdev driver falls over. */
+       ret = i915_kick_out_firmware_fb(dev_priv);
+       if (ret) {
+               DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+               goto out_ggtt;
+       }
+
+       ret = i915_kick_out_vgacon(dev_priv);
+       if (ret) {
+               DRM_ERROR("failed to remove conflicting VGA console\n");
+               goto out_ggtt;
+       }
+
+       pci_set_master(dev->pdev);
+
+       /* overlay on gen2 is broken and can't address above 1G */
+       if (IS_GEN2(dev)) {
+               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+               if (ret) {
+                       DRM_ERROR("failed to set DMA mask\n");
+
+                       goto out_ggtt;
+               }
+       }
+
+       /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+        * using 32-bit addressing, overwriting memory if HWS is located
+        * above 4GB.
+        *
+        * The documentation also mentions an issue with undefined
+        * behaviour if any general state is accessed within a page above 4GB,
+        * which also needs to be handled carefully.
+        */
+       if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
+               ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
+               if (ret) {
+                       DRM_ERROR("failed to set DMA mask\n");
+
+                       goto out_ggtt;
+               }
+       }
+
+       aperture_size = ggtt->mappable_end;
+
+       ggtt->mappable =
+               io_mapping_create_wc(ggtt->mappable_base,
+                                    aperture_size);
+       if (!ggtt->mappable) {
+               ret = -EIO;
+               goto out_ggtt;
+       }
+
+       ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
+                                             aperture_size);
+
+       pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+                          PM_QOS_DEFAULT_VALUE);
+
+       intel_uncore_sanitize(dev_priv);
+
+       intel_opregion_setup(dev_priv);
+
+       i915_gem_load_init_fences(dev_priv);
+
+       /* On the 945G/GM, the chipset reports the MSI capability on the
+        * integrated graphics even though the support isn't actually there
+        * according to the published specs.  It doesn't appear to function
+        * correctly in testing on 945G.
+        * This may be a side effect of MSI having been made available for PEG
+        * and the registers being closely associated.
+        *
+        * According to chipset errata, on the 965GM, MSI interrupts may
+        * be lost or delayed, but we use them anyway to avoid
+        * stuck interrupts on some machines.
+        */
+       if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+               if (pci_enable_msi(dev->pdev) < 0)
+                       DRM_DEBUG_DRIVER("can't enable MSI\n");
+       }
+
+       return 0;
+
+out_ggtt:
+       i915_ggtt_cleanup_hw(dev);
+
+       return ret;
+}
+
+/**
+ * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+       if (dev->pdev->msi_enabled)
+               pci_disable_msi(dev->pdev);
+
+       pm_qos_remove_request(&dev_priv->pm_qos);
+       arch_phys_wc_del(ggtt->mtrr);
+       io_mapping_free(ggtt->mappable);
+       i915_ggtt_cleanup_hw(dev);
+}
+
+/**
+ * i915_driver_register - register the driver with the rest of the system
+ * @dev_priv: device private
+ *
+ * Perform any steps necessary to make the driver available via kernel
+ * internal or userspace interfaces.
+ */
+static void i915_driver_register(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = &dev_priv->drm;
+
+       i915_gem_shrinker_init(dev_priv);
+
+       /*
+        * Notify the host of a valid surface after modesetting,
+        * when running inside a VM.
+        */
+       if (intel_vgpu_active(dev_priv))
+               I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+
+       /* Reveal our presence to userspace */
+       if (drm_dev_register(dev, 0) == 0) {
+               i915_debugfs_register(dev_priv);
+               i915_setup_sysfs(dev);
+       } else
+               DRM_ERROR("Failed to register driver for userspace access!\n");
+
+       if (INTEL_INFO(dev_priv)->num_pipes) {
+               /* Must be done after probing outputs */
+               intel_opregion_register(dev_priv);
+               acpi_video_register();
+       }
+
+       if (IS_GEN5(dev_priv))
+               intel_gpu_ips_init(dev_priv);
+
+       i915_audio_component_init(dev_priv);
+
+       /*
+        * Some ports require correctly set-up hpd registers for detection to
+        * work properly (otherwise we get a ghost connected connector status),
+        * e.g. VGA on gm45.  Hence we can only set up the initial fbdev config
+        * after hpd
+        * irqs are fully enabled. We do it last so that the async config
+        * cannot run before the connectors are registered.
+        */
+       intel_fbdev_initial_config_async(dev);
+}
+
+/**
+ * i915_driver_unregister - cleanup the registration done in i915_driver_register()
+ * @dev_priv: device private
+ */
+static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+{
+       i915_audio_component_cleanup(dev_priv);
+
+       intel_gpu_ips_teardown();
+       acpi_video_unregister();
+       intel_opregion_unregister(dev_priv);
+
+       i915_teardown_sysfs(&dev_priv->drm);
+       i915_debugfs_unregister(dev_priv);
+       drm_dev_unregister(&dev_priv->drm);
+
+       i915_gem_shrinker_cleanup(dev_priv);
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @pdev: PCI device
+ * @ent: matching PCI device ID entry
+ *
+ * The driver load routine has to do several things:
+ *   - drive output discovery via intel_modeset_init()
+ *   - initialize the memory manager
+ *   - allocate initial config memory
+ *   - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct drm_i915_private *dev_priv;
+       int ret;
 
-static const struct intel_device_info intel_sandybridge_m_info = {
-       .gen = 6, .is_mobile = 1, .num_pipes = 2,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_fbc = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
-       .has_llc = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-};
+       if (i915.nuclear_pageflip)
+               driver.driver_features |= DRIVER_ATOMIC;
 
-#define GEN7_FEATURES  \
-       .gen = 7, .num_pipes = 3, \
-       .need_gfx_hws = 1, .has_hotplug = 1, \
-       .has_fbc = 1, \
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
-       .has_llc = 1, \
-       GEN_DEFAULT_PIPEOFFSETS, \
-       IVB_CURSOR_OFFSETS
-
-static const struct intel_device_info intel_ivybridge_d_info = {
-       GEN7_FEATURES,
-       .is_ivybridge = 1,
-};
+       ret = -ENOMEM;
+       dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+       if (dev_priv)
+               ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
+       if (ret) {
+               dev_printk(KERN_ERR, &pdev->dev,
+                          "[" DRM_NAME ":%s] allocation failed\n", __func__);
+               kfree(dev_priv);
+               return ret;
+       }
 
-static const struct intel_device_info intel_ivybridge_m_info = {
-       GEN7_FEATURES,
-       .is_ivybridge = 1,
-       .is_mobile = 1,
-};
+       dev_priv->drm.pdev = pdev;
+       dev_priv->drm.dev_private = dev_priv;
 
-static const struct intel_device_info intel_ivybridge_q_info = {
-       GEN7_FEATURES,
-       .is_ivybridge = 1,
-       .num_pipes = 0, /* legal, last one wins */
-};
+       ret = pci_enable_device(pdev);
+       if (ret)
+               goto out_free_priv;
 
-#define VLV_FEATURES  \
-       .gen = 7, .num_pipes = 2, \
-       .need_gfx_hws = 1, .has_hotplug = 1, \
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
-       .display_mmio_offset = VLV_DISPLAY_BASE, \
-       GEN_DEFAULT_PIPEOFFSETS, \
-       CURSOR_OFFSETS
-
-static const struct intel_device_info intel_valleyview_m_info = {
-       VLV_FEATURES,
-       .is_valleyview = 1,
-       .is_mobile = 1,
-};
+       pci_set_drvdata(pdev, &dev_priv->drm);
 
-static const struct intel_device_info intel_valleyview_d_info = {
-       VLV_FEATURES,
-       .is_valleyview = 1,
-};
+       ret = i915_driver_init_early(dev_priv, ent);
+       if (ret < 0)
+               goto out_pci_disable;
 
-#define HSW_FEATURES  \
-       GEN7_FEATURES, \
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
-       .has_ddi = 1, \
-       .has_fpga_dbg = 1
+       intel_runtime_pm_get(dev_priv);
 
-static const struct intel_device_info intel_haswell_d_info = {
-       HSW_FEATURES,
-       .is_haswell = 1,
-};
+       ret = i915_driver_init_mmio(dev_priv);
+       if (ret < 0)
+               goto out_runtime_pm_put;
 
-static const struct intel_device_info intel_haswell_m_info = {
-       HSW_FEATURES,
-       .is_haswell = 1,
-       .is_mobile = 1,
-};
+       ret = i915_driver_init_hw(dev_priv);
+       if (ret < 0)
+               goto out_cleanup_mmio;
 
-#define BDW_FEATURES \
-       HSW_FEATURES, \
-       BDW_COLORS
+       /*
+        * TODO: move the vblank init and parts of modeset init steps into one
+        * of the i915_driver_init_/i915_driver_register functions according
+        * to the role/effect of the given init step.
+        */
+       if (INTEL_INFO(dev_priv)->num_pipes) {
+               ret = drm_vblank_init(&dev_priv->drm,
+                                     INTEL_INFO(dev_priv)->num_pipes);
+               if (ret)
+                       goto out_cleanup_hw;
+       }
 
-static const struct intel_device_info intel_broadwell_d_info = {
-       BDW_FEATURES,
-       .gen = 8,
-};
+       ret = i915_load_modeset_init(&dev_priv->drm);
+       if (ret < 0)
+               goto out_cleanup_vblank;
 
-static const struct intel_device_info intel_broadwell_m_info = {
-       BDW_FEATURES,
-       .gen = 8, .is_mobile = 1,
-};
+       i915_driver_register(dev_priv);
 
-static const struct intel_device_info intel_broadwell_gt3d_info = {
-       BDW_FEATURES,
-       .gen = 8,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+       intel_runtime_pm_enable(dev_priv);
 
-static const struct intel_device_info intel_broadwell_gt3m_info = {
-       BDW_FEATURES,
-       .gen = 8, .is_mobile = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+       intel_runtime_pm_put(dev_priv);
 
-static const struct intel_device_info intel_cherryview_info = {
-       .gen = 8, .num_pipes = 3,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-       .is_cherryview = 1,
-       .display_mmio_offset = VLV_DISPLAY_BASE,
-       GEN_CHV_PIPEOFFSETS,
-       CURSOR_OFFSETS,
-       CHV_COLORS,
-};
+       return 0;
 
-static const struct intel_device_info intel_skylake_info = {
-       BDW_FEATURES,
-       .is_skylake = 1,
-       .gen = 9,
-};
+out_cleanup_vblank:
+       drm_vblank_cleanup(&dev_priv->drm);
+out_cleanup_hw:
+       i915_driver_cleanup_hw(dev_priv);
+out_cleanup_mmio:
+       i915_driver_cleanup_mmio(dev_priv);
+out_runtime_pm_put:
+       intel_runtime_pm_put(dev_priv);
+       i915_driver_cleanup_early(dev_priv);
+out_pci_disable:
+       pci_disable_device(pdev);
+out_free_priv:
+       i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+       drm_dev_unref(&dev_priv->drm);
+       return ret;
+}
 
-static const struct intel_device_info intel_skylake_gt3_info = {
-       BDW_FEATURES,
-       .is_skylake = 1,
-       .gen = 9,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+void i915_driver_unload(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
-static const struct intel_device_info intel_broxton_info = {
-       .is_preliminary = 1,
-       .is_broxton = 1,
-       .gen = 9,
-       .need_gfx_hws = 1, .has_hotplug = 1,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-       .num_pipes = 3,
-       .has_ddi = 1,
-       .has_fpga_dbg = 1,
-       .has_fbc = 1,
-       GEN_DEFAULT_PIPEOFFSETS,
-       IVB_CURSOR_OFFSETS,
-       BDW_COLORS,
-};
+       intel_fbdev_fini(dev);
 
-static const struct intel_device_info intel_kabylake_info = {
-       BDW_FEATURES,
-       .is_kabylake = 1,
-       .gen = 9,
-};
+       if (i915_gem_suspend(dev))
+               DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 
-static const struct intel_device_info intel_kabylake_gt3_info = {
-       BDW_FEATURES,
-       .is_kabylake = 1,
-       .gen = 9,
-       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-};
+       intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
-/*
- * Make sure any device matches here are from most specific to most
- * general.  For example, since the Quanta match is based on the subsystem
- * and subvendor IDs, we need it to come before the more general IVB
- * PCI ID matches, otherwise we'll use the wrong info struct above.
- */
-static const struct pci_device_id pciidlist[] = {
-       INTEL_I830_IDS(&intel_i830_info),
-       INTEL_I845G_IDS(&intel_845g_info),
-       INTEL_I85X_IDS(&intel_i85x_info),
-       INTEL_I865G_IDS(&intel_i865g_info),
-       INTEL_I915G_IDS(&intel_i915g_info),
-       INTEL_I915GM_IDS(&intel_i915gm_info),
-       INTEL_I945G_IDS(&intel_i945g_info),
-       INTEL_I945GM_IDS(&intel_i945gm_info),
-       INTEL_I965G_IDS(&intel_i965g_info),
-       INTEL_G33_IDS(&intel_g33_info),
-       INTEL_I965GM_IDS(&intel_i965gm_info),
-       INTEL_GM45_IDS(&intel_gm45_info),
-       INTEL_G45_IDS(&intel_g45_info),
-       INTEL_PINEVIEW_IDS(&intel_pineview_info),
-       INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
-       INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
-       INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
-       INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
-       INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
-       INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
-       INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
-       INTEL_HSW_D_IDS(&intel_haswell_d_info),
-       INTEL_HSW_M_IDS(&intel_haswell_m_info),
-       INTEL_VLV_M_IDS(&intel_valleyview_m_info),
-       INTEL_VLV_D_IDS(&intel_valleyview_d_info),
-       INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
-       INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
-       INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
-       INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
-       INTEL_CHV_IDS(&intel_cherryview_info),
-       INTEL_SKL_GT1_IDS(&intel_skylake_info),
-       INTEL_SKL_GT2_IDS(&intel_skylake_info),
-       INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
-       INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
-       INTEL_BXT_IDS(&intel_broxton_info),
-       INTEL_KBL_GT1_IDS(&intel_kabylake_info),
-       INTEL_KBL_GT2_IDS(&intel_kabylake_info),
-       INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
-       INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
-       {0, 0, 0}
-};
+       i915_driver_unregister(dev_priv);
 
-MODULE_DEVICE_TABLE(pci, pciidlist);
+       drm_vblank_cleanup(dev);
 
-static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
-{
-       enum intel_pch ret = PCH_NOP;
+       intel_modeset_cleanup(dev);
 
        /*
-        * In a virtualized passthrough environment we can be in a
-        * setup where the ISA bridge is not able to be passed through.
-        * In this case, a south bridge can be emulated and we have to
-        * make an educated guess as to which PCH is really there.
+        * free the memory space allocated for the child device
+        * config parsed from VBT
         */
-
-       if (IS_GEN5(dev)) {
-               ret = PCH_IBX;
-               DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
-       } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
-               ret = PCH_CPT;
-               DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
-       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-               ret = PCH_LPT;
-               DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
-       } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
-               ret = PCH_SPT;
-               DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
+       if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+               kfree(dev_priv->vbt.child_dev);
+               dev_priv->vbt.child_dev = NULL;
+               dev_priv->vbt.child_dev_num = 0;
        }
+       kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+       dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+       kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+       dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
 
-       return ret;
-}
+       vga_switcheroo_unregister_client(dev->pdev);
+       vga_client_register(dev->pdev, NULL, NULL, NULL);
 
-void intel_detect_pch(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct pci_dev *pch = NULL;
+       intel_csr_ucode_fini(dev_priv);
 
-       /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
-        * (which really amounts to a PCH but no South Display).
-        */
-       if (INTEL_INFO(dev)->num_pipes == 0) {
-               dev_priv->pch_type = PCH_NOP;
-               return;
-       }
+       /* Free error state after interrupts are fully disabled. */
+       cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+       i915_destroy_error_state(dev);
 
-       /*
-        * The reason to probe ISA bridge instead of Dev31:Fun0 is to
-        * make graphics device passthrough work easy for VMM, that only
-        * need to expose ISA bridge to let driver know the real hardware
-        * underneath. This is a requirement from virtualization team.
-        *
-        * In some virtualized environments (e.g. XEN), there is an irrelevant
-        * ISA bridge in the system. To work reliably, we should scan through
-        * all the ISA bridge devices and check for the first match, instead
-        * of only checking the first one.
-        */
-       while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
-               if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-                       unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
-                       dev_priv->pch_id = id;
+       /* Flush any outstanding unpin_work. */
+       flush_workqueue(dev_priv->wq);
 
-                       if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_IBX;
-                               DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
-                               WARN_ON(!IS_GEN5(dev));
-                       } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_CPT;
-                               DRM_DEBUG_KMS("Found CougarPoint PCH\n");
-                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
-                       } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
-                               /* PantherPoint is CPT compatible */
-                               dev_priv->pch_type = PCH_CPT;
-                               DRM_DEBUG_KMS("Found PantherPoint PCH\n");
-                               WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
-                       } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_LPT;
-                               DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
-                               WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
-                       } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_LPT;
-                               DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
-                               WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
-                       } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_SPT;
-                               DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
-                               WARN_ON(!IS_SKYLAKE(dev) &&
-                                       !IS_KABYLAKE(dev));
-                       } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
-                               dev_priv->pch_type = PCH_SPT;
-                               DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
-                               WARN_ON(!IS_SKYLAKE(dev) &&
-                                       !IS_KABYLAKE(dev));
-                       } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
-                                  (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
-                                  ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
-                                   pch->subsystem_vendor == 0x1af4 &&
-                                   pch->subsystem_device == 0x1100)) {
-                               dev_priv->pch_type = intel_virt_detect_pch(dev);
-                       } else
-                               continue;
+       intel_guc_fini(dev);
+       i915_gem_fini(dev);
+       intel_fbc_cleanup_cfb(dev_priv);
 
-                       break;
-               }
-       }
-       if (!pch)
-               DRM_DEBUG_KMS("No PCH found.\n");
+       intel_power_domains_fini(dev_priv);
 
-       pci_dev_put(pch);
+       i915_driver_cleanup_hw(dev_priv);
+       i915_driver_cleanup_mmio(dev_priv);
+
+       intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+
+       i915_driver_cleanup_early(dev_priv);
 }
 
-bool i915_semaphore_is_enabled(struct drm_device *dev)
+static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-       if (INTEL_INFO(dev)->gen < 6)
-               return false;
+       int ret;
 
-       if (i915.semaphores >= 0)
-               return i915.semaphores;
+       ret = i915_gem_open(dev, file);
+       if (ret)
+               return ret;
 
-       /* TODO: make semaphores and Execlists play nicely together */
-       if (i915.enable_execlists)
-               return false;
+       return 0;
+}
 
-       /* Until we get further testing... */
-       if (IS_GEN8(dev))
-               return false;
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited.  In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
+static void i915_driver_lastclose(struct drm_device *dev)
+{
+       intel_fbdev_restore_mode(dev);
+       vga_switcheroo_process_delayed_switch();
+}
 
-#ifdef CONFIG_INTEL_IOMMU
-       /* Enable semaphores on SNB when IO remapping is off */
-       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
-               return false;
-#endif
+static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
+{
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_context_close(dev, file);
+       i915_gem_release(dev, file);
+       mutex_unlock(&dev->struct_mutex);
+}
 
-       return true;
+static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+       struct drm_i915_file_private *file_priv = file->driver_priv;
+
+       kfree(file_priv);
 }
 
 static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *encoder;
 
        drm_modeset_lock_all(dev);
@@ -582,7 +1430,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
 
 static int i915_drm_suspend(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        pci_power_t opregion_target_state;
        int error;
 
@@ -610,7 +1458,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        intel_guc_suspend(dev);
 
-       intel_suspend_gt_powersave(dev);
+       intel_suspend_gt_powersave(dev_priv);
 
        intel_display_suspend(dev);
 
@@ -628,10 +1476,10 @@ static int i915_drm_suspend(struct drm_device *dev)
        i915_save_state(dev);
 
        opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
-       intel_opregion_notify_adapter(dev, opregion_target_state);
+       intel_opregion_notify_adapter(dev_priv, opregion_target_state);
 
-       intel_uncore_forcewake_reset(dev, false);
-       intel_opregion_fini(dev);
+       intel_uncore_forcewake_reset(dev_priv, false);
+       intel_opregion_unregister(dev_priv);
 
        intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 
@@ -649,7 +1497,7 @@ out:
 
 static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
 {
-       struct drm_i915_private *dev_priv = drm_dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(drm_dev);
        bool fw_csr;
        int ret;
 
@@ -711,7 +1559,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
 {
        int error;
 
-       if (!dev || !dev->dev_private) {
+       if (!dev) {
                DRM_ERROR("dev: %p\n", dev);
                DRM_ERROR("DRM not initialized, aborting suspend.\n");
                return -ENODEV;
@@ -733,7 +1581,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
 
 static int i915_drm_resume(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        disable_rpm_wakeref_asserts(dev_priv);
@@ -749,7 +1597,7 @@ static int i915_drm_resume(struct drm_device *dev)
        mutex_unlock(&dev->struct_mutex);
 
        i915_restore_state(dev);
-       intel_opregion_setup(dev);
+       intel_opregion_setup(dev_priv);
 
        intel_init_pch_refclk(dev);
        drm_mode_config_reset(dev);
@@ -767,7 +1615,7 @@ static int i915_drm_resume(struct drm_device *dev)
        mutex_lock(&dev->struct_mutex);
        if (i915_gem_init_hw(dev)) {
                DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-                       atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+               atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
        }
        mutex_unlock(&dev->struct_mutex);
 
@@ -777,7 +1625,7 @@ static int i915_drm_resume(struct drm_device *dev)
 
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev);
+               dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
 
        intel_dp_mst_resume(dev);
@@ -794,7 +1642,7 @@ static int i915_drm_resume(struct drm_device *dev)
        /* Config may have changed between suspend and resume */
        drm_helper_hpd_irq_event(dev);
 
-       intel_opregion_init(dev);
+       intel_opregion_register(dev_priv);
 
        intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
@@ -802,7 +1650,7 @@ static int i915_drm_resume(struct drm_device *dev)
        dev_priv->modeset_restore = MODESET_DONE;
        mutex_unlock(&dev_priv->modeset_restore_lock);
 
-       intel_opregion_notify_adapter(dev, PCI_D0);
+       intel_opregion_notify_adapter(dev_priv, PCI_D0);
 
        drm_kms_helper_poll_enable(dev);
 
@@ -813,7 +1661,7 @@ static int i915_drm_resume(struct drm_device *dev)
 
 static int i915_drm_resume_early(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        /*
@@ -870,9 +1718,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
                DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
                          ret);
 
-       intel_uncore_early_sanitize(dev, true);
+       intel_uncore_early_sanitize(dev_priv, true);
 
-       if (IS_BROXTON(dev)) {
+       if (IS_BROXTON(dev_priv)) {
                if (!dev_priv->suspended_to_idle)
                        gen9_sanitize_dc_state(dev_priv);
                bxt_disable_dc9(dev_priv);
@@ -880,7 +1728,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
                hsw_disable_pc8(dev_priv);
        }
 
-       intel_uncore_sanitize(dev);
+       intel_uncore_sanitize(dev_priv);
 
        if (IS_BROXTON(dev_priv) ||
            !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@@ -923,14 +1771,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
  *   - re-init interrupt state
  *   - re-init display
  */
-int i915_reset(struct drm_device *dev)
+int i915_reset(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_gpu_error *error = &dev_priv->gpu_error;
        unsigned reset_counter;
        int ret;
 
-       intel_reset_gt_powersave(dev);
+       intel_reset_gt_powersave(dev_priv);
 
        mutex_lock(&dev->struct_mutex);
 
@@ -944,24 +1792,11 @@ int i915_reset(struct drm_device *dev)
                goto error;
        }
 
-       i915_gem_reset(dev);
-
-       ret = intel_gpu_reset(dev, ALL_ENGINES);
+       pr_notice("drm/i915: Resetting chip after gpu hang\n");
 
-       /* Also reset the gpu hangman. */
-       if (error->stop_rings != 0) {
-               DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-               error->stop_rings = 0;
-               if (ret == -ENODEV) {
-                       DRM_INFO("Reset not implemented, but ignoring "
-                                "error for simulated gpu hangs\n");
-                       ret = 0;
-               }
-       }
-
-       if (i915_stop_ring_allow_warn(dev_priv))
-               pr_notice("drm/i915: Resetting chip after gpu hang\n");
+       i915_gem_reset(dev);
 
+       ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
        if (ret) {
                if (ret != -ENODEV)
                        DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1001,7 +1836,7 @@ int i915_reset(struct drm_device *dev)
         * of re-init after reset.
         */
        if (INTEL_INFO(dev)->gen > 5)
-               intel_enable_gt_powersave(dev);
+               intel_enable_gt_powersave(dev_priv);
 
        return 0;
 
@@ -1011,51 +1846,12 @@ error:
        return ret;
 }
 
-static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       struct intel_device_info *intel_info =
-               (struct intel_device_info *) ent->driver_data;
-
-       if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
-               DRM_INFO("This hardware requires preliminary hardware support.\n"
-                        "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
-               return -ENODEV;
-       }
-
-       /* Only bind to function 0 of the device. Early generations
-        * used function 1 as a placeholder for multi-head. This causes
-        * us confusion instead, especially on the systems where both
-        * functions have the same PCI-ID!
-        */
-       if (PCI_FUNC(pdev->devfn))
-               return -ENODEV;
-
-       /*
-        * apple-gmux is needed on dual GPU MacBook Pro
-        * to probe the panel if we're the inactive GPU.
-        */
-       if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
-           apple_gmux_present() && pdev != vga_default_device() &&
-           !vga_switcheroo_handler_flags())
-               return -EPROBE_DEFER;
-
-       return drm_get_pci_dev(pdev, ent, &driver);
-}
-
-static void
-i915_pci_remove(struct pci_dev *pdev)
-{
-       struct drm_device *dev = pci_get_drvdata(pdev);
-
-       drm_put_dev(dev);
-}
-
 static int i915_pm_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-       if (!drm_dev || !drm_dev->dev_private) {
+       if (!drm_dev) {
                dev_err(dev, "DRM not initialized, aborting suspend.\n");
                return -ENODEV;
        }
@@ -1068,7 +1864,7 @@ static int i915_pm_suspend(struct device *dev)
 
 static int i915_pm_suspend_late(struct device *dev)
 {
-       struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
        /*
         * We have a suspend ordering issue with the snd-hda driver also
@@ -1087,7 +1883,7 @@ static int i915_pm_suspend_late(struct device *dev)
 
 static int i915_pm_poweroff_late(struct device *dev)
 {
-       struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
@@ -1097,7 +1893,7 @@ static int i915_pm_poweroff_late(struct device *dev)
 
 static int i915_pm_resume_early(struct device *dev)
 {
-       struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
@@ -1107,7 +1903,7 @@ static int i915_pm_resume_early(struct device *dev)
 
 static int i915_pm_resume(struct device *dev)
 {
-       struct drm_device *drm_dev = dev_to_i915(dev)->dev;
+       struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
 
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
@@ -1115,6 +1911,49 @@ static int i915_pm_resume(struct device *dev)
        return i915_drm_resume(drm_dev);
 }
 
+/* freeze: before creating the hibernation_image */
+static int i915_pm_freeze(struct device *dev)
+{
+       return i915_pm_suspend(dev);
+}
+
+static int i915_pm_freeze_late(struct device *dev)
+{
+       int ret;
+
+       ret = i915_pm_suspend_late(dev);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_freeze_late(dev_to_i915(dev));
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* thaw: called after creating the hibernation image, but before turning off. */
+static int i915_pm_thaw_early(struct device *dev)
+{
+       return i915_pm_resume_early(dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+       return i915_pm_resume(dev);
+}
+
+/* restore: called after loading the hibernation image. */
+static int i915_pm_restore_early(struct device *dev)
+{
+       return i915_pm_resume_early(dev);
+}
+
+static int i915_pm_restore(struct device *dev)
+{
+       return i915_pm_resume(dev);
+}
+
 /*
  * Save all Gunit registers that may be lost after a D3 and a subsequent
  * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1314,8 +2153,6 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
        u32 val;
        int err;
 
-#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
-
        val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
        val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
        if (force_on)
@@ -1325,13 +2162,16 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
        if (!force_on)
                return 0;
 
-       err = wait_for(COND, 20);
+       err = intel_wait_for_register(dev_priv,
+                                     VLV_GTLC_SURVIVABILITY_REG,
+                                     VLV_GFX_CLK_STATUS_BIT,
+                                     VLV_GFX_CLK_STATUS_BIT,
+                                     20);
        if (err)
                DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
                          I915_READ(VLV_GTLC_SURVIVABILITY_REG));
 
        return err;
-#undef COND
 }
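
This hunk, and the two that follow, replace per-site #define COND / wait_for() pairs with intel_wait_for_register(dev_priv, reg, mask, value, timeout), which polls until the masked register value matches. A rough standalone sketch of that contract; the stubbed register read and the offset are assumptions for illustration, not the driver's MMIO path:

#include <stdint.h>
#include <stdio.h>

/* Stub register read for illustration; the real helper reads MMIO. */
static uint32_t read_reg(uint32_t reg)
{
	(void)reg;
	return 0x1;			/* pretend the status bit is set */
}

/* Poll until (reg & mask) == value or the budget runs out (the real
 * helper's timeout is in milliseconds, with sleeps between reads). */
static int wait_for_register(uint32_t reg, uint32_t mask,
			     uint32_t value, unsigned int budget)
{
	do {
		if ((read_reg(reg) & mask) == value)
			return 0;	/* condition met */
	} while (budget--);
	return -1;			/* timed out */
}

int main(void)
{
	/* Made-up offset; mirrors the force-on wait above: poll bit 0. */
	printf("%d\n", wait_for_register(0x130090, 0x1, 0x1, 20));
	return 0;
}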
 
 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
@@ -1346,13 +2186,15 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
        I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
        POSTING_READ(VLV_GTLC_WAKE_CTRL);
 
-#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
-             allow)
-       err = wait_for(COND, 1);
+       err = intel_wait_for_register(dev_priv,
+                                     VLV_GTLC_PW_STATUS,
+                                     VLV_GTLC_ALLOWWAKEACK,
+                                     allow,
+                                     1);
        if (err)
                DRM_ERROR("timeout disabling GT waking\n");
+
        return err;
-#undef COND
 }
 
 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
@@ -1364,8 +2206,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
 
        mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
        val = wait_for_on ? mask : 0;
-#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
-       if (COND)
+       if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
                return 0;
 
        DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
@@ -1376,13 +2217,14 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
         * RC6 transitioning can be delayed up to 2 msec (see
         * valleyview_enable_rps), use 3 msec for safety.
         */
-       err = wait_for(COND, 3);
+       err = intel_wait_for_register(dev_priv,
+                                     VLV_GTLC_PW_STATUS, mask, val,
+                                     3);
        if (err)
                DRM_ERROR("timeout waiting for GT wells to go %s\n",
                          onoff(wait_for_on));
 
        return err;
-#undef COND
 }
 
 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@ -1439,7 +2281,7 @@ err1:
 static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
                                bool rpm_resume)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        int err;
        int ret;
 
@@ -1475,10 +2317,10 @@ static int intel_runtime_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
-       if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
+       if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
                return -ENODEV;
 
        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1513,11 +2355,8 @@ static int intel_runtime_suspend(struct device *device)
        i915_gem_release_all_mmaps(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
-       cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-
        intel_guc_suspend(dev);
 
-       intel_suspend_gt_powersave(dev);
        intel_runtime_pm_disable_interrupts(dev_priv);
 
        ret = 0;
@@ -1539,7 +2378,7 @@ static int intel_runtime_suspend(struct device *device)
                return ret;
        }
 
-       intel_uncore_forcewake_reset(dev, false);
+       intel_uncore_forcewake_reset(dev_priv, false);
 
        enable_rpm_wakeref_asserts(dev_priv);
        WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -1553,14 +2392,14 @@ static int intel_runtime_suspend(struct device *device)
         * FIXME: We really should find a document that references the arguments
         * used below!
         */
-       if (IS_BROADWELL(dev)) {
+       if (IS_BROADWELL(dev_priv)) {
                /*
                 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
                 * being detected, and the call we do at intel_runtime_resume()
                 * won't be able to restore them. Since PCI_D3hot matches the
                 * actual specification and appears to be working, use it.
                 */
-               intel_opregion_notify_adapter(dev, PCI_D3hot);
+               intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
        } else {
                /*
                 * current versions of firmware which depend on this opregion
@@ -1569,11 +2408,14 @@ static int intel_runtime_suspend(struct device *device)
                 * to distinguish it from notifications that might be sent via
                 * the suspend path.
                 */
-               intel_opregion_notify_adapter(dev, PCI_D1);
+               intel_opregion_notify_adapter(dev_priv, PCI_D1);
        }
 
        assert_forcewakes_inactive(dev_priv);
 
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+               intel_hpd_poll_init(dev_priv);
+
        DRM_DEBUG_KMS("Device suspended\n");
        return 0;
 }
@@ -1582,7 +2424,7 @@ static int intel_runtime_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct drm_device *dev = pci_get_drvdata(pdev);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
        if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1593,7 +2435,7 @@ static int intel_runtime_resume(struct device *device)
        WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
        disable_rpm_wakeref_asserts(dev_priv);
 
-       intel_opregion_notify_adapter(dev, PCI_D0);
+       intel_opregion_notify_adapter(dev_priv, PCI_D0);
        dev_priv->pm.suspended = false;
        if (intel_uncore_unclaimed_mmio(dev_priv))
                DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -1620,7 +2462,7 @@ static int intel_runtime_resume(struct device *device)
         * we can do is to hope that things will still work (and disable RPM).
         */
        i915_gem_init_swizzling(dev);
-       gen6_update_ring_freq(dev);
+       gen6_update_ring_freq(dev_priv);
 
        intel_runtime_pm_enable_interrupts(dev_priv);
 
@@ -1632,8 +2474,6 @@ static int intel_runtime_resume(struct device *device)
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_init(dev_priv);
 
-       intel_enable_gt_powersave(dev);
-
        enable_rpm_wakeref_asserts(dev_priv);
 
        if (ret)
@@ -1644,7 +2484,7 @@ static int intel_runtime_resume(struct device *device)
        return ret;
 }
 
-static const struct dev_pm_ops i915_pm_ops = {
+const struct dev_pm_ops i915_pm_ops = {
        /*
         * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
         * PMSG_RESUME]
@@ -1669,14 +2509,14 @@ static const struct dev_pm_ops i915_pm_ops = {
         * @restore, @restore_early : called after rebooting and restoring the
         *                            hibernation image [PMSG_RESTORE]
         */
-       .freeze = i915_pm_suspend,
-       .freeze_late = i915_pm_suspend_late,
-       .thaw_early = i915_pm_resume_early,
-       .thaw = i915_pm_resume,
+       .freeze = i915_pm_freeze,
+       .freeze_late = i915_pm_freeze_late,
+       .thaw_early = i915_pm_thaw_early,
+       .thaw = i915_pm_thaw,
        .poweroff = i915_pm_suspend,
        .poweroff_late = i915_pm_poweroff_late,
-       .restore_early = i915_pm_resume_early,
-       .restore = i915_pm_resume,
+       .restore_early = i915_pm_restore_early,
+       .restore = i915_pm_restore,
 
        /* S0ix (via runtime suspend) event handlers */
        .runtime_suspend = intel_runtime_suspend,
@@ -1703,6 +2543,68 @@ static const struct file_operations i915_driver_fops = {
        .llseek = noop_llseek,
 };
 
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file)
+{
+       return -ENODEV;
+}
+
+static const struct drm_ioctl_desc i915_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+};
+
 static struct drm_driver driver = {
        /* Don't use MTRRs here; the Xserver or userspace app should
         * deal with them for Intel hardware.
@@ -1710,18 +2612,12 @@ static struct drm_driver driver = {
        .driver_features =
            DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
            DRIVER_RENDER | DRIVER_MODESET,
-       .load = i915_driver_load,
-       .unload = i915_driver_unload,
        .open = i915_driver_open,
        .lastclose = i915_driver_lastclose,
        .preclose = i915_driver_preclose,
        .postclose = i915_driver_postclose,
        .set_busid = drm_pci_set_busid,
 
-#if defined(CONFIG_DEBUG_FS)
-       .debugfs_init = i915_debugfs_init,
-       .debugfs_cleanup = i915_debugfs_cleanup,
-#endif
        .gem_free_object = i915_gem_free_object,
        .gem_vm_ops = &i915_gem_vm_ops,
 
@@ -1734,6 +2630,7 @@ static struct drm_driver driver = {
        .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls = i915_ioctls,
+       .num_ioctls = ARRAY_SIZE(i915_ioctls),
        .fops = &i915_driver_fops,
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
@@ -1742,56 +2639,3 @@ static struct drm_driver driver = {
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
 };
-
-static struct pci_driver i915_pci_driver = {
-       .name = DRIVER_NAME,
-       .id_table = pciidlist,
-       .probe = i915_pci_probe,
-       .remove = i915_pci_remove,
-       .driver.pm = &i915_pm_ops,
-};
-
-static int __init i915_init(void)
-{
-       driver.num_ioctls = i915_max_ioctl;
-
-       /*
-        * Enable KMS by default, unless explicitly overridden by
-        * either the i915.modeset parameter or by the
-        * vga_text_mode_force boot option.
-        */
-
-       if (i915.modeset == 0)
-               driver.driver_features &= ~DRIVER_MODESET;
-
-       if (vgacon_text_force() && i915.modeset == -1)
-               driver.driver_features &= ~DRIVER_MODESET;
-
-       if (!(driver.driver_features & DRIVER_MODESET)) {
-               /* Silently fail loading to not upset userspace. */
-               DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
-               return 0;
-       }
-
-       if (i915.nuclear_pageflip)
-               driver.driver_features |= DRIVER_ATOMIC;
-
-       return drm_pci_init(&driver, &i915_pci_driver);
-}
-
-static void __exit i915_exit(void)
-{
-       if (!(driver.driver_features & DRIVER_MODESET))
-               return; /* Never loaded a driver. */
-
-       drm_pci_exit(&driver, &i915_pci_driver);
-}
-
-module_init(i915_init);
-module_exit(i915_exit);
-
-MODULE_AUTHOR("Tungsten Graphics, Inc.");
-MODULE_AUTHOR("Intel Corporation");
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
index 5faacc6e548d65cde601f478aa16c3efb7a5c6ca..915a3d0acff31fbf6b6044de4bde7a45174107ce 100644 (file)
@@ -47,6 +47,7 @@
 #include <drm/intel-gtt.h>
 #include <drm/drm_legacy.h> /* for struct drm_dma_handle */
 #include <drm/drm_gem.h>
+#include <drm/drm_auth.h>
 
 #include "i915_params.h"
 #include "i915_reg.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_render_state.h"
 
+#include "intel_gvt.h"
+
 /* General customization:
  */
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20160425"
+#define DRIVER_DATE            "20160711"
 
 #undef WARN_ON
 /* Many gcc versions seem to not see through this and fall over :( */
@@ -281,6 +284,9 @@ struct i915_hotplug {
        u32 short_port_mask;
        struct work_struct dig_port_work;
 
+       struct work_struct poll_init_work;
+       bool poll_enabled;
+
        /*
         * if we get a HPD irq from DP and a HPD irq from non-DP
         * the non-DP HPD could block the workqueue on a mode config
@@ -317,21 +323,36 @@ struct i915_hotplug {
                for_each_if ((__ports_mask) & (1 << (__port)))
 
 #define for_each_crtc(dev, crtc) \
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+       list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
 
 #define for_each_intel_plane(dev, intel_plane) \
        list_for_each_entry(intel_plane,                        \
-                           &dev->mode_config.plane_list,       \
+                           &(dev)->mode_config.plane_list,     \
                            base.head)
 
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask)                \
+       list_for_each_entry(intel_plane,                                \
+                           &(dev)->mode_config.plane_list,             \
+                           base.head)                                  \
+               for_each_if ((plane_mask) &                             \
+                            (1 << drm_plane_index(&intel_plane->base)))
+
 #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)     \
        list_for_each_entry(intel_plane,                                \
                            &(dev)->mode_config.plane_list,             \
                            base.head)                                  \
                for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
 
-#define for_each_intel_crtc(dev, intel_crtc) \
-       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
+#define for_each_intel_crtc(dev, intel_crtc)                           \
+       list_for_each_entry(intel_crtc,                                 \
+                           &(dev)->mode_config.crtc_list,              \
+                           base.head)
+
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask)           \
+       list_for_each_entry(intel_crtc,                                 \
+                           &(dev)->mode_config.crtc_list,              \
+                           base.head)                                  \
+               for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
 
 #define for_each_intel_encoder(dev, intel_encoder)             \
        list_for_each_entry(intel_encoder,                      \
@@ -340,7 +361,7 @@ struct i915_hotplug {
 
 #define for_each_intel_connector(dev, intel_connector)         \
        list_for_each_entry(intel_connector,                    \
-                           &dev->mode_config.connector_list,   \
+                           &(dev)->mode_config.connector_list, \
                            base.head)
 
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
@@ -462,6 +483,7 @@ struct drm_i915_error_state {
        struct timeval time;
 
        char error_msg[128];
+       bool simulated;
        int iommu;
        u32 reset_count;
        u32 suspend_count;
@@ -493,6 +515,7 @@ struct drm_i915_error_state {
                bool valid;
                /* Software tracked state */
                bool waiting;
+               int num_waiters;
                int hangcheck_score;
                enum intel_ring_hangcheck_action hangcheck_action;
                int num_requests;
@@ -538,6 +561,12 @@ struct drm_i915_error_state {
                        u32 tail;
                } *requests;
 
+               struct drm_i915_error_waiter {
+                       char comm[TASK_COMM_LEN];
+                       pid_t pid;
+                       u32 seqno;
+               } *waiters;
+
                struct {
                        u32 gfx_mode;
                        union {
@@ -588,6 +617,7 @@ struct drm_i915_display_funcs {
                                       struct intel_crtc_state *newstate);
        void (*initial_watermarks)(struct intel_crtc_state *cstate);
        void (*optimize_watermarks)(struct intel_crtc_state *cstate);
+       int (*compute_global_watermarks)(struct drm_atomic_state *state);
        void (*update_wm)(struct drm_crtc *crtc);
        int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
        void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -612,7 +642,7 @@ struct drm_i915_display_funcs {
                          struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_request *req,
                          uint32_t flags);
-       void (*hpd_irq_setup)(struct drm_device *dev);
+       void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
@@ -735,6 +765,7 @@ struct intel_csr {
        func(is_valleyview) sep \
        func(is_cherryview) sep \
        func(is_haswell) sep \
+       func(is_broadwell) sep \
        func(is_skylake) sep \
        func(is_broxton) sep \
        func(is_kabylake) sep \
@@ -749,7 +780,8 @@ struct intel_csr {
        func(has_llc) sep \
        func(has_snoop) sep \
        func(has_ddi) sep \
-       func(has_fpga_dbg)
+       func(has_fpga_dbg) sep \
+       func(has_pooled_eu)
 
 #define DEFINE_FLAG(name) u8 name:1
 #define SEP_SEMICOLON ;
@@ -757,9 +789,10 @@ struct intel_csr {
 struct intel_device_info {
        u32 display_mmio_offset;
        u16 device_id;
-       u8 num_pipes:3;
+       u8 num_pipes;
        u8 num_sprites[I915_MAX_PIPES];
        u8 gen;
+       u16 gen_mask;
        u8 ring_mask; /* Rings supported by the HW */
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
        /* Register offsets for the various display pipes and transcoders */
@@ -774,6 +807,7 @@ struct intel_device_info {
        u8 subslice_per_slice;
        u8 eu_total;
        u8 eu_per_subslice;
+       u8 min_eu_in_pool;
        /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
        u8 subslice_7eu[3];
        u8 has_slice_pg:1;
@@ -821,9 +855,8 @@ struct i915_ctx_hang_stats {
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0
 
-#define CONTEXT_NO_ZEROMAP (1<<0)
 /**
- * struct intel_context - as the name implies, represents a context.
+ * struct i915_gem_context - as the name implies, represents a context.
  * @ref: reference count.
  * @user_handle: userspace tracking identity for this context.
  * @remap_slice: l3 row remapping information.
@@ -841,33 +874,40 @@ struct i915_ctx_hang_stats {
  * Contexts are memory images used by the hardware to store copies of their
  * internal state.
  */
-struct intel_context {
+struct i915_gem_context {
        struct kref ref;
-       int user_handle;
-       uint8_t remap_slice;
        struct drm_i915_private *i915;
-       int flags;
        struct drm_i915_file_private *file_priv;
-       struct i915_ctx_hang_stats hang_stats;
        struct i915_hw_ppgtt *ppgtt;
 
-       /* Legacy ring buffer submission */
-       struct {
-               struct drm_i915_gem_object *rcs_state;
-               bool initialized;
-       } legacy_hw_ctx;
+       struct i915_ctx_hang_stats hang_stats;
 
-       /* Execlists */
-       struct {
+       unsigned long flags;
+#define CONTEXT_NO_ZEROMAP             BIT(0)
+#define CONTEXT_NO_ERROR_CAPTURE       BIT(1)
+       /* Unique identifier for this context, used by the hw for tracking */
+       unsigned hw_id;
+       u32 user_handle;
+
+       u32 ggtt_alignment;
+
+       struct intel_context {
                struct drm_i915_gem_object *state;
                struct intel_ringbuffer *ringbuf;
-               int pin_count;
                struct i915_vma *lrc_vma;
-               u64 lrc_desc;
                uint32_t *lrc_reg_state;
+               u64 lrc_desc;
+               int pin_count;
+               bool initialised;
        } engine[I915_NUM_ENGINES];
+       u32 ring_size;
+       u32 desc_template;
+       struct atomic_notifier_head status_notifier;
+       bool execlists_force_single_submission;
 
        struct list_head link;
+
+       u8 remap_slice;
 };
 
 enum fb_op_origin {
@@ -990,6 +1030,7 @@ enum intel_pch {
        PCH_CPT,        /* Cougarpoint PCH */
        PCH_LPT,        /* Lynxpoint PCH */
        PCH_SPT,        /* Sunrisepoint PCH */
+       PCH_KBP,        /* Kabypoint PCH */
        PCH_NOP,
 };
 
@@ -1115,6 +1156,8 @@ struct intel_gen6_power_mgmt {
        bool interrupts_enabled;
        u32 pm_iir;
 
+       u32 pm_intr_keep;
+
        /* Frequencies are stored in potentially platform dependent multiples.
         * In other words, *_freq needs to be multiplied by X to be interesting.
         * Soft limits are those which are used for the dynamic reclocking done
@@ -1281,38 +1324,12 @@ struct i915_gem_mm {
        /** LRU list of objects with fence regs on them. */
        struct list_head fence_list;
 
-       /**
-        * We leave the user IRQ off as much as possible,
-        * but this means that requests will finish and never
-        * be retired once the system goes idle. Set a timer to
-        * fire periodically while the ring is running. When it
-        * fires, go retire requests.
-        */
-       struct delayed_work retire_work;
-
-       /**
-        * When we detect an idle GPU, we want to turn on
-        * powersaving features. So once we see that there
-        * are no more requests outstanding and no more
-        * arrive within a small period of time, we fire
-        * off the idle_work.
-        */
-       struct delayed_work idle_work;
-
        /**
         * Are we in a non-interruptible section of code like
         * modesetting?
         */
        bool interruptible;
 
-       /**
-        * Is the GPU currently considered idle, or busy executing userspace
-        * requests?  Whilst idle, we attempt to power down the hardware and
-        * display clocks. In order to reduce the effect on performance, there
-        * is a slight delay before we do so.
-        */
-       bool busy;
-
        /* the indicator for dispatch video commands on two BSD rings */
        unsigned int bsd_ring_dispatch_index;
 
@@ -1349,7 +1366,6 @@ struct i915_gpu_error {
        /* Hang gpu twice in this window and your context gets banned */
 #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
 
-       struct workqueue_struct *hangcheck_wq;
        struct delayed_work hangcheck_work;
 
        /* For reset and error_state handling. */
@@ -1385,21 +1401,20 @@ struct i915_gpu_error {
 #define I915_RESET_IN_PROGRESS_FLAG    1
 #define I915_WEDGED                    (1 << 31)
 
+       /**
+        * Waitqueue to signal when a hang is detected. Used to for waiters
+        * to release the struct_mutex for the reset to procede.
+        */
+       wait_queue_head_t wait_queue;
+
        /**
         * Waitqueue to signal when the reset has completed. Used by clients
         * that wait for dev_priv->mm.wedged to settle.
         */
        wait_queue_head_t reset_queue;
 
-       /* Userspace knobs for gpu hang simulation;
-        * combines both a ring mask, and extra flags
-        */
-       u32 stop_rings;
-#define I915_STOP_RING_ALLOW_BAN       (1 << 31)
-#define I915_STOP_RING_ALLOW_WARN      (1 << 30)
-
        /* For missed irq/seqno simulation. */
-       unsigned int test_irq_rings;
+       unsigned long test_irq_rings;
 };
 
 enum modeset_restore {
@@ -1488,6 +1503,7 @@ struct intel_vbt_data {
                bool present;
                bool active_low_pwm;
                u8 min_brightness;      /* min_brightness/255 of max */
+               enum intel_backlight_type type;
        } backlight;
 
        /* MIPI DSI */
@@ -1580,7 +1596,7 @@ struct skl_ddb_allocation {
 };
 
 struct skl_wm_values {
-       bool dirty[I915_MAX_PIPES];
+       unsigned dirty_pipes;
        struct skl_ddb_allocation ddb;
        uint32_t wm_linetime[I915_MAX_PIPES];
        uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
@@ -1697,7 +1713,7 @@ struct i915_execbuffer_params {
        uint64_t                        batch_obj_vm_offset;
        struct intel_engine_cs *engine;
        struct drm_i915_gem_object      *batch_obj;
-       struct intel_context            *ctx;
+       struct i915_gem_context            *ctx;
        struct drm_i915_gem_request     *request;
 };
 
@@ -1709,7 +1725,8 @@ struct intel_wm_config {
 };
 
 struct drm_i915_private {
-       struct drm_device *dev;
+       struct drm_device drm;
+
        struct kmem_cache *objects;
        struct kmem_cache *vmas;
        struct kmem_cache *requests;
@@ -1724,6 +1741,8 @@ struct drm_i915_private {
 
        struct i915_virtual_gpu vgpu;
 
+       struct intel_gvt gvt;
+
        struct intel_guc guc;
 
        struct intel_csr csr;
@@ -1747,6 +1766,7 @@ struct drm_i915_private {
        wait_queue_head_t gmbus_wait_queue;
 
        struct pci_dev *bridge_dev;
+       struct i915_gem_context *kernel_context;
        struct intel_engine_cs engine[I915_NUM_ENGINES];
        struct drm_i915_gem_object *semaphore_obj;
        uint32_t last_seqno, next_seqno;
@@ -1802,13 +1822,17 @@ struct drm_i915_private {
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
        unsigned int fsb_freq, mem_freq, is_ddr3;
-       unsigned int skl_boot_cdclk;
+       unsigned int skl_preferred_vco_freq;
        unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
        unsigned int max_dotclk_freq;
        unsigned int rawclk_freq;
        unsigned int hpll_freq;
        unsigned int czclk_freq;
 
+       struct {
+               unsigned int vco, ref;
+       } cdclk_pll;
+
        /**
         * wq - Driver workqueue for GEM.
         *
@@ -1838,6 +1862,13 @@ struct drm_i915_private {
        DECLARE_HASHTABLE(mm_structs, 7);
        struct mutex mm_lock;
 
+       /* The hw wants to have a stable context identifier for the lifetime
+        * of the context (for OA, PASID, faults, etc). This is limited
+        * in execlists to 21 bits.
+        */
+       struct ida context_hw_ida;
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+
        /* Kernel Modesetting */
 
        struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
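
Per the comment above, each context needs a stable hardware ID below the 21-bit execlists limit, handed out from context_hw_ida. A plausible allocation helper on top of those fields (a sketch only; the helper name and error handling are assumptions, though ida_simple_get() is the stock kernel API):

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int id;

	/* Hand out an id in [0, MAX_CONTEXT_HW_ID); fails when exhausted. */
	id = ida_simple_get(&dev_priv->context_hw_ida, 0,
			    MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (id < 0)
		return id;

	*out = id;
	return 0;
}

The id would go back to the IDA via ida_simple_remove() when the context is freed.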
@@ -1950,9 +1981,6 @@ struct drm_i915_private {
                 */
                uint16_t skl_latency[8];
 
-               /* Committed wm config */
-               struct intel_wm_config config;
-
                /*
                 * The skl_wm_values structure is a bit too big for stack
                 * allocation, so we keep the staging struct where we store
@@ -1975,6 +2003,13 @@ struct drm_i915_private {
                 * cstate->wm.need_postvbl_update.
                 */
                struct mutex wm_mutex;
+
+               /*
+                * Set during HW readout of watermarks/DDB.  Some platforms
+                * need to know when we're still using BIOS-provided values
+                * (which we don't fully trust).
+                */
+               bool distrust_bios_wm;
        } wm;
 
        struct i915_runtime_pm pm;
@@ -1987,9 +2022,35 @@ struct drm_i915_private {
                int (*init_engines)(struct drm_device *dev);
                void (*cleanup_engine)(struct intel_engine_cs *engine);
                void (*stop_engine)(struct intel_engine_cs *engine);
-       } gt;
 
-       struct intel_context *kernel_context;
+               /**
+                * Is the GPU currently considered idle, or busy executing
+                * userspace requests? Whilst idle, we allow runtime power
+                * management to power down the hardware and display clocks.
+                * In order to reduce the effect on performance, there
+                * is a slight delay before we do so.
+                */
+               unsigned int active_engines;
+               bool awake;
+
+               /**
+                * We leave the user IRQ off as much as possible,
+                * but this means that requests will finish and never
+                * be retired once the system goes idle. Set a timer to
+                * fire periodically while the ring is running. When it
+                * fires, go retire requests.
+                */
+               struct delayed_work retire_work;
+
+               /**
+                * When we detect an idle GPU, we want to turn on
+                * powersaving features. So once we see that there
+                * are no more requests outstanding and no more
+                * arrive within a small period of time, we fire
+                * off the idle_work.
+                */
+               struct delayed_work idle_work;
+       } gt;
 
        /* perform PHY state sanity checks? */
        bool chv_phy_assert[2];
@@ -2004,7 +2065,7 @@ struct drm_i915_private {
 
 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
 {
-       return dev->dev_private;
+       return container_of(dev, struct drm_i915_private, drm);
 }
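
With drm_device now embedded in drm_i915_private rather than reached through dev_private, to_i915() becomes pure pointer arithmetic. A standalone userspace illustration of the container_of() mechanics (demo types only; the kernel macro additionally type-checks the member):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_drm_device { int dummy; };

struct demo_i915_private {
	int other_state;
	struct demo_drm_device drm;	/* embedded, as in the new layout */
};

int main(void)
{
	struct demo_i915_private priv;
	struct demo_drm_device *dev = &priv.drm;

	/* Subtracting the member offset recovers the enclosing object. */
	struct demo_i915_private *back =
		container_of(dev, struct demo_i915_private, drm);

	printf("%d\n", back == &priv);	/* prints 1 */
	return 0;
}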
 
 static inline struct drm_i915_private *dev_to_i915(struct device *dev)
@@ -2175,6 +2236,7 @@ struct drm_i915_gem_object {
 
        unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
+       unsigned int has_wc_mmap;
        unsigned int pin_display;
 
        struct sg_table *pages;
@@ -2227,9 +2289,81 @@ struct drm_i915_gem_object {
 };
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
-                      struct drm_i915_gem_object *new,
-                      unsigned frontbuffer_bits);
+static inline bool
+i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
+{
+       return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
+}
+
+/*
+ * Optimised SGL iterator for GEM objects
+ */
+static __always_inline struct sgt_iter {
+       struct scatterlist *sgp;
+       union {
+               unsigned long pfn;
+               dma_addr_t dma;
+       };
+       unsigned int curr;
+       unsigned int max;
+} __sgt_iter(struct scatterlist *sgl, bool dma) {
+       struct sgt_iter s = { .sgp = sgl };
+
+       if (s.sgp) {
+               s.max = s.curr = s.sgp->offset;
+               s.max += s.sgp->length;
+               if (dma)
+                       s.dma = sg_dma_address(s.sgp);
+               else
+                       s.pfn = page_to_pfn(sg_page(s.sgp));
+       }
+
+       return s;
+}
+
+/**
+ * __sg_next - return the next scatterlist entry in a list
+ * @sg:                The current sg entry
+ *
+ * Description:
+ *   If the entry is the last one, return NULL; otherwise, step to the
+ *   next element in the array (@sg + 1). If that element is a chain
+ *   pointer, follow it; otherwise return it directly.
+ **/
+static inline struct scatterlist *__sg_next(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+       BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+       return sg_is_last(sg) ? NULL :
+               likely(!sg_is_chain(++sg)) ? sg :
+               sg_chain_ptr(sg);
+}
+
+/**
+ * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
+ * @__dmap:    DMA address (output)
+ * @__iter:    'struct sgt_iter' (iterator state, internal)
+ * @__sgt:     sg_table to iterate over (input)
+ */
+#define for_each_sgt_dma(__dmap, __iter, __sgt)                                \
+       for ((__iter) = __sgt_iter((__sgt)->sgl, true);                 \
+            ((__dmap) = (__iter).dma + (__iter).curr);                 \
+            (((__iter).curr += PAGE_SIZE) < (__iter).max) ||           \
+            ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
+
+/**
+ * for_each_sgt_page - iterate over the pages of the given sg_table
+ * @__pp:      page pointer (output)
+ * @__iter:    'struct sgt_iter' (iterator state, internal)
+ * @__sgt:     sg_table to iterate over (input)
+ */
+#define for_each_sgt_page(__pp, __iter, __sgt)                         \
+       for ((__iter) = __sgt_iter((__sgt)->sgl, false);                \
+            ((__pp) = (__iter).pfn == 0 ? NULL :                       \
+             pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
+            (((__iter).curr += PAGE_SIZE) < (__iter).max) ||           \
+            ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
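+
+/*
+ * Illustrative sketch (not part of this patch): with the iterators above,
+ * walking the backing pages of an object whose pages are pinned reduces to
+ *
+ *	struct sgt_iter sgt_iter;
+ *	struct page *page;
+ *
+ *	for_each_sgt_page(page, sgt_iter, obj->pages)
+ *		set_page_dirty(page);
+ *
+ * for_each_sgt_dma() is used the same way, yielding dma_addr_t values
+ * rather than struct page pointers.
+ */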
 
 /**
  * Request queue structure.
@@ -2251,7 +2385,7 @@ struct drm_i915_gem_request {
        /** On Which ring this request was generated */
        struct drm_i915_private *i915;
        struct intel_engine_cs *engine;
-       unsigned reset_counter;
+       struct intel_signal_node signaling;
 
         /** GEM sequence number associated with the previous request,
          * when the HWS breadcrumb is equal to this the GPU is processing
@@ -2278,6 +2412,9 @@ struct drm_i915_gem_request {
        /** Position in the ringbuffer of the end of the whole request */
        u32 tail;
 
+       /** Preallocate space in the ringbuffer for emitting the request */
+       u32 reserved_space;
+
        /**
         * Context and ring buffer related to this request
         * Contexts are refcounted, so when this request is associated with a
@@ -2288,9 +2425,20 @@ struct drm_i915_gem_request {
         * i915_gem_request_free() will then decrement the refcount on the
         * context.
         */
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        struct intel_ringbuffer *ringbuf;
 
+       /**
+        * Context related to the previous request.
+        * As the contexts are accessed by the hardware until the switch is
+        * completed to a new context, the hardware may still be writing
+        * to the context object after the breadcrumb is visible. We must
+        * not unpin/unbind/prune that object whilst still active and so
+        * we keep the previous context pinned until the following (this)
+        * request is retired.
+        */
+       struct i915_gem_context *previous_context;
+
        /** Batch buffer related to this request if any (used for
            error state dump only) */
        struct drm_i915_gem_object *batch_obj;
@@ -2327,11 +2475,13 @@ struct drm_i915_gem_request {
        /** Execlists no. of times this request has been sent to the ELSP */
        int elsp_submitted;
 
+       /** Execlists context hardware id. */
+       unsigned ctx_hw_id;
 };
 
 struct drm_i915_gem_request * __must_check
 i915_gem_request_alloc(struct intel_engine_cs *engine,
-                      struct intel_context *ctx);
+                      struct i915_gem_context *ctx);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file);
@@ -2359,23 +2509,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
 static inline void
 i915_gem_request_unreference(struct drm_i915_gem_request *req)
 {
-       WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
        kref_put(&req->ref, i915_gem_request_free);
 }
 
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
-       struct drm_device *dev;
-
-       if (!req)
-               return;
-
-       dev = req->engine->dev;
-       if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
-               mutex_unlock(&dev->struct_mutex);
-}
-
 static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
                                           struct drm_i915_gem_request *src)
 {
@@ -2503,9 +2639,29 @@ struct drm_i915_cmd_table {
 #define INTEL_INFO(p)  (&__I915__(p)->info)
 #define INTEL_GEN(p)   (INTEL_INFO(p)->gen)
 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
-#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
 
 #define REVID_FOREVER          0xff
+#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
+
+#define GEN_FOREVER (0)
+/*
+ * Returns true if Gen is in the inclusive range [Start, End].
+ *
+ * Use GEN_FOREVER for an unbounded start and/or end.
+ */
+#define IS_GEN(p, s, e) ({ \
+       unsigned int __s = (s), __e = (e); \
+       BUILD_BUG_ON(!__builtin_constant_p(s)); \
+       BUILD_BUG_ON(!__builtin_constant_p(e)); \
+       if ((__s) != GEN_FOREVER) \
+               __s = (s) - 1; \
+       if ((__e) == GEN_FOREVER) \
+               __e = BITS_PER_LONG - 1; \
+       else \
+               __e = (e) - 1; \
+       !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
+})
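+
+/*
+ * Illustrative (not part of this patch): IS_GEN(dev_priv, 8, GEN_FOREVER)
+ * is true on gen8 and every later platform, while IS_GEN(dev_priv, 6, 7)
+ * matches gen6 and gen7 only.
+ */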
+
 /*
  * Return true if revision is in range [since,until] inclusive.
  *
@@ -2538,7 +2694,7 @@ struct drm_i915_cmd_table {
 #define IS_VALLEYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview)
 #define IS_CHERRYVIEW(dev)     (INTEL_INFO(dev)->is_cherryview)
 #define IS_HASWELL(dev)        (INTEL_INFO(dev)->is_haswell)
-#define IS_BROADWELL(dev)      (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev))
+#define IS_BROADWELL(dev)      (INTEL_INFO(dev)->is_broadwell)
 #define IS_SKYLAKE(dev)        (INTEL_INFO(dev)->is_skylake)
 #define IS_BROXTON(dev)                (INTEL_INFO(dev)->is_broxton)
 #define IS_KABYLAKE(dev)       (INTEL_INFO(dev)->is_kabylake)
@@ -2590,6 +2746,8 @@ struct drm_i915_cmd_table {
 #define SKL_REVID_D0           0x3
 #define SKL_REVID_E0           0x4
 #define SKL_REVID_F0           0x5
+#define SKL_REVID_G0           0x6
+#define SKL_REVID_H0           0x7
 
 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
 
@@ -2600,35 +2758,49 @@ struct drm_i915_cmd_table {
 
 #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
 
+#define KBL_REVID_A0           0x0
+#define KBL_REVID_B0           0x1
+#define KBL_REVID_C0           0x2
+#define KBL_REVID_D0           0x3
+#define KBL_REVID_E0           0x4
+
+#define IS_KBL_REVID(p, since, until) \
+       (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+
 /*
  * The genX designation typically refers to the render engine, so render
  * capability related checks should use IS_GEN, while display and other checks
  * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
  * chips, etc.).
  */
-#define IS_GEN2(dev)   (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev)   (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev)   (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev)   (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev)   (INTEL_INFO(dev)->gen == 6)
-#define IS_GEN7(dev)   (INTEL_INFO(dev)->gen == 7)
-#define IS_GEN8(dev)   (INTEL_INFO(dev)->gen == 8)
-#define IS_GEN9(dev)   (INTEL_INFO(dev)->gen == 9)
-
-#define RENDER_RING            (1<<RCS)
-#define BSD_RING               (1<<VCS)
-#define BLT_RING               (1<<BCS)
-#define VEBOX_RING             (1<<VECS)
-#define BSD2_RING              (1<<VCS2)
-#define ALL_ENGINES            (~0)
-
-#define HAS_BSD(dev)           (INTEL_INFO(dev)->ring_mask & BSD_RING)
-#define HAS_BSD2(dev)          (INTEL_INFO(dev)->ring_mask & BSD2_RING)
-#define HAS_BLT(dev)           (INTEL_INFO(dev)->ring_mask & BLT_RING)
-#define HAS_VEBOX(dev)         (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
+#define IS_GEN2(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
+#define IS_GEN3(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
+#define IS_GEN4(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
+#define IS_GEN5(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
+#define IS_GEN6(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
+#define IS_GEN7(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
+#define IS_GEN8(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
+#define IS_GEN9(dev)   (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
+
+#define ENGINE_MASK(id)        BIT(id)
+#define RENDER_RING    ENGINE_MASK(RCS)
+#define BSD_RING       ENGINE_MASK(VCS)
+#define BLT_RING       ENGINE_MASK(BCS)
+#define VEBOX_RING     ENGINE_MASK(VECS)
+#define BSD2_RING      ENGINE_MASK(VCS2)
+#define ALL_ENGINES    (~0)
+
+#define HAS_ENGINE(dev_priv, id) \
+       (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
+
+#define HAS_BSD(dev_priv)      HAS_ENGINE(dev_priv, VCS)
+#define HAS_BSD2(dev_priv)     HAS_ENGINE(dev_priv, VCS2)
+#define HAS_BLT(dev_priv)      HAS_ENGINE(dev_priv, BCS)
+#define HAS_VEBOX(dev_priv)    HAS_ENGINE(dev_priv, VECS)
+
 #define HAS_LLC(dev)           (INTEL_INFO(dev)->has_llc)
 #define HAS_SNOOP(dev)         (INTEL_INFO(dev)->has_snoop)
-#define HAS_EDRAM(dev)         (__I915__(dev)->edram_cap & EDRAM_ENABLED)
+#define HAS_EDRAM(dev)         (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
 #define HAS_WT(dev)            ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
                                 HAS_EDRAM(dev))
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
@@ -2646,9 +2818,10 @@ struct drm_i915_cmd_table {
 #define HAS_BROKEN_CS_TLB(dev)         (IS_I830(dev) || IS_845G(dev))
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
-#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-                                                IS_SKL_GT3(dev) || \
-                                                IS_SKL_GT4(dev))
+#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
+       (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
+        IS_SKL_GT3(dev_priv) || \
+        IS_SKL_GT4(dev_priv))
 
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -2686,12 +2859,18 @@ struct drm_i915_cmd_table {
                                 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
                                 IS_KABYLAKE(dev) || IS_BROXTON(dev))
 #define HAS_RC6(dev)           (INTEL_INFO(dev)->gen >= 6)
-#define HAS_RC6p(dev)          (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+#define HAS_RC6p(dev)          (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
 
 #define HAS_CSR(dev)   (IS_GEN9(dev))
 
-#define HAS_GUC_UCODE(dev)     (IS_GEN9(dev) && !IS_KABYLAKE(dev))
-#define HAS_GUC_SCHED(dev)     (IS_GEN9(dev) && !IS_KABYLAKE(dev))
+/*
+ * For now, anything with a GuC requires uCode loading, and then supports
+ * command submission once loaded. But these are logically independent
+ * properties, so we have separate macros to test them.
+ */
+#define HAS_GUC(dev)           (IS_GEN9(dev))
+#define HAS_GUC_UCODE(dev)     (HAS_GUC(dev))
+#define HAS_GUC_SCHED(dev)     (HAS_GUC(dev))
 
 #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
                                    INTEL_INFO(dev)->gen >= 8)
@@ -2700,6 +2879,8 @@ struct drm_i915_cmd_table {
                                 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
                                 !IS_BROXTON(dev))
 
+#define HAS_POOLED_EU(dev)     (INTEL_INFO(dev)->has_pooled_eu)
+
 #define INTEL_PCH_DEVICE_ID_MASK               0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE           0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE           0x1c00
@@ -2708,11 +2889,13 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE                0x9c00
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE           0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE                0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE           0xA200
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE           0x7100
 #define INTEL_PCH_P3X_DEVICE_ID_TYPE           0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE          0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
@@ -2734,13 +2917,22 @@ struct drm_i915_cmd_table {
 
 #include "i915_trace.h"
 
-extern const struct drm_ioctl_desc i915_ioctls[];
-extern int i915_max_ioctl;
+static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+       if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
+               return true;
+#endif
+       return false;
+}
 
 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
-/* i915_dma.c */
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+                               int enable_ppgtt);
+
+/* i915_drv.c */
 void __printf(3, 4)
 __i915_printk(struct drm_i915_private *dev_priv, const char *level,
              const char *fmt, ...);
@@ -2748,21 +2940,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 #define i915_report_error(dev_priv, fmt, ...)                             \
        __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
 
-extern int i915_driver_load(struct drm_device *, unsigned long flags);
-extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
-extern void i915_driver_lastclose(struct drm_device * dev);
-extern void i915_driver_preclose(struct drm_device *dev,
-                                struct drm_file *file);
-extern void i915_driver_postclose(struct drm_device *dev,
-                                 struct drm_file *file);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
 #endif
-extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
-extern bool intel_has_gpu_reset(struct drm_device *dev);
-extern int i915_reset(struct drm_device *dev);
+extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
+extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
+extern int i915_reset(struct drm_i915_private *dev_priv);
 extern int intel_guc_reset(struct drm_i915_private *dev_priv);
 extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2772,30 +2956,51 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
 
 /* intel_hotplug.c */
-void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+                          u32 pin_mask, u32 long_mask);
 void intel_hpd_init(struct drm_i915_private *dev_priv);
 void intel_hpd_init_work(struct drm_i915_private *dev_priv);
 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
 
 /* i915_irq.c */
-void i915_queue_hangcheck(struct drm_device *dev);
+static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
+{
+       unsigned long delay;
+
+       if (unlikely(!i915.enable_hangcheck))
+               return;
+
+       /* Don't continually defer the hangcheck so that it is always run at
+        * least once after work has been scheduled on any ring. Otherwise,
+        * we will ignore a hung ring if a second ring is kept busy.
+        */
+
+       delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
+       queue_delayed_work(system_long_wq,
+                          &dev_priv->gpu_error.hangcheck_work, delay);
+}
+
 __printf(3, 4)
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+                      u32 engine_mask,
                       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
-extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev,
+extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
+extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
                                        bool restore_forcewake);
-extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_i915_private *dev_priv);
 extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
 extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-extern void intel_uncore_fini(struct drm_device *dev);
-extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
+extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
+extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+                                        bool restore);
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
 void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
                                enum forcewake_domains domains);
@@ -2811,9 +3016,26 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-static inline bool intel_vgpu_active(struct drm_device *dev)
+
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+                           i915_reg_t reg,
+                           const u32 mask,
+                           const u32 value,
+                           const unsigned long timeout_ms);
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+                              i915_reg_t reg,
+                              const u32 mask,
+                              const u32 value,
+                              const unsigned long timeout_ms);
+
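+/*
+ * Illustrative sketch (not part of this patch): a caller polling for a
+ * status bit to clear might write, with SOME_REG and SOME_STATUS_BIT as
+ * stand-ins for a real register and mask:
+ *
+ *	err = intel_wait_for_register(dev_priv, SOME_REG,
+ *				      SOME_STATUS_BIT, 0, 100);
+ *	if (err)
+ *		DRM_ERROR("SOME_STATUS_BIT did not clear\n");
+ */
+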
+static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
 {
-       return to_i915(dev)->vgpu.active;
+       return dev_priv->gvt.initialized;
+}
+
+static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
+{
+       return dev_priv->vgpu.active;
 }
 
 void
@@ -2870,7 +3092,6 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
        ibx_display_interrupt_update(dev_priv, bits, 0);
 }
 
-
 /* i915_gem.c */
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
@@ -2909,7 +3130,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int i915_gem_get_tiling(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
-int i915_gem_init_userptr(struct drm_device *dev);
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file);
 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -2919,11 +3140,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 void i915_gem_load_init(struct drm_device *dev);
 void i915_gem_load_cleanup(struct drm_device *dev);
 void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
+
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_object_ops *ops);
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
                                                  size_t size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
                struct drm_device *dev, const void *data, size_t size);
@@ -2978,6 +3201,23 @@ static inline int __sg_page_count(struct scatterlist *sg)
 struct page *
 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
 
+static inline dma_addr_t
+i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
+{
+       if (n < obj->get_page.last) {
+               obj->get_page.sg = obj->pages->sgl;
+               obj->get_page.last = 0;
+       }
+
+       while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
+               obj->get_page.last += __sg_page_count(obj->get_page.sg++);
+               if (unlikely(sg_is_chain(obj->get_page.sg)))
+                       obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
+       }
+
+       return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
+}
+
 static inline struct page *
 i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
@@ -3054,6 +3294,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);
+
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+                      struct drm_i915_gem_object *new,
+                      unsigned frontbuffer_bits);
+
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -3063,31 +3308,34 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
        return (int32_t)(seq1 - seq2) >= 0;
 }
 
-static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
-                                          bool lazy_coherency)
+static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
 {
-       if (!lazy_coherency && req->engine->irq_seqno_barrier)
-               req->engine->irq_seqno_barrier(req->engine);
-       return i915_seqno_passed(req->engine->get_seqno(req->engine),
+       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
                                 req->previous_seqno);
 }
 
-static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
-                                             bool lazy_coherency)
+static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
 {
-       if (!lazy_coherency && req->engine->irq_seqno_barrier)
-               req->engine->irq_seqno_barrier(req->engine);
-       return i915_seqno_passed(req->engine->get_seqno(req->engine),
+       return i915_seqno_passed(intel_engine_get_seqno(req->engine),
                                 req->seqno);
 }
 
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+bool __i915_spin_request(const struct drm_i915_gem_request *request,
+                        int state, unsigned long timeout_us);
+static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
+                                    int state, unsigned long timeout_us)
+{
+       return (i915_gem_request_started(request) &&
+               __i915_spin_request(request, state, timeout_us));
+}
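+
+/*
+ * Illustrative note (not part of this patch): i915_spin_request() lets a
+ * waiter busy-poll an already-started request for a few microseconds,
+ * e.g. i915_spin_request(req, TASK_INTERRUPTIBLE, 5), before falling
+ * back to the heavier interrupt-driven sleep.
+ */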
+
+int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
-bool i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3130,27 +3378,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
        return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
 }
 
-static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
-{
-       return dev_priv->gpu_error.stop_rings == 0 ||
-               dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
-}
-
-static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
-{
-       return dev_priv->gpu_error.stop_rings == 0 ||
-               dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
-}
-
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
                        struct drm_i915_gem_object *batch_obj,
@@ -3215,8 +3450,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
                        struct i915_address_space *vm);
 
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-                               struct i915_address_space *vm);
 struct i915_vma *
 i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
                    struct i915_address_space *vm);
@@ -3251,14 +3484,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
        return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
 }
 
-static inline unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-       return i915_gem_obj_size(obj, &ggtt->base);
-}
+unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
@@ -3272,12 +3499,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
                                   alignment, flags | PIN_GLOBAL);
 }
 
-static inline int
-i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
-{
-       return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
-}
-
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
                                     const struct i915_ggtt_view *view);
 static inline void
@@ -3301,28 +3522,44 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_lost(struct drm_i915_private *dev_priv);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_gem_request *req);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
 struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
-static inline void i915_gem_context_reference(struct intel_context *ctx)
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev);
+
+static inline struct i915_gem_context *
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+       struct i915_gem_context *ctx;
+
+       lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
+
+       ctx = idr_find(&file_priv->context_idr, id);
+       if (!ctx)
+               return ERR_PTR(-ENOENT);
+
+       return ctx;
+}
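+
+/*
+ * Illustrative sketch (not part of this patch): an ioctl handler holding
+ * struct_mutex would resolve a user-supplied context id like so, assuming
+ * file_priv is the caller's drm_i915_file_private:
+ *
+ *	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+ *	if (IS_ERR(ctx))
+ *		return PTR_ERR(ctx);
+ */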
+
+static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
 {
        kref_get(&ctx->ref);
 }
 
-static inline void i915_gem_context_unreference(struct intel_context *ctx)
+static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
 {
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        kref_put(&ctx->ref, i915_gem_context_free);
 }
 
-static inline bool i915_gem_context_is_default(const struct intel_context *c)
+static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
 {
        return c->user_handle == DEFAULT_CONTEXT_HANDLE;
 }
@@ -3335,6 +3572,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
+                                      struct drm_file *file);
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -3349,9 +3588,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 
 /* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
+static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                intel_gtt_chipset_flush();
 }
 
@@ -3392,7 +3631,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj->tiling_mode != I915_TILING_NONE;
@@ -3406,12 +3645,14 @@ int i915_verify_lists(struct drm_device *dev);
 #endif
 
 /* i915_debugfs.c */
-int i915_debugfs_init(struct drm_minor *minor);
-void i915_debugfs_cleanup(struct drm_minor *minor);
 #ifdef CONFIG_DEBUG_FS
+int i915_debugfs_register(struct drm_i915_private *dev_priv);
+void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
 int i915_debugfs_connector_add(struct drm_connector *connector);
 void intel_display_crc_init(struct drm_device *dev);
 #else
+static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
+static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
 static inline int i915_debugfs_connector_add(struct drm_connector *connector)
 { return 0; }
 static inline void intel_display_crc_init(struct drm_device *dev) {}
@@ -3430,18 +3671,19 @@ static inline void i915_error_state_buf_release(
 {
        kfree(eb->buf);
 }
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+                             u32 engine_mask,
                              const char *error_msg);
 void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv);
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
 void i915_destroy_error_state(struct drm_device *dev);
 
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
-int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
 int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
 void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
 bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
@@ -3481,6 +3723,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
@@ -3489,31 +3732,33 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
 
 /* intel_opregion.c */
 #ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_device *dev);
-extern void intel_opregion_init(struct drm_device *dev);
-extern void intel_opregion_fini(struct drm_device *dev);
-extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
+extern void intel_opregion_register(struct drm_i915_private *dev_priv);
+extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
 extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                                         bool enable);
-extern int intel_opregion_notify_adapter(struct drm_device *dev,
+extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
                                         pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_device *dev);
+extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
 #else
-static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
-static inline void intel_opregion_init(struct drm_device *dev) { return; }
-static inline void intel_opregion_fini(struct drm_device *dev) { return; }
-static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int intel_opregion_setup(struct drm_i915_private *dev_priv) { return 0; }
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
 static inline int
 intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
 {
        return 0;
 }
 static inline int
-intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, pci_power_t state)
 {
        return 0;
 }
-static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 {
        return -ENODEV;
 }
@@ -3528,36 +3773,45 @@ static inline void intel_register_dsm_handler(void) { return; }
 static inline void intel_unregister_dsm_handler(void) { return; }
 #endif /* CONFIG_ACPI */
 
+/* intel_device_info.c */
+static inline struct intel_device_info *
+mkwrite_device_info(struct drm_i915_private *dev_priv)
+{
+       return (struct intel_device_info *)&dev_priv->info;
+}
+
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
+void intel_device_info_dump(struct drm_i915_private *dev_priv);
+
 /* modesetting */
 extern void intel_modeset_init_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
-extern void intel_connector_unregister(struct intel_connector *);
+extern int intel_connector_register(struct drm_connector *);
+extern void intel_connector_unregister(struct drm_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_display_resume(struct drm_device *dev);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void intel_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                                  bool enable);
-extern void intel_detect_pch(struct drm_device *dev);
-extern int intel_enable_rc6(const struct drm_device *dev);
 
-extern bool i915_semaphore_is_enabled(struct drm_device *dev);
+extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
-int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
-                              struct drm_file *file);
 
 /* overlay */
-extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
+extern struct intel_overlay_error_state *
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct intel_overlay_error_state *error);
 
-extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
@@ -3586,6 +3840,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 
+/* intel_dpio_phy.c */
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 deemph_reg_value, u32 margin_reg_value,
+                             bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+                             bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 demph_reg_value, u32 preemph_reg_value,
+                             u32 uniqtranscale_reg_value, u32 tx3_demph);
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void vlv_phy_reset_lanes(struct intel_encoder *encoder);
+
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 
@@ -3659,6 +3931,7 @@ __raw_write(64, q)
  */
 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
 
 /* "Broadcast RGB" property */
@@ -3722,12 +3995,80 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
                            schedule_timeout_uninterruptible(remaining_jiffies);
        }
 }
-
-static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
-                                     struct drm_i915_gem_request *req)
+
+static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
 {
-       if (engine->trace_irq_req == NULL && engine->irq_get(engine))
-               i915_gem_request_assign(&engine->trace_irq_req, req);
+       struct intel_engine_cs *engine = req->engine;
+
+       /* Before we do the heavier coherent read of the seqno,
+        * check the value (hopefully) in the CPU cacheline.
+        */
+       if (i915_gem_request_completed(req))
+               return true;
+
+       /* Ensure our read of the seqno is coherent so that we
+        * do not "miss an interrupt" (i.e. if this is the last
+        * request and the seqno write from the GPU is not visible
+        * by the time the interrupt fires, we will see that the
+        * request is incomplete and go back to sleep awaiting
+        * another interrupt that will never come.)
+        *
+        * Strictly, we only need to do this once after an interrupt,
+        * but it is easier and safer to do it every time the waiter
+        * is woken.
+        */
+       if (engine->irq_seqno_barrier &&
+           READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
+           cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
+               struct task_struct *tsk;
+
+               /* The ordering of irq_posted versus applying the barrier
+                * is crucial. The clearing of the current irq_posted must
+                * be visible before we perform the barrier operation,
+                * such that if a subsequent interrupt arrives, irq_posted
+                * is reasserted and our task rewoken (which causes us to
+                * do another __i915_request_irq_complete() immediately
+                * and reapply the barrier). Conversely, if the clear
+                * occurs after the barrier, then an interrupt that arrived
+                * whilst we waited on the barrier would not trigger a
+                * barrier on the next pass, and the read may not see the
+                * seqno update.
+                */
+               engine->irq_seqno_barrier(engine);
+
+               /* If we consume the irq, but we are no longer the bottom-half,
+                * the real bottom-half may not have serialised their own
+                * seqno check with the irq-barrier (i.e. may have inspected
+                * the seqno before we believe it coherent since they see
+                * irq_posted == false but we are still running).
+                */
+               rcu_read_lock();
+               tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+               if (tsk && tsk != current)
+                       /* Note that if the bottom-half is changed as we
+                        * are sending the wake-up, the new bottom-half will
+                        * be woken by whoever made the change. We only have
+                        * to worry about when we steal the irq-posted for
+                        * ourself.
+                        */
+                       wake_up_process(tsk);
+               rcu_read_unlock();
+
+               if (i915_gem_request_completed(req))
+                       return true;
+       }
+
+       /* We need to check whether any gpu reset happened in between
+        * the request being submitted and now. If a reset has occurred,
+        * the seqno will have been advance past ours and our request
+        * is complete. If we are in the process of handling a reset,
+        * the request is effectively complete as the rendering will
+        * be discarded, but we need to return in order to drop the
+        * struct_mutex.
+        */
+       if (i915_reset_in_progress(&req->i915->gpu_error))
+               return true;
+
+       return false;
 }
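+
+/*
+ * Illustrative sketch (not part of this patch): a waiter built on the
+ * helper above follows the usual sleep/wake pattern, re-checking
+ * completion after every wake-up:
+ *
+ *	for (;;) {
+ *		set_current_state(TASK_INTERRUPTIBLE);
+ *		if (__i915_request_irq_complete(req))
+ *			break;
+ *		schedule();
+ *	}
+ *	__set_current_state(TASK_RUNNING);
+ */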
 
 #endif
index aad26851cee3c87514fc9f4162a7bff2ac88ec75..7fd44980798fd048714e927e8a305b94795175a0 100644 (file)
@@ -54,12 +54,33 @@ static bool cpu_cache_is_coherent(struct drm_device *dev,
 
 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
+       if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+               return false;
+
        if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return true;
 
        return obj->pin_display;
 }
 
+static int
+insert_mappable_node(struct drm_i915_private *i915,
+                     struct drm_mm_node *node, u32 size)
+{
+       memset(node, 0, sizeof(*node));
+       return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
+                                                  size, 0, 0, 0,
+                                                  i915->ggtt.mappable_end,
+                                                  DRM_MM_SEARCH_DEFAULT,
+                                                  DRM_MM_CREATE_DEFAULT);
+}
+
+static void
+remove_mappable_node(struct drm_mm_node *node)
+{
+       drm_mm_remove_node(node);
+}
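+
+/*
+ * These two helpers back the GTT pread/pwrite fallbacks below: when an
+ * object cannot be pinned into the mappable aperture, a single spare
+ * GGTT page is reserved and each object page is mapped through it in
+ * turn via ggtt->base.insert_page().
+ */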
+
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
@@ -107,7 +128,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -177,7 +198,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
                vaddr += PAGE_SIZE;
        }
 
-       i915_gem_chipset_flush(obj->base.dev);
+       i915_gem_chipset_flush(to_i915(obj->base.dev));
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
@@ -347,7 +368,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        }
 
        drm_clflush_virt_range(vaddr, args->size);
-       i915_gem_chipset_flush(dev);
+       i915_gem_chipset_flush(to_i915(dev));
 
 out:
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -356,13 +377,13 @@ out:
 
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        kmem_cache_free(dev_priv->objects, obj);
 }
 
@@ -381,9 +402,9 @@ i915_gem_create(struct drm_file *file,
                return -EINVAL;
 
        /* Allocate the new object */
-       obj = i915_gem_alloc_object(dev, size);
-       if (obj == NULL)
-               return -ENOMEM;
+       obj = i915_gem_object_create(dev, size);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
@@ -409,6 +430,9 @@ i915_gem_dumb_create(struct drm_file *file,
 
 /**
  * Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  */
 int
 i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -484,7 +508,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 
        *needs_clflush = 0;
 
-       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+       if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
                return -EINVAL;
 
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -585,6 +609,142 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
        return ret ? - EFAULT : 0;
 }
 
+static inline unsigned long
+slow_user_access(struct io_mapping *mapping,
+                uint64_t page_base, int page_offset,
+                char __user *user_data,
+                unsigned long length, bool pwrite)
+{
+       void __iomem *ioaddr;
+       void *vaddr;
+       uint64_t unwritten;
+
+       ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
+       /* We can use the cpu mem copy function because this is X86. */
+       vaddr = (void __force *)ioaddr + page_offset;
+       if (pwrite)
+               unwritten = __copy_from_user(vaddr, user_data, length);
+       else
+               unwritten = __copy_to_user(user_data, vaddr, length);
+
+       io_mapping_unmap(ioaddr);
+       return unwritten;
+}
+
+static int
+i915_gem_gtt_pread(struct drm_device *dev,
+                  struct drm_i915_gem_object *obj, uint64_t size,
+                  uint64_t data_offset, uint64_t data_ptr)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct drm_mm_node node;
+       char __user *user_data;
+       uint64_t remain;
+       uint64_t offset;
+       int ret;
+
+       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+       if (ret) {
+               ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+               if (ret)
+                       goto out;
+
+               ret = i915_gem_object_get_pages(obj);
+               if (ret) {
+                       remove_mappable_node(&node);
+                       goto out;
+               }
+
+               i915_gem_object_pin_pages(obj);
+       } else {
+               node.start = i915_gem_obj_ggtt_offset(obj);
+               node.allocated = false;
+               ret = i915_gem_object_put_fence(obj);
+               if (ret)
+                       goto out_unpin;
+       }
+
+       ret = i915_gem_object_set_to_gtt_domain(obj, false);
+       if (ret)
+               goto out_unpin;
+
+       user_data = u64_to_user_ptr(data_ptr);
+       remain = size;
+       offset = data_offset;
+
+       mutex_unlock(&dev->struct_mutex);
+       if (likely(!i915.prefault_disable)) {
+               ret = fault_in_multipages_writeable(user_data, remain);
+               if (ret) {
+                       mutex_lock(&dev->struct_mutex);
+                       goto out_unpin;
+               }
+       }
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * page_base = page offset within aperture
+                * page_offset = offset within page
+                * page_length = bytes to copy for this page
+                */
+               u32 page_base = node.start;
+               unsigned page_offset = offset_in_page(offset);
+               unsigned page_length = PAGE_SIZE - page_offset;
+               page_length = remain < page_length ? remain : page_length;
+               if (node.allocated) {
+                       wmb();
+                       ggtt->base.insert_page(&ggtt->base,
+                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+                                              node.start,
+                                              I915_CACHE_NONE, 0);
+                       wmb();
+               } else {
+                       page_base += offset & PAGE_MASK;
+               }
+               /* This is a slow read/write as it tries to read from
+                * and write to user memory, which may result in page
+                * faults, and so we cannot perform this under struct_mutex.
+                */
+               if (slow_user_access(ggtt->mappable, page_base,
+                                    page_offset, user_data,
+                                    page_length, false)) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+               /* The user has modified the object whilst we tried
+                * reading from it, and we now have no idea what domain
+                * the pages should be in. As we have just been touching
+                * them directly, flush everything back to the GTT
+                * domain.
+                */
+               ret = i915_gem_object_set_to_gtt_domain(obj, false);
+       }
+
+out_unpin:
+       if (node.allocated) {
+               wmb();
+               ggtt->base.clear_range(&ggtt->base,
+                                      node.start, node.size,
+                                      true);
+               i915_gem_object_unpin_pages(obj);
+               remove_mappable_node(&node);
+       } else {
+               i915_gem_object_ggtt_unpin(obj);
+       }
+out:
+       return ret;
+}
+
 static int
 i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
@@ -600,6 +760,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;
 
+       if (!i915_gem_object_has_struct_page(obj))
+               return -ENODEV;
+
        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
 
@@ -672,6 +835,9 @@ out:
 
 /**
  * Reads data from the object referenced by handle.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  *
  * On error, the contents of *data are undefined.
  */
@@ -708,18 +874,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       /* prime objects have no backing filp to GEM pread/pwrite
-        * pages from.
-        */
-       if (!obj->base.filp) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        trace_i915_gem_object_pread(obj, args->offset, args->size);
 
        ret = i915_gem_shmem_pread(dev, obj, args, file);
 
+       /* pread fallback for non-shmem backed objects */
+       if (ret == -EFAULT || ret == -ENODEV)
+               ret = i915_gem_gtt_pread(dev, obj, args->size,
+                                       args->offset, args->data_ptr);
+
 out:
        drm_gem_object_unreference(&obj->base);
 unlock:
@@ -753,60 +916,99 @@ fast_user_write(struct io_mapping *mapping,
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
+ * @i915: i915 device private
+ * @obj: i915 gem object
+ * @args: pwrite arguments structure
+ * @file: drm file pointer
  */
 static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                         struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       ssize_t remain;
-       loff_t offset, page_base;
+       struct i915_ggtt *ggtt = &i915->ggtt;
+       struct drm_device *dev = obj->base.dev;
+       struct drm_mm_node node;
+       uint64_t remain, offset;
        char __user *user_data;
-       int page_offset, page_length, ret;
+       int ret;
+       bool hit_slow_path = false;
+
+       if (obj->tiling_mode != I915_TILING_NONE)
+               return -EFAULT;
 
        ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
-       if (ret)
-               goto out;
+       if (ret) {
+               ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+               if (ret)
+                       goto out;
+
+               ret = i915_gem_object_get_pages(obj);
+               if (ret) {
+                       remove_mappable_node(&node);
+                       goto out;
+               }
+
+               i915_gem_object_pin_pages(obj);
+       } else {
+               node.start = i915_gem_obj_ggtt_offset(obj);
+               node.allocated = false;
+               ret = i915_gem_object_put_fence(obj);
+               if (ret)
+                       goto out_unpin;
+       }
 
        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;
 
-       ret = i915_gem_object_put_fence(obj);
-       if (ret)
-               goto out_unpin;
+       intel_fb_obj_invalidate(obj, ORIGIN_GTT);
+       obj->dirty = true;
 
        user_data = u64_to_user_ptr(args->data_ptr);
+       offset = args->offset;
        remain = args->size;
-
-       offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
-
-       intel_fb_obj_invalidate(obj, ORIGIN_GTT);
-
-       while (remain > 0) {
+       while (remain) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
-               page_base = offset & PAGE_MASK;
-               page_offset = offset_in_page(offset);
-               page_length = remain;
-               if ((page_offset + remain) > PAGE_SIZE)
-                       page_length = PAGE_SIZE - page_offset;
-
+               u32 page_base = node.start;
+               unsigned page_offset = offset_in_page(offset);
+               unsigned page_length = PAGE_SIZE - page_offset;
+               page_length = remain < page_length ? remain : page_length;
+               if (node.allocated) {
+                       wmb(); /* flush the write before we modify the GGTT */
+                       ggtt->base.insert_page(&ggtt->base,
+                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+                                              node.start, I915_CACHE_NONE, 0);
+                       wmb(); /* flush modifications to the GGTT (insert_page) */
+               } else {
+                       page_base += offset & PAGE_MASK;
+               }
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
+                * If the object is non-shmem backed, we fall back to the
+                * path that handles page faults.
                 */
                if (fast_user_write(ggtt->mappable, page_base,
                                    page_offset, user_data, page_length)) {
-                       ret = -EFAULT;
-                       goto out_flush;
+                       hit_slow_path = true;
+                       mutex_unlock(&dev->struct_mutex);
+                       if (slow_user_access(ggtt->mappable,
+                                            page_base,
+                                            page_offset, user_data,
+                                            page_length, true)) {
+                               ret = -EFAULT;
+                               mutex_lock(&dev->struct_mutex);
+                               goto out_flush;
+                       }
+
+                       mutex_lock(&dev->struct_mutex);
                }
 
                remain -= page_length;
@@ -815,9 +1017,31 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        }
 
 out_flush:
+       if (hit_slow_path) {
+               if (ret == 0 &&
+                   (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+                       /* The user has modified the object whilst we tried
+                        * reading from it, and we now have no idea what domain
+                        * the pages should be in. As we have just been touching
+                        * them directly, flush everything back to the GTT
+                        * domain.
+                        */
+                       ret = i915_gem_object_set_to_gtt_domain(obj, false);
+               }
+       }
+
        intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
-       i915_gem_object_ggtt_unpin(obj);
+       if (node.allocated) {
+               wmb();
+               ggtt->base.clear_range(&ggtt->base,
+                                      node.start, node.size,
+                                      true);
+               i915_gem_object_unpin_pages(obj);
+               remove_mappable_node(&node);
+       } else {
+               i915_gem_object_ggtt_unpin(obj);
+       }
 out:
        return ret;
 }
@@ -1006,7 +1230,7 @@ out:
        }
 
        if (needs_clflush_after)
-               i915_gem_chipset_flush(dev);
+               i915_gem_chipset_flush(to_i915(dev));
        else
                obj->cache_dirty = true;
 
@@ -1016,6 +1240,9 @@ out:
 
 /**
  * Writes data to the object referenced by handle.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
  *
  * On error, the contents of the buffer that were to be modified are undefined.
  */
@@ -1023,7 +1250,7 @@ int
 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -1062,14 +1289,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       /* prime objects have no backing filp to GEM pread/pwrite
-        * pages from.
-        */
-       if (!obj->base.filp) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
        ret = -EFAULT;
@@ -1079,20 +1298,21 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
-       if (obj->tiling_mode == I915_TILING_NONE &&
-           obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+       if (!i915_gem_object_has_struct_page(obj) ||
            cpu_write_needs_clflush(obj)) {
-               ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+               ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fall back to the shmem path in that case. */
        }
 
-       if (ret == -EFAULT || ret == -ENOSPC) {
+       if (ret == -EFAULT) {
                if (obj->phys_handle)
                        ret = i915_gem_phys_pwrite(obj, args, file);
-               else
+               else if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+               else
+                       ret = -ENODEV;
        }
 
 out:
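After this change the ioctl drives its fallbacks purely off the fast path's error code: -EFAULT sends the write to the phys or shmem path, and objects without struct pages end in -ENODEV. A stubbed sketch of that dispatch, with hypothetical stand-ins for the driver helpers:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int gtt_pwrite_fast(void) { return -EFAULT; } /* assume fast path faulted */
static int phys_pwrite(void)     { return 0; }
static int shmem_pwrite(void)    { return 0; }

static int pwrite_dispatch(bool has_phys_handle, bool has_struct_page)
{
	int ret = gtt_pwrite_fast();

	if (ret == -EFAULT) {
		if (has_phys_handle)
			ret = phys_pwrite();
		else if (has_struct_page)
			ret = shmem_pwrite();
		else
			ret = -ENODEV; /* no backing store we can write through */
	}
	return ret;
}

int main(void)
{
	printf("ret=%d\n", pwrite_dispatch(false, true));
	return 0;
}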
@@ -1123,17 +1343,6 @@ i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
        return 0;
 }
 
-static void fake_irq(unsigned long data)
-{
-       wake_up_process((struct task_struct *)data);
-}
-
-static bool missed_irq(struct drm_i915_private *dev_priv,
-                      struct intel_engine_cs *engine)
-{
-       return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
-}
-
 static unsigned long local_clock_us(unsigned *cpu)
 {
        unsigned long t;
@@ -1166,9 +1375,9 @@ static bool busywait_stop(unsigned long timeout, unsigned cpu)
        return this_cpu != cpu;
 }
 
-static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
+bool __i915_spin_request(const struct drm_i915_gem_request *req,
+                        int state, unsigned long timeout_us)
 {
-       unsigned long timeout;
        unsigned cpu;
 
        /* When waiting for high frequency requests, e.g. during synchronous
@@ -1181,31 +1390,21 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
         * takes to sleep on a request, on the order of a microsecond.
         */
 
-       if (req->engine->irq_refcount)
-               return -EBUSY;
-
-       /* Only spin if we know the GPU is processing this request */
-       if (!i915_gem_request_started(req, true))
-               return -EAGAIN;
-
-       timeout = local_clock_us(&cpu) + 5;
-       while (!need_resched()) {
-               if (i915_gem_request_completed(req, true))
-                       return 0;
+       timeout_us += local_clock_us(&cpu);
+       do {
+               if (i915_gem_request_completed(req))
+                       return true;
 
                if (signal_pending_state(state, current))
                        break;
 
-               if (busywait_stop(timeout, cpu))
+               if (busywait_stop(timeout_us, cpu))
                        break;
 
                cpu_relax_lowlatency();
-       }
+       } while (!need_resched());
 
-       if (i915_gem_request_completed(req, false))
-               return 0;
-
-       return -EAGAIN;
+       return false;
 }
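__i915_spin_request now takes an explicit microsecond budget instead of hard-coding one jiffie. A userspace analogue of the same bounded busy-wait, assuming CLOCK_MONOTONIC in place of local_clock_us() and a stubbed completion check (need_resched() has no userspace equivalent and is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static bool request_completed(void) { return false; } /* stub */

static bool spin_for(unsigned long timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	do {
		if (request_completed())
			return true;  /* success: avoided sleeping entirely */
	} while (now_us() < deadline);

	return false; /* budget exhausted; caller falls back to sleeping */
}

int main(void)
{
	return spin_for(5) ? 0 : 1;
}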
 
 /**
@@ -1213,6 +1412,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
  * @req: request to wait on
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ * @rps: RPS client
  *
 * Note: It is of utmost importance that the passed-in seqno and reset_counter
 * values have been read by the caller in an SMP-safe manner. Where read-side
@@ -1229,26 +1429,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                        s64 *timeout,
                        struct intel_rps_client *rps)
 {
-       struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const bool irq_test_in_progress =
-               ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-       DEFINE_WAIT(wait);
-       unsigned long timeout_expire;
+       DEFINE_WAIT(reset);
+       struct intel_wait wait;
+       unsigned long timeout_remain;
        s64 before = 0; /* Only to silence a compiler warning. */
-       int ret;
+       int ret = 0;
 
-       WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
+       might_sleep();
 
        if (list_empty(&req->list))
                return 0;
 
-       if (i915_gem_request_completed(req, true))
+       if (i915_gem_request_completed(req))
                return 0;
 
-       timeout_expire = 0;
+       timeout_remain = MAX_SCHEDULE_TIMEOUT;
        if (timeout) {
                if (WARN_ON(*timeout < 0))
                        return -EINVAL;
@@ -1256,7 +1452,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                if (*timeout == 0)
                        return -ETIME;
 
-               timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+               timeout_remain = nsecs_to_jiffies_timeout(*timeout);
 
                /*
                 * Record current time in case interrupted by signal, or wedged.
@@ -1264,75 +1460,76 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                before = ktime_get_raw_ns();
        }
 
-       if (INTEL_INFO(dev_priv)->gen >= 6)
-               gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
-
        trace_i915_gem_request_wait_begin(req);
 
-       /* Optimistic spin for the next jiffie before touching IRQs */
-       ret = __i915_spin_request(req, state);
-       if (ret == 0)
-               goto out;
-
-       if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
-               ret = -ENODEV;
-               goto out;
-       }
+       /* This client is about to stall waiting for the GPU. In many cases
+        * this is undesirable and limits the throughput of the system, as
+        * many clients cannot continue processing user input/output whilst
+        * blocked. RPS autotuning may take tens of milliseconds to respond
+        * to the GPU load and thus incurs additional latency for the client.
+        * We can circumvent that by promoting the GPU frequency to maximum
+        * before we wait. This makes the GPU throttle up much more quickly
+        * (good for benchmarks and user experience, e.g. window animations),
+        * but at a cost of spending more power processing the workload
+        * (bad for battery). Not all clients even want their results
+        * immediately and for them we should just let the GPU select its own
+        * frequency to maximise efficiency. To prevent a single client from
+        * forcing the clocks too high for the whole system, we only allow
+        * each client to waitboost once in a busy period.
+        */
+       if (INTEL_INFO(req->i915)->gen >= 6)
+               gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
 
-       for (;;) {
-               struct timer_list timer;
+       /* Optimistic spin for the next ~jiffie before touching IRQs */
+       if (i915_spin_request(req, state, 5))
+               goto complete;
 
-               prepare_to_wait(&engine->irq_queue, &wait, state);
+       set_current_state(state);
+       add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-               /* We need to check whether any gpu reset happened in between
-                * the request being submitted and now. If a reset has occurred,
-                * the request is effectively complete (we either are in the
-                * process of or have discarded the rendering and completely
-                * reset the GPU. The results of the request are lost and we
-                * are free to continue on with the original operation.
+       intel_wait_init(&wait, req->seqno);
+       if (intel_engine_add_wait(req->engine, &wait))
+               /* In order to check that we haven't missed the interrupt
+                * as we enabled it, we need to kick ourselves to do a
+                * coherent check on the seqno before we sleep.
                 */
-               if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
-                       ret = 0;
-                       break;
-               }
-
-               if (i915_gem_request_completed(req, false)) {
-                       ret = 0;
-                       break;
-               }
+               goto wakeup;
 
+       for (;;) {
                if (signal_pending_state(state, current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
 
-               if (timeout && time_after_eq(jiffies, timeout_expire)) {
+               timeout_remain = io_schedule_timeout(timeout_remain);
+               if (timeout_remain == 0) {
                        ret = -ETIME;
                        break;
                }
 
-               timer.function = NULL;
-               if (timeout || missed_irq(dev_priv, engine)) {
-                       unsigned long expire;
+               if (intel_wait_complete(&wait))
+                       break;
 
-                       setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-                       expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
-                       mod_timer(&timer, expire);
-               }
+               set_current_state(state);
 
-               io_schedule();
+wakeup:
+               /* Carefully check if the request is complete, giving time
+                * for the seqno to be visible following the interrupt.
+                * We also have to check in case we are kicked by the GPU
+                * reset in order to drop the struct_mutex.
+                */
+               if (__i915_request_irq_complete(req))
+                       break;
 
-               if (timer.function) {
-                       del_singleshot_timer_sync(&timer);
-                       destroy_timer_on_stack(&timer);
-               }
+               /* Only spin if we know the GPU is processing this request */
+               if (i915_spin_request(req, state, 2))
+                       break;
        }
-       if (!irq_test_in_progress)
-               engine->irq_put(engine);
+       remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-       finish_wait(&engine->irq_queue, &wait);
-
-out:
+       intel_engine_remove_wait(req->engine, &wait);
+       __set_current_state(TASK_RUNNING);
+complete:
        trace_i915_gem_request_wait_end(req);
 
        if (timeout) {
@@ -1351,6 +1548,22 @@ out:
                        *timeout = 0;
        }
 
+       if (rps && req->seqno == req->engine->last_submitted_seqno) {
+               /* The GPU is now idle and this client has stalled.
+                * Since no other client has submitted a request in the
+                * meantime, assume that this client is the only one
+                * supplying work to the GPU but is unable to keep that
+                * work supplied because it is waiting. Since the GPU is
+                * then never kept fully busy, RPS autoclocking will
+                * keep the clocks relatively low, causing further delays.
+                * Compensate by giving the synchronous client credit for
+                * a waitboost next time.
+                */
+               spin_lock(&req->i915->rps.client_lock);
+               list_del_init(&rps->link);
+               spin_unlock(&req->i915->rps.client_lock);
+       }
+
        return ret;
 }
 
@@ -1413,6 +1626,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
        list_del_init(&request->list);
        i915_gem_request_remove_from_client(request);
 
+       if (request->previous_context) {
+               if (i915.enable_execlists)
+                       intel_lr_context_unpin(request->previous_context,
+                                              request->engine);
+       }
+
+       i915_gem_context_unreference(request->ctx);
        i915_gem_request_unreference(request);
 }
 
@@ -1422,7 +1642,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;
 
-       lockdep_assert_held(&engine->dev->struct_mutex);
+       lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
        if (list_empty(&req->list))
                return;
@@ -1440,6 +1660,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 /**
  * Waits for a request to be signaled, and cleans up the
  * request and object lists appropriately for that event.
+ * @req: request to wait on
  */
 int
 i915_wait_request(struct drm_i915_gem_request *req)
@@ -1450,14 +1671,14 @@ i915_wait_request(struct drm_i915_gem_request *req)
 
        interruptible = dev_priv->mm.interruptible;
 
-       BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+       BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
 
        ret = __i915_wait_request(req, interruptible, NULL, NULL);
        if (ret)
                return ret;
 
        /* If the GPU hung, we want to keep the requests to find the guilty. */
-       if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+       if (!i915_reset_in_progress(&dev_priv->gpu_error))
                __i915_gem_request_retire__upto(req);
 
        return 0;
@@ -1466,6 +1687,8 @@ i915_wait_request(struct drm_i915_gem_request *req)
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
+ * @obj: i915 gem object
+ * @readonly: whether only read access is needed
  */
 int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
@@ -1516,7 +1739,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
        else if (obj->last_write_req == req)
                i915_gem_object_retire__write(obj);
 
-       if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+       if (!i915_reset_in_progress(&req->i915->gpu_error))
                __i915_gem_request_retire__upto(req);
 }
 
@@ -1529,7 +1752,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                            bool readonly)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
        int ret, i, n = 0;
 
@@ -1580,9 +1803,19 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
        return &fpriv->rps;
 }
 
+static enum fb_op_origin
+write_origin(struct drm_i915_gem_object *obj, unsigned domain)
+{
+       return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
+              ORIGIN_GTT : ORIGIN_CPU;
+}
+
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
  */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -1633,9 +1866,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 
        if (write_domain != 0)
-               intel_fb_obj_invalidate(obj,
-                                       write_domain == I915_GEM_DOMAIN_GTT ?
-                                       ORIGIN_GTT : ORIGIN_CPU);
+               intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
 
 unref:
        drm_gem_object_unreference(&obj->base);
@@ -1646,6 +1877,9 @@ unlock:
 
 /**
  * Called when user space has done writes to this buffer
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
  */
 int
 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -1676,8 +1910,11 @@ unlock:
 }
 
 /**
- * Maps the contents of an object, returning the address it is mapped
- * into.
+ * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+ *                      it is mapped to.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
  *
  * While the mapping holds a reference on the contents of the object, it doesn't
  * imply a ref on the object itself.
@@ -1736,6 +1973,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                else
                        addr = -ENOMEM;
                up_write(&mm->mmap_sem);
+
+               /* This may race, but that's ok, it only gets set */
+               WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
        }
        drm_gem_object_unreference_unlocked(obj);
        if (IS_ERR((void *)addr))
@@ -1982,7 +2222,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
                return size;
 
        /* Previous chips need a power-of-two fence region when tiling */
-       if (INTEL_INFO(dev)->gen == 3)
+       if (IS_GEN3(dev))
                gtt_size = 1024*1024;
        else
                gtt_size = 512*1024;
@@ -1995,7 +2235,10 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 
 /**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
- * @obj: object to check
+ * @dev: drm device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ * @fenced: is fenced alignment required or not
  *
  * Return the required GTT alignment for an object, taking into account
  * potential fence register mapping.
@@ -2021,7 +2264,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int ret;
 
        dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -2162,7 +2405,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
+       struct page *page;
        int ret;
 
        BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2184,9 +2428,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        if (obj->madv == I915_MADV_DONTNEED)
                obj->dirty = 0;
 
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_page_iter_page(&sg_iter);
-
+       for_each_sgt_page(page, sgt_iter, obj->pages) {
                if (obj->dirty)
                        set_page_dirty(page);
 
@@ -2238,12 +2480,12 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int page_count, i;
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        int ret;
@@ -2340,8 +2582,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
 err_pages:
        sg_mark_end(sg);
-       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-               put_page(sg_page_iter_page(&sg_iter));
+       for_each_sgt_page(page, sgt_iter, st)
+               put_page(page);
        sg_free_table(st);
        kfree(st);
 
@@ -2369,7 +2611,7 @@ err_pages:
 int
 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        const struct drm_i915_gem_object_ops *ops = obj->ops;
        int ret;
 
@@ -2395,6 +2637,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+{
+       unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+       struct sg_table *sgt = obj->pages;
+       struct sgt_iter sgt_iter;
+       struct page *page;
+       struct page *stack_pages[32];
+       struct page **pages = stack_pages;
+       unsigned long i = 0;
+       void *addr;
+
+       /* A single page can always be kmapped */
+       if (n_pages == 1)
+               return kmap(sg_page(sgt->sgl));
+
+       if (n_pages > ARRAY_SIZE(stack_pages)) {
+               /* Too big for stack -- allocate temporary array instead */
+               pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+               if (!pages)
+                       return NULL;
+       }
+
+       for_each_sgt_page(page, sgt_iter, sgt)
+               pages[i++] = page;
+
+       /* Check that we have the expected number of pages */
+       GEM_BUG_ON(i != n_pages);
+
+       addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+
+       if (pages != stack_pages)
+               drm_free_large(pages);
+
+       return addr;
+}
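i915_gem_object_map uses a small on-stack array when the object has few pages and only falls back to a heap allocation for larger ones. The same idiom in isolation, a sketch with plain malloc standing in for drm_malloc_gfp:

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int process(unsigned long n)
{
	int stack_buf[32];
	int *buf = stack_buf;

	if (n > ARRAY_SIZE(stack_buf)) {
		/* Too big for the stack -- allocate a temporary array instead */
		buf = malloc(n * sizeof(*buf));
		if (!buf)
			return -1;
	}

	for (unsigned long i = 0; i < n; i++)
		buf[i] = (int)i;

	/* ... use buf ... */

	if (buf != stack_buf)
		free(buf);
	return 0;
}

int main(void)
{
	return process(100);
}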
+
+/* get, pin, and map the pages of the object into kernel space */
 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 {
        int ret;
@@ -2407,29 +2687,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 
        i915_gem_object_pin_pages(obj);
 
-       if (obj->mapping == NULL) {
-               struct page **pages;
-
-               pages = NULL;
-               if (obj->base.size == PAGE_SIZE)
-                       obj->mapping = kmap(sg_page(obj->pages->sgl));
-               else
-                       pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
-                                              sizeof(*pages),
-                                              GFP_TEMPORARY);
-               if (pages != NULL) {
-                       struct sg_page_iter sg_iter;
-                       int n;
-
-                       n = 0;
-                       for_each_sg_page(obj->pages->sgl, &sg_iter,
-                                        obj->pages->nents, 0)
-                               pages[n++] = sg_page_iter_page(&sg_iter);
-
-                       obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
-                       drm_free_large(pages);
-               }
-               if (obj->mapping == NULL) {
+       if (!obj->mapping) {
+               obj->mapping = i915_gem_object_map(obj);
+               if (!obj->mapping) {
                        i915_gem_object_unpin_pages(obj);
                        return ERR_PTR(-ENOMEM);
                }
@@ -2502,9 +2762,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 }
 
 static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int ret;
 
@@ -2514,7 +2773,14 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
                if (ret)
                        return ret;
        }
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev_priv);
+
+       /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+       if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+               while (intel_kick_waiters(dev_priv) ||
+                      intel_kick_signalers(dev_priv))
+                       yield();
+       }
 
        /* Finally reset hw state */
        for_each_engine(engine, dev_priv)
@@ -2525,7 +2791,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 
 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        if (seqno == 0)
@@ -2534,7 +2800,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
        /* HWS page needs to be set less than what we
         * will inject to ring
         */
-       ret = i915_gem_init_seqno(dev, seqno - 1);
+       ret = i915_gem_init_seqno(dev_priv, seqno - 1);
        if (ret)
                return ret;
 
@@ -2550,13 +2816,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 }
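The rbtree-clearing above guards the seqno wraparound case; the comparisons themselves stay correct across the wrap because they are done on the signed difference of two u32 values, the same trick i915_seqno_passed() uses. A self-contained illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* signed difference makes the comparison wrap-safe */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(10, 5));          /* the ordinary case */
	assert(seqno_passed(2, 0xfffffffe));  /* 2 is "after" across the wrap */
	assert(!seqno_passed(0xfffffffe, 2));
	return 0;
}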
 
 int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        /* reserve 0 for non-seqno */
        if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_init_seqno(dev, 0);
+               int ret = i915_gem_init_seqno(dev_priv, 0);
                if (ret)
                        return ret;
 
@@ -2567,6 +2831,26 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
        return 0;
 }
 
+static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       dev_priv->gt.active_engines |= intel_engine_flag(engine);
+       if (dev_priv->gt.awake)
+               return;
+
+       intel_runtime_pm_get_noresume(dev_priv);
+       dev_priv->gt.awake = true;
+
+       i915_update_gfx_val(dev_priv);
+       if (INTEL_GEN(dev_priv) >= 6)
+               gen6_rps_busy(dev_priv);
+
+       queue_delayed_work(dev_priv->wq,
+                          &dev_priv->gt.retire_work,
+                          round_jiffies_up_relative(HZ));
+}
+
 /*
  * NB: This function is not allowed to fail. Doing so would mean that the
  * request is not being tracked for completion but the work itself is
@@ -2577,16 +2861,15 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                        bool flush_caches)
 {
        struct intel_engine_cs *engine;
-       struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
+       u32 reserved_tail;
        int ret;
 
        if (WARN_ON(request == NULL))
                return;
 
        engine = request->engine;
-       dev_priv = request->i915;
        ringbuf = request->ringbuf;
 
        /*
@@ -2594,9 +2877,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
-       intel_ring_reserved_space_use(ringbuf);
-
        request_start = intel_ring_get_tail(ringbuf);
+       reserved_tail = request->reserved_space;
+       request->reserved_space = 0;
+
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
@@ -2651,56 +2935,42 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        }
        /* Not allowed to fail! */
        WARN(ret, "emit|add_request failed: %d!\n", ret);
-
-       i915_queue_hangcheck(engine->dev);
-
-       queue_delayed_work(dev_priv->wq,
-                          &dev_priv->mm.retire_work,
-                          round_jiffies_up_relative(HZ));
-       intel_mark_busy(dev_priv->dev);
-
        /* Sanity check that the reserved size was large enough. */
-       intel_ring_reserved_space_end(ringbuf);
+       ret = intel_ring_get_tail(ringbuf) - request_start;
+       if (ret < 0)
+               ret += ringbuf->size;
+       WARN_ONCE(ret > reserved_tail,
+                 "Not enough space reserved (%d bytes) "
+                 "for adding the request (%d bytes)\n",
+                 reserved_tail, ret);
+
+       i915_gem_mark_busy(engine);
 }
 
-static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
-                                  const struct intel_context *ctx)
+static bool i915_context_is_banned(const struct i915_gem_context *ctx)
 {
        unsigned long elapsed;
 
-       elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
-
        if (ctx->hang_stats.banned)
                return true;
 
+       elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
        if (ctx->hang_stats.ban_period_seconds &&
            elapsed <= ctx->hang_stats.ban_period_seconds) {
-               if (!i915_gem_context_is_default(ctx)) {
-                       DRM_DEBUG("context hanging too fast, banning!\n");
-                       return true;
-               } else if (i915_stop_ring_allow_ban(dev_priv)) {
-                       if (i915_stop_ring_allow_warn(dev_priv))
-                               DRM_ERROR("gpu hanging too fast, banning!\n");
-                       return true;
-               }
+               DRM_DEBUG("context hanging too fast, banning!\n");
+               return true;
        }
 
        return false;
 }
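The simplified ban test keeps only the per-context rule: already banned, or hung again within ban_period_seconds of the last guilty hang. A minimal sketch, assuming time() in place of the kernel's get_seconds():

#include <stdbool.h>
#include <time.h>

struct hang_stats {
	bool banned;
	unsigned long guilty_ts;          /* seconds of last guilty hang */
	unsigned long ban_period_seconds; /* 0 disables banning */
};

static bool context_is_banned(const struct hang_stats *hs)
{
	unsigned long elapsed;

	if (hs->banned)
		return true;

	elapsed = (unsigned long)time(NULL) - hs->guilty_ts;
	return hs->ban_period_seconds && elapsed <= hs->ban_period_seconds;
}

int main(void)
{
	struct hang_stats hs = { false, (unsigned long)time(NULL), 60 };
	return context_is_banned(&hs) ? 0 : 1;
}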
 
-static void i915_set_reset_status(struct drm_i915_private *dev_priv,
-                                 struct intel_context *ctx,
+static void i915_set_reset_status(struct i915_gem_context *ctx,
                                  const bool guilty)
 {
-       struct i915_ctx_hang_stats *hs;
-
-       if (WARN_ON(!ctx))
-               return;
-
-       hs = &ctx->hang_stats;
+       struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
 
        if (guilty) {
-               hs->banned = i915_context_is_banned(dev_priv, ctx);
+               hs->banned = i915_context_is_banned(ctx);
                hs->batch_active++;
                hs->guilty_ts = get_seconds();
        } else {
@@ -2712,27 +2982,15 @@ void i915_gem_request_free(struct kref *req_ref)
 {
        struct drm_i915_gem_request *req = container_of(req_ref,
                                                 typeof(*req), ref);
-       struct intel_context *ctx = req->ctx;
-
-       if (req->file_priv)
-               i915_gem_request_remove_from_client(req);
-
-       if (ctx) {
-               if (i915.enable_execlists && ctx != req->i915->kernel_context)
-                       intel_lr_context_unpin(ctx, req->engine);
-
-               i915_gem_context_unreference(ctx);
-       }
-
        kmem_cache_free(req->i915->requests, req);
 }
 
 static inline int
 __i915_gem_request_alloc(struct intel_engine_cs *engine,
-                        struct intel_context *ctx,
+                        struct i915_gem_context *ctx,
                         struct drm_i915_gem_request **req_out)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
        unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
        int ret;
@@ -2754,26 +3012,16 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
        if (req == NULL)
                return -ENOMEM;
 
-       ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+       ret = i915_gem_get_seqno(engine->i915, &req->seqno);
        if (ret)
                goto err;
 
        kref_init(&req->ref);
        req->i915 = dev_priv;
        req->engine = engine;
-       req->reset_counter = reset_counter;
        req->ctx  = ctx;
        i915_gem_context_reference(req->ctx);
 
-       if (i915.enable_execlists)
-               ret = intel_logical_ring_alloc_request_extras(req);
-       else
-               ret = intel_ring_alloc_request_extras(req);
-       if (ret) {
-               i915_gem_context_unreference(req->ctx);
-               goto err;
-       }
-
        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
@@ -2781,24 +3029,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
+       req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
        if (i915.enable_execlists)
-               ret = intel_logical_ring_reserve_space(req);
+               ret = intel_logical_ring_alloc_request_extras(req);
        else
-               ret = intel_ring_reserve_space(req);
-       if (ret) {
-               /*
-                * At this point, the request is fully allocated even if not
-                * fully prepared. Thus it can be cleaned up using the proper
-                * free code.
-                */
-               intel_ring_reserved_space_cancel(req->ringbuf);
-               i915_gem_request_unreference(req);
-               return ret;
-       }
+               ret = intel_ring_alloc_request_extras(req);
+       if (ret)
+               goto err_ctx;
 
        *req_out = req;
        return 0;
 
+err_ctx:
+       i915_gem_context_unreference(ctx);
 err:
        kmem_cache_free(dev_priv->requests, req);
        return ret;
@@ -2818,13 +3062,13 @@ err:
  */
 struct drm_i915_gem_request *
 i915_gem_request_alloc(struct intel_engine_cs *engine,
-                      struct intel_context *ctx)
+                      struct i915_gem_context *ctx)
 {
        struct drm_i915_gem_request *req;
        int err;
 
        if (ctx == NULL)
-               ctx = to_i915(engine->dev)->kernel_context;
+               ctx = engine->i915->kernel_context;
        err = __i915_gem_request_alloc(engine, ctx, &req);
        return err ? ERR_PTR(err) : req;
 }
@@ -2834,8 +3078,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
 
+       /* We are called by the error capture and reset at a random
+        * point in time. In particular, note that neither is crucially
+        * ordered with an interrupt. After a hang, the GPU is dead and we
+        * assume that no more writes can happen (we waited long enough for
+        * all writes that were in transaction to be flushed) - adding an
+        * all writes that were in flight to be flushed) - adding an
+        * not need an engine->irq_seqno_barrier() before the seqno reads.
+        */
        list_for_each_entry(request, &engine->request_list, list) {
-               if (i915_gem_request_completed(request, false))
+               if (i915_gem_request_completed(request))
                        continue;
 
                return request;
@@ -2844,27 +3096,23 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
        return NULL;
 }
 
-static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
-                                      struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
        bool ring_hung;
 
        request = i915_gem_find_active_request(engine);
-
        if (request == NULL)
                return;
 
        ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
-       i915_set_reset_status(dev_priv, request->ctx, ring_hung);
-
+       i915_set_reset_status(request->ctx, ring_hung);
        list_for_each_entry_continue(request, &engine->request_list, list)
-               i915_set_reset_status(dev_priv, request->ctx, false);
+               i915_set_reset_status(request->ctx, false);
 }
 
-static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
-                                       struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
        struct intel_ringbuffer *buffer;
 
@@ -2888,13 +3136,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
                /* Ensure irq handler finishes or is cancelled. */
                tasklet_kill(&engine->irq_tasklet);
 
-               spin_lock_bh(&engine->execlist_lock);
-               /* list_splice_tail_init checks for empty lists */
-               list_splice_tail_init(&engine->execlist_queue,
-                                     &engine->execlist_retired_req_list);
-               spin_unlock_bh(&engine->execlist_lock);
-
-               intel_execlists_retire_requests(engine);
+               intel_execlists_cancel_requests(engine);
        }
 
        /*
@@ -2931,7 +3173,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
 
 void i915_gem_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        /*
@@ -2940,10 +3182,10 @@ void i915_gem_reset(struct drm_device *dev)
         * their reference to the objects, the inspection must be done first.
         */
        for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_status(dev_priv, engine);
+               i915_gem_reset_engine_status(engine);
 
        for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_cleanup(dev_priv, engine);
+               i915_gem_reset_engine_cleanup(engine);
 
        i915_gem_context_reset(dev);
 
@@ -2954,6 +3196,7 @@ void i915_gem_reset(struct drm_device *dev)
 
 /**
  * This function clears the request list as sequence numbers are passed.
+ * @engine: engine to retire requests on
  */
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
@@ -2972,7 +3215,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
                                           struct drm_i915_gem_request,
                                           list);
 
-               if (!i915_gem_request_completed(request, true))
+               if (!i915_gem_request_completed(request))
                        break;
 
                i915_gem_request_retire(request);
@@ -2995,58 +3238,52 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
                i915_gem_object_retire__read(obj, engine->id);
        }
 
-       if (unlikely(engine->trace_irq_req &&
-                    i915_gem_request_completed(engine->trace_irq_req, true))) {
-               engine->irq_put(engine);
-               i915_gem_request_assign(&engine->trace_irq_req, NULL);
-       }
-
        WARN_ON(i915_verify_lists(engine->dev));
 }
 
-bool
-i915_gem_retire_requests(struct drm_device *dev)
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
-       bool idle = true;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       if (dev_priv->gt.active_engines == 0)
+               return;
+
+       GEM_BUG_ON(!dev_priv->gt.awake);
 
        for_each_engine(engine, dev_priv) {
                i915_gem_retire_requests_ring(engine);
-               idle &= list_empty(&engine->request_list);
-               if (i915.enable_execlists) {
-                       spin_lock_bh(&engine->execlist_lock);
-                       idle &= list_empty(&engine->execlist_queue);
-                       spin_unlock_bh(&engine->execlist_lock);
-
-                       intel_execlists_retire_requests(engine);
-               }
+               if (list_empty(&engine->request_list))
+                       dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
        }
 
-       if (idle)
-               mod_delayed_work(dev_priv->wq,
-                                  &dev_priv->mm.idle_work,
+       if (dev_priv->gt.active_engines == 0)
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->gt.idle_work,
                                   msecs_to_jiffies(100));
-
-       return idle;
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), mm.retire_work.work);
-       struct drm_device *dev = dev_priv->dev;
-       bool idle;
+               container_of(work, typeof(*dev_priv), gt.retire_work.work);
+       struct drm_device *dev = &dev_priv->drm;
 
        /* Come back later if the device is busy... */
-       idle = false;
        if (mutex_trylock(&dev->struct_mutex)) {
-               idle = i915_gem_retire_requests(dev);
+               i915_gem_retire_requests(dev_priv);
                mutex_unlock(&dev->struct_mutex);
        }
-       if (!idle)
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+
+       /* Keep the retire handler running until we are finally idle.
+        * We do not need to do this test under locking as in the worst-case
+        * we queue the retire worker once too often.
+        */
+       if (READ_ONCE(dev_priv->gt.awake))
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->gt.retire_work,
                                   round_jiffies_up_relative(HZ));
 }
 
@@ -3054,25 +3291,55 @@ static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), mm.idle_work.work);
-       struct drm_device *dev = dev_priv->dev;
+               container_of(work, typeof(*dev_priv), gt.idle_work.work);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
+       unsigned int stuck_engines;
+       bool rearm_hangcheck;
+
+       if (!READ_ONCE(dev_priv->gt.awake))
+               return;
+
+       if (READ_ONCE(dev_priv->gt.active_engines))
+               return;
+
+       rearm_hangcheck =
+               cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+
+       if (!mutex_trylock(&dev->struct_mutex)) {
+               /* Currently busy, come back later */
+               mod_delayed_work(dev_priv->wq,
+                                &dev_priv->gt.idle_work,
+                                msecs_to_jiffies(50));
+               goto out_rearm;
+       }
+
+       if (dev_priv->gt.active_engines)
+               goto out_unlock;
 
        for_each_engine(engine, dev_priv)
-               if (!list_empty(&engine->request_list))
-                       return;
+               i915_gem_batch_pool_fini(&engine->batch_pool);
 
-       /* we probably should sync with hangcheck here, using cancel_work_sync.
-        * Also locking seems to be fubar here, engine->request_list is protected
-        * by dev->struct_mutex. */
+       GEM_BUG_ON(!dev_priv->gt.awake);
+       dev_priv->gt.awake = false;
+       rearm_hangcheck = false;
 
-       intel_mark_idle(dev);
+       stuck_engines = intel_kick_waiters(dev_priv);
+       if (unlikely(stuck_engines)) {
+               DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
+               dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
+       }
 
-       if (mutex_trylock(&dev->struct_mutex)) {
-               for_each_engine(engine, dev_priv)
-                       i915_gem_batch_pool_fini(&engine->batch_pool);
+       if (INTEL_GEN(dev_priv) >= 6)
+               gen6_rps_idle(dev_priv);
+       intel_runtime_pm_put(dev_priv);
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
 
-               mutex_unlock(&dev->struct_mutex);
+out_rearm:
+       if (rearm_hangcheck) {
+               GEM_BUG_ON(!dev_priv->gt.awake);
+               i915_queue_hangcheck(dev_priv);
        }
 }
 
@@ -3080,6 +3347,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
  * Ensures that an object will eventually get non-busy by flushing any required
 * write domains, emitting any outstanding lazy request and retiring any
 * completed requests.
+ * @obj: object to flush
  */
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
@@ -3096,14 +3364,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
                if (req == NULL)
                        continue;
 
-               if (list_empty(&req->list))
-                       goto retire;
-
-               if (i915_gem_request_completed(req, true)) {
-                       __i915_gem_request_retire__upto(req);
-retire:
+               if (i915_gem_request_completed(req))
                        i915_gem_object_retire__read(obj, i);
-               }
        }
 
        return 0;
@@ -3111,7 +3373,9 @@ retire:
 
 /**
  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
- * @DRM_IOCTL_ARGS: standard ioctl arguments
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  *
  * Returns 0 if successful, else an error is returned with the remaining time in
  * the timeout parameter.
@@ -3185,7 +3449,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                        ret = __i915_wait_request(req[i], true,
                                                  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                                  to_rps_client(file));
-               i915_gem_request_unreference__unlocked(req[i]);
+               i915_gem_request_unreference(req[i]);
        }
        return ret;
 
@@ -3208,10 +3472,10 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
        if (to == from)
                return 0;
 
-       if (i915_gem_request_completed(from_req, true))
+       if (i915_gem_request_completed(from_req))
                return 0;
 
-       if (!i915_semaphore_is_enabled(obj->base.dev)) {
+       if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
                struct drm_i915_private *i915 = to_i915(obj->base.dev);
                ret = __i915_wait_request(from_req,
                                          i915->mm.interruptible,
@@ -3345,10 +3609,21 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
                                            old_write_domain);
 }
 
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+       GEM_BUG_ON(vma->pin_count);
+
+       if (vma->iomap == NULL)
+               return;
+
+       io_mapping_unmap(vma->iomap);
+       vma->iomap = NULL;
+}
+
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int ret;
 
        if (list_empty(&vma->obj_link))
@@ -3377,6 +3652,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
                ret = i915_gem_object_put_fence(obj);
                if (ret)
                        return ret;
+
+               __i915_vma_iounmap(vma);
        }
 
        trace_i915_vma_unbind(vma);
@@ -3422,26 +3699,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
        return __i915_vma_unbind(vma, false);
 }
 
-int i915_gpu_idle(struct drm_device *dev)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int ret;
 
-       /* Flush everything onto the inactive list. */
-       for_each_engine(engine, dev_priv) {
-               if (!i915.enable_execlists) {
-                       struct drm_i915_gem_request *req;
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-                       req = i915_gem_request_alloc(engine, NULL);
-                       if (IS_ERR(req))
-                               return PTR_ERR(req);
-
-                       ret = i915_switch_context(req);
-                       i915_add_request_no_flush(req);
-                       if (ret)
-                               return ret;
-               }
+       for_each_engine(engine, dev_priv) {
+               if (engine->last_context == NULL)
+                       continue;
 
                ret = intel_engine_idle(engine);
                if (ret)
@@ -3488,6 +3755,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 /**
  * Finds free space in the GTT aperture and binds the object or a view of it
  * there.
+ * @obj: object to bind
+ * @vm: address space to bind into
+ * @ggtt_view: global gtt view if applicable
+ * @alignment: requested alignment
+ * @flags: mask of PIN_* flags to use
  */
 static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
@@ -3731,7 +4003,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
                return;
 
        if (i915_gem_clflush_object(obj, obj->pin_display))
-               i915_gem_chipset_flush(obj->base.dev);
+               i915_gem_chipset_flush(to_i915(obj->base.dev));
 
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
@@ -3745,6 +4017,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 
 /**
  * Moves a single object to the GTT read, and possibly write domain.
+ * @obj: object to act on
+ * @write: whether write access is requested
  *
  * This function returns when the move is complete, including waiting on
  * flushes to occur.
@@ -3816,6 +4090,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
 /**
  * Changes the cache-level of an object across all VMA.
+ * @obj: object to act on
+ * @cache_level: new cache level to set for the object
  *
  * After this function returns, the object will be in the new cache-level
  * across all GTT and the contents of the backing storage will be coherent,
@@ -3925,11 +4201,9 @@ out:
         * object is now coherent at its new cache level (with respect
         * to the access domain).
         */
-       if (obj->cache_dirty &&
-           obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
-           cpu_write_needs_clflush(obj)) {
+       if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
                if (i915_gem_clflush_object(obj, true))
-                       i915_gem_chipset_flush(obj->base.dev);
+                       i915_gem_chipset_flush(to_i915(obj->base.dev));
        }
 
        return 0;
@@ -3967,7 +4241,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
        enum i915_cache_level level;
@@ -4097,6 +4371,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
 
 /**
  * Moves a single object to the CPU read, and possibly write domain.
+ * @obj: object to act on
+ * @write: requesting write or read-only access
  *
  * This function returns when the move is complete, including waiting on
  * flushes to occur.
@@ -4159,7 +4435,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 static int
 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
        struct drm_i915_gem_request *request, *target = NULL;
@@ -4195,10 +4471,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                return 0;
 
        ret = __i915_wait_request(target, true, NULL, NULL);
-       if (ret == 0)
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
-
-       i915_gem_request_unreference__unlocked(target);
+       i915_gem_request_unreference(target);
 
        return ret;
 }
@@ -4256,7 +4529,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
                       uint32_t alignment,
                       uint64_t flags)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_vma *vma;
        unsigned bound;
        int ret;
@@ -4420,7 +4693,7 @@ int
 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -4490,7 +4763,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        obj->fence_reg = I915_FENCE_REG_NONE;
        obj->madv = I915_MADV_WILLNEED;
 
-       i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+       i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4499,21 +4772,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
        .put_pages = i915_gem_object_put_pages_gtt,
 };
 
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
                                                  size_t size)
 {
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
        gfp_t mask;
+       int ret;
 
        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
-       if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-               i915_gem_object_free(obj);
-               return NULL;
-       }
+       ret = drm_gem_object_init(dev, &obj->base, size);
+       if (ret)
+               goto fail;
 
        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4550,6 +4823,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        trace_i915_gem_object_create(obj);
 
        return obj;
+
+fail:
+       i915_gem_object_free(obj);
+
+       return ERR_PTR(ret);
 }
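/*
 * Illustrative caller sketch (not part of this patch): with the rename
 * to i915_gem_object_create(), failure is reported via ERR_PTR() rather
 * than NULL, so callers test with IS_ERR() and propagate PTR_ERR().
 * example_create_scratch() below is a hypothetical user of the API.
 */
static int example_create_scratch(struct drm_device *dev, size_t size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj); /* -ENOMEM or a drm_gem_object_init() error */

	/* ... use the object ... */

	drm_gem_object_unreference_unlocked(&obj->base); /* drop our reference */
	return 0;
}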
 
 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
@@ -4580,7 +4858,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_vma *vma, *next;
 
        intel_runtime_pm_get(dev_priv);
@@ -4655,16 +4933,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
                                           const struct i915_ggtt_view *view)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
-       BUG_ON(!view);
+       GEM_BUG_ON(!view);
 
        list_for_each_entry(vma, &obj->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma;
        return NULL;
 }
@@ -4688,7 +4962,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 static void
 i915_gem_stop_engines(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv)
@@ -4698,27 +4972,28 @@ i915_gem_stop_engines(struct drm_device *dev)
 int
 i915_gem_suspend(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gpu_idle(dev);
+       ret = i915_gem_wait_for_idle(dev_priv);
        if (ret)
                goto err;
 
-       i915_gem_retire_requests(dev);
+       i915_gem_retire_requests(dev_priv);
 
        i915_gem_stop_engines(dev);
+       i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-       cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-       flush_delayed_work(&dev_priv->mm.idle_work);
+       cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+       flush_delayed_work(&dev_priv->gt.idle_work);
 
        /* Assert that we successfully flushed all the work and
         * reset the GPU back to its idle, low power state.
         */
-       WARN_ON(dev_priv->mm.busy);
+       WARN_ON(dev_priv->gt.awake);
 
        return 0;
 
@@ -4727,40 +5002,9 @@ err:
        return ret;
 }
 
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
-{
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
-       int i, ret;
-
-       if (!HAS_L3_DPF(dev) || !remap_info)
-               return 0;
-
-       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
-       if (ret)
-               return ret;
-
-       /*
-        * Note: We do not worry about the concurrent register cacheline hang
-        * here because no other code should access these registers other than
-        * at initialization time.
-        */
-       for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-               intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
-               intel_ring_emit(engine, remap_info[i]);
-       }
-
-       intel_ring_advance(engine);
-
-       return ret;
-}
-
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen < 5 ||
            dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
@@ -4785,7 +5029,7 @@ void i915_gem_init_swizzling(struct drm_device *dev)
 
 static void init_unused_ring(struct drm_device *dev, u32 base)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(RING_CTL(base), 0);
        I915_WRITE(RING_HEAD(base), 0);
@@ -4812,7 +5056,7 @@ static void init_unused_rings(struct drm_device *dev)
 
 int i915_gem_init_engines(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        ret = intel_init_render_ring_buffer(dev);
@@ -4860,9 +5104,9 @@ cleanup_render_ring:
 int
 i915_gem_init_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
-       int ret, j;
+       int ret;
 
        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4914,59 +5158,10 @@ i915_gem_init_hw(struct drm_device *dev)
        intel_mocs_init_l3cc_table(dev);
 
        /* We can't enable contexts until all firmware is loaded */
-       if (HAS_GUC_UCODE(dev)) {
-               ret = intel_guc_ucode_load(dev);
-               if (ret) {
-                       DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
-                       ret = -EIO;
-                       goto out;
-               }
-       }
-
-       /*
-        * Increment the next seqno by 0x100 so we have a visible break
-        * on re-initialisation
-        */
-       ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
+       ret = intel_guc_setup(dev);
        if (ret)
                goto out;
 
-       /* Now it is safe to go back round and do everything else: */
-       for_each_engine(engine, dev_priv) {
-               struct drm_i915_gem_request *req;
-
-               req = i915_gem_request_alloc(engine, NULL);
-               if (IS_ERR(req)) {
-                       ret = PTR_ERR(req);
-                       break;
-               }
-
-               if (engine->id == RCS) {
-                       for (j = 0; j < NUM_L3_SLICES(dev); j++) {
-                               ret = i915_gem_l3_remap(req, j);
-                               if (ret)
-                                       goto err_request;
-                       }
-               }
-
-               ret = i915_ppgtt_init_ring(req);
-               if (ret)
-                       goto err_request;
-
-               ret = i915_gem_context_enable(req);
-               if (ret)
-                       goto err_request;
-
-err_request:
-               i915_add_request_no_flush(req);
-               if (ret) {
-                       DRM_ERROR("Failed to enable %s, error=%d\n",
-                                 engine->name, ret);
-                       i915_gem_cleanup_engines(dev);
-                       break;
-               }
-       }
-
 out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        return ret;
@@ -4974,12 +5169,9 @@ out:
 
 int i915_gem_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
-       i915.enable_execlists = intel_sanitize_enable_execlists(dev,
-                       i915.enable_execlists);
-
        mutex_lock(&dev->struct_mutex);
 
        if (!i915.enable_execlists) {
@@ -5002,10 +5194,7 @@ int i915_gem_init(struct drm_device *dev)
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       ret = i915_gem_init_userptr(dev);
-       if (ret)
-               goto out_unlock;
-
+       i915_gem_init_userptr(dev_priv);
        i915_gem_init_ggtt(dev);
 
        ret = i915_gem_context_init(dev);
@@ -5037,19 +5226,11 @@ out_unlock:
 void
 i915_gem_cleanup_engines(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv)
                dev_priv->gt.cleanup_engine(engine);
-
-       if (i915.enable_execlists)
-               /*
-                * Neither the BIOS, ourselves or any other kernel
-                * expects the system to be in execlists mode on startup,
-                * so we need to reset the GPU back to legacy mode.
-                */
-               intel_gpu_reset(dev, ALL_ENGINES);
 }
 
 static void
@@ -5062,7 +5243,7 @@ init_engine_lists(struct intel_engine_cs *engine)
 void
 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv))
@@ -5073,7 +5254,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
        else
                dev_priv->num_fence_regs = 8;
 
-       if (intel_vgpu_active(dev))
+       if (intel_vgpu_active(dev_priv))
                dev_priv->num_fence_regs =
                                I915_READ(vgtif_reg(avail_rs.fence_num));
 
@@ -5086,7 +5267,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 void
 i915_gem_load_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        dev_priv->objects =
@@ -5114,22 +5295,15 @@ i915_gem_load_init(struct drm_device *dev)
                init_engine_lists(&dev_priv->engine[i]);
        for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
-       INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+       INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
                          i915_gem_retire_work_handler);
-       INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+       INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
                          i915_gem_idle_work_handler);
+       init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-       /*
-        * Set initial sequence number for requests.
-        * Using this number allows the wraparound to happen early,
-        * catching any obvious problems.
-        */
-       dev_priv->next_seqno = ((u32)~0 - 0x1100);
-       dev_priv->last_seqno = ((u32)~0 - 0x1101);
-
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 
        init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -5148,6 +5322,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
        kmem_cache_destroy(dev_priv->objects);
 }
 
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+{
+       struct drm_i915_gem_object *obj;
+
+       /* Called just before we write the hibernation image.
+        *
+        * We need to update the domain tracking to reflect that the CPU
+        * will be accessing all the pages when creating the hibernation
+        * image and when restoring from it, so upon restoration those
+        * pages will be in the CPU domain.
+        *
+        * To make sure the hibernation image contains the latest state,
+        * we update that state just before writing out the image.
+        */
+
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       return 0;
+}
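/*
 * Sketch of how such a helper is typically wired up (assumed; the
 * dev_pm_ops side is not shown in this hunk): a .freeze_late callback
 * in i915_drv.c would resolve the struct device back to the i915
 * private structure and call the helper. Names below are hypothetical.
 */
static int example_pm_freeze_late(struct device *kdev)
{
	struct drm_device *dev = pci_get_drvdata(to_pci_dev(kdev));

	return i915_gem_freeze_late(to_i915(dev));
}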
+
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -5187,7 +5389,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
                return -ENOMEM;
 
        file->driver_priv = file_priv;
-       file_priv->dev_priv = dev->dev_private;
+       file_priv->dev_priv = to_i915(dev);
        file_priv->file = file;
        INIT_LIST_HEAD(&file_priv->rps.link);
 
@@ -5233,7 +5435,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
                        struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
        struct i915_vma *vma;
 
        WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
@@ -5254,13 +5456,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
 {
-       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
 
        WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5286,12 +5485,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
 {
-       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
+               if (vma->is_ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
                        return true;
@@ -5310,23 +5507,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
        return false;
 }
 
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-                               struct i915_address_space *vm)
+unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 {
-       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
-       BUG_ON(list_empty(&o->vma_list));
+       GEM_BUG_ON(list_empty(&o->vma_list));
 
        list_for_each_entry(vma, &o->vma_list, obj_link) {
                if (vma->is_ggtt &&
-                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-                       continue;
-               if (vma->vm == vm)
+                   vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
                        return vma->node.size;
        }
+
        return 0;
 }
 
@@ -5347,7 +5539,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
        struct page *page;
 
        /* Only default objects have per-page dirty tracking */
-       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+       if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
                return NULL;
 
        page = i915_gem_object_get_page(obj, n);
@@ -5365,8 +5557,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
        size_t bytes;
        int ret;
 
-       obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
-       if (IS_ERR_OR_NULL(obj))
+       obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+       if (IS_ERR(obj))
                return obj;
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
index 7bf2f3f2968e64fa62d94b4dd20540da6538fb09..3752d5daa4b2171cc9f3f45f312e7985407ae381 100644 (file)
@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
        if (obj == NULL) {
                int ret;
 
-               obj = i915_gem_alloc_object(pool->dev, size);
-               if (obj == NULL)
-                       return ERR_PTR(-ENOMEM);
+               obj = i915_gem_object_create(pool->dev, size);
+               if (IS_ERR(obj))
+                       return obj;
 
                ret = i915_gem_object_get_pages(obj);
                if (ret)
index e5acc3916f75fa4dda6024a4f9c97c504d452349..3c97f0e7a003758e6d60f3a4f65b040c91d3a316 100644 (file)
@@ -90,6 +90,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 
+#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
+
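/*
 * Aside (illustrative, not part of the patch): the outer parentheses on
 * ALL_L3_SLICES() matter because the macro expands to an expression.
 * Without them,
 *
 *	remaining = total - ALL_L3_SLICES(dev);
 *
 * would expand to
 *
 *	remaining = total - (1 << NUM_L3_SLICES(dev)) - 1;
 *
 * subtracting one more than intended.
 */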
 /* This is a HW constraint. The value below is the largest known requirement
  * I've seen in a spec to date, and that was a workaround for a non-shipping
  * part. It should be safe to decrease this, but it's more future proof as is.
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static size_t get_context_alignment(struct drm_device *dev)
+static size_t get_context_alignment(struct drm_i915_private *dev_priv)
 {
-       if (IS_GEN6(dev))
+       if (IS_GEN6(dev_priv))
                return GEN6_CONTEXT_ALIGN;
 
        return GEN7_CONTEXT_ALIGN;
 }
 
-static int get_context_size(struct drm_device *dev)
+static int get_context_size(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
        u32 reg;
 
-       switch (INTEL_INFO(dev)->gen) {
+       switch (INTEL_GEN(dev_priv)) {
        case 6:
                reg = I915_READ(CXT_SIZE);
                ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
                break;
        case 7:
                reg = I915_READ(GEN7_CXT_SIZE);
-               if (IS_HASWELL(dev))
+               if (IS_HASWELL(dev_priv))
                        ret = HSW_CXT_TOTAL_SIZE;
                else
                        ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev)
        return ret;
 }
 
-static void i915_gem_context_clean(struct intel_context *ctx)
+static void i915_gem_context_clean(struct i915_gem_context *ctx)
 {
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
        struct i915_vma *vma, *next;
@@ -150,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx)
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-       struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+       struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
+       int i;
 
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        trace_i915_context_free(ctx);
 
-       if (i915.enable_execlists)
-               intel_lr_context_free(ctx);
-
        /*
         * This context is going away and we need to remove all VMAs still
         * around. This is to handle imported shared objects for which
@@ -166,9 +166,22 @@ void i915_gem_context_free(struct kref *ctx_ref)
 
        i915_ppgtt_put(ctx->ppgtt);
 
-       if (ctx->legacy_hw_ctx.rcs_state)
-               drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+       for (i = 0; i < I915_NUM_ENGINES; i++) {
+               struct intel_context *ce = &ctx->engine[i];
+
+               if (!ce->state)
+                       continue;
+
+               WARN_ON(ce->pin_count);
+               if (ce->ringbuf)
+                       intel_ringbuffer_free(ce->ringbuf);
+
+               drm_gem_object_unreference(&ce->state->base);
+       }
+
        list_del(&ctx->link);
+
+       ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
        kfree(ctx);
 }
 
@@ -178,9 +191,11 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
        struct drm_i915_gem_object *obj;
        int ret;
 
-       obj = i915_gem_alloc_object(dev, size);
-       if (obj == NULL)
-               return ERR_PTR(-ENOMEM);
+       lockdep_assert_held(&dev->struct_mutex);
+
+       obj = i915_gem_object_create(dev, size);
+       if (IS_ERR(obj))
+               return obj;
 
        /*
         * Try to make the context utilize L3 as well as LLC.
@@ -209,22 +224,52 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
        return obj;
 }
 
-static struct intel_context *
+static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
+{
+       int ret;
+
+       ret = ida_simple_get(&dev_priv->context_hw_ida,
+                            0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+       if (ret < 0) {
+               /* Contexts are only released when no longer active.
+                * Flush any pending retires to hopefully release some
+                * stale contexts and try again.
+                */
+               i915_gem_retire_requests(dev_priv);
+               ret = ida_simple_get(&dev_priv->context_hw_ida,
+                                    0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+               if (ret < 0)
+                       return ret;
+       }
+
+       *out = ret;
+       return 0;
+}
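/*
 * Minimal sketch of the ida lifecycle assign_hw_id() relies on
 * (standard kernel API, shown only for orientation; example_ida_usage()
 * is hypothetical): ids handed out by ida_simple_get() must be returned
 * with ida_simple_remove() (done in i915_gem_context_free() above), and
 * the pool itself is bracketed by ida_init()/ida_destroy().
 */
static void example_ida_usage(void)
{
	struct ida ida;
	int id;

	ida_init(&ida);
	id = ida_simple_get(&ida, 0, 16, GFP_KERNEL); /* allocate in [0, 16) */
	if (id >= 0)
		ida_simple_remove(&ida, id); /* return the id to the pool */
	ida_destroy(&ida);
}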
+
+static struct i915_gem_context *
 __create_hw_context(struct drm_device *dev,
                    struct drm_i915_file_private *file_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_context *ctx;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_gem_context *ctx;
        int ret;
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
                return ERR_PTR(-ENOMEM);
 
+       ret = assign_hw_id(dev_priv, &ctx->hw_id);
+       if (ret) {
+               kfree(ctx);
+               return ERR_PTR(ret);
+       }
+
        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &dev_priv->context_list);
        ctx->i915 = dev_priv;
 
+       ctx->ggtt_alignment = get_context_alignment(dev_priv);
+
        if (dev_priv->hw_context_size) {
                struct drm_i915_gem_object *obj =
                                i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
@@ -232,7 +277,7 @@ __create_hw_context(struct drm_device *dev,
                        ret = PTR_ERR(obj);
                        goto err_out;
                }
-               ctx->legacy_hw_ctx.rcs_state = obj;
+               ctx->engine[RCS].state = obj;
        }
 
        /* Default context will never have a file_priv */
@@ -249,9 +294,13 @@ __create_hw_context(struct drm_device *dev,
        /* NB: Mark all slices as needing a remap so that when the context first
         * loads it will restore whatever remap state already exists. If there
         * is no remap info, it will be a NOP. */
-       ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
+       ctx->remap_slice = ALL_L3_SLICES(dev_priv);
 
        ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+       ctx->ring_size = 4 * PAGE_SIZE;
+       ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
+                            GEN8_CTX_ADDRESSING_MODE_SHIFT;
+       ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
 
        return ctx;
 
@@ -265,44 +314,27 @@ err_out:
  * context state of the GPU for applications that don't utilize HW contexts, as
  * well as an idle case.
  */
-static struct intel_context *
+static struct i915_gem_context *
 i915_gem_create_context(struct drm_device *dev,
                        struct drm_i915_file_private *file_priv)
 {
-       const bool is_global_default_ctx = file_priv == NULL;
-       struct intel_context *ctx;
-       int ret = 0;
+       struct i915_gem_context *ctx;
 
-       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+       lockdep_assert_held(&dev->struct_mutex);
 
        ctx = __create_hw_context(dev, file_priv);
        if (IS_ERR(ctx))
                return ctx;
 
-       if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
-               /* We may need to do things with the shrinker which
-                * require us to immediately switch back to the default
-                * context. This can cause a problem as pinning the
-                * default context also requires GTT space which may not
-                * be available. To avoid this we always pin the default
-                * context.
-                */
-               ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
-                                           get_context_alignment(dev), 0);
-               if (ret) {
-                       DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
-                       goto err_destroy;
-               }
-       }
-
        if (USES_FULL_PPGTT(dev)) {
                struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
 
-               if (IS_ERR_OR_NULL(ppgtt)) {
+               if (IS_ERR(ppgtt)) {
                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                                         PTR_ERR(ppgtt));
-                       ret = PTR_ERR(ppgtt);
-                       goto err_unpin;
+                       idr_remove(&file_priv->context_idr, ctx->user_handle);
+                       i915_gem_context_unreference(ctx);
+                       return ERR_CAST(ppgtt);
                }
 
                ctx->ppgtt = ppgtt;
@@ -311,76 +343,102 @@ i915_gem_create_context(struct drm_device *dev,
        trace_i915_context_create(ctx);
 
        return ctx;
+}
 
-err_unpin:
-       if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
-               i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
-err_destroy:
-       idr_remove(&file_priv->context_idr, ctx->user_handle);
-       i915_gem_context_unreference(ctx);
-       return ERR_PTR(ret);
+/**
+ * i915_gem_context_create_gvt - create a GVT GEM context
+ * @dev: drm device *
+ *
+ * This function creates a GVT-specific GEM context.
+ *
+ * Returns:
+ * pointer to i915_gem_context on success, error pointer on failure
+ *
+ */
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev)
+{
+       struct i915_gem_context *ctx;
+       int ret;
+
+       if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+               return ERR_PTR(-ENODEV);
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ERR_PTR(ret);
+
+       ctx = i915_gem_create_context(dev, NULL);
+       if (IS_ERR(ctx))
+               goto out;
+
+       ctx->execlists_force_single_submission = true;
+       ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
+out:
+       mutex_unlock(&dev->struct_mutex);
+       return ctx;
 }
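/*
 * Aside (illustrative): note the error flow above. On failure,
 * i915_gem_create_context() returns an ERR_PTR which falls through the
 * shared "out:" unlock path and is handed straight back, so a caller
 * would write:
 *
 *	ctx = i915_gem_context_create_gvt(dev);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);  (-ENODEV when CONFIG_DRM_I915_GVT is off)
 */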
 
-static void i915_gem_context_unpin(struct intel_context *ctx,
+static void i915_gem_context_unpin(struct i915_gem_context *ctx,
                                   struct intel_engine_cs *engine)
 {
        if (i915.enable_execlists) {
                intel_lr_context_unpin(ctx, engine);
        } else {
-               if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
-                       i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+               struct intel_context *ce = &ctx->engine[engine->id];
+
+               if (ce->state)
+                       i915_gem_object_ggtt_unpin(ce->state);
+
                i915_gem_context_unreference(ctx);
        }
 }
 
 void i915_gem_context_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       lockdep_assert_held(&dev->struct_mutex);
 
        if (i915.enable_execlists) {
-               struct intel_context *ctx;
+               struct i915_gem_context *ctx;
 
                list_for_each_entry(ctx, &dev_priv->context_list, link)
                        intel_lr_context_reset(dev_priv, ctx);
        }
 
-       for (i = 0; i < I915_NUM_ENGINES; i++) {
-               struct intel_engine_cs *engine = &dev_priv->engine[i];
-
-               if (engine->last_context) {
-                       i915_gem_context_unpin(engine->last_context, engine);
-                       engine->last_context = NULL;
-               }
-       }
-
-       /* Force the GPU state to be reinitialised on enabling */
-       dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
+       i915_gem_context_lost(dev_priv);
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_context *ctx;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_gem_context *ctx;
 
        /* Init should only be called once per module load. Eventually the
         * restriction on the context_disabled check can be loosened. */
        if (WARN_ON(dev_priv->kernel_context))
                return 0;
 
-       if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
+       if (intel_vgpu_active(dev_priv) &&
+           HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
                if (!i915.enable_execlists) {
                        DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
                        return -EINVAL;
                }
        }
 
+       /* Using the simple ida interface, the max is limited by sizeof(int) */
+       BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
+       ida_init(&dev_priv->context_hw_ida);
+
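/*
 * Worked note (illustrative): ida_simple_get() returns the allocated id
 * in a plain int, so the exclusive upper bound passed to it must itself
 * fit in an int. The BUILD_BUG_ON() above turns a too-large
 * MAX_CONTEXT_HW_ID into a compile-time failure instead of a silent
 * runtime truncation.
 */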
        if (i915.enable_execlists) {
                /* NB: intentionally left blank. We will allocate our own
                 * backing objects as we need them, thank you very much */
                dev_priv->hw_context_size = 0;
-       } else if (HAS_HW_CONTEXTS(dev)) {
-               dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
+       } else if (HAS_HW_CONTEXTS(dev_priv)) {
+               dev_priv->hw_context_size =
+                       round_up(get_context_size(dev_priv), 4096);
                if (dev_priv->hw_context_size > (1<<20)) {
                        DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
                                         dev_priv->hw_context_size);
@@ -403,67 +461,60 @@ int i915_gem_context_init(struct drm_device *dev)
        return 0;
 }
 
-void i915_gem_context_fini(struct drm_device *dev)
+void i915_gem_context_lost(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_context *dctx = dev_priv->kernel_context;
-       int i;
-
-       if (dctx->legacy_hw_ctx.rcs_state) {
-               /* The only known way to stop the gpu from accessing the hw context is
-                * to reset it. Do this as the very last operation to avoid confusing
-                * other code, leading to spurious errors. */
-               intel_gpu_reset(dev, ALL_ENGINES);
+       struct intel_engine_cs *engine;
 
-               /* When default context is created and switched to, base object refcount
-                * will be 2 (+1 from object creation and +1 from do_switch()).
-                * i915_gem_context_fini() will be called after gpu_idle() has switched
-                * to default context. So we need to unreference the base object once
-                * to offset the do_switch part, so that i915_gem_context_unreference()
-                * can then free the base object correctly. */
-               WARN_ON(!dev_priv->engine[RCS].last_context);
-
-               i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
-       }
-
-       for (i = I915_NUM_ENGINES; --i >= 0;) {
-               struct intel_engine_cs *engine = &dev_priv->engine[i];
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
+       for_each_engine(engine, dev_priv) {
                if (engine->last_context) {
                        i915_gem_context_unpin(engine->last_context, engine);
                        engine->last_context = NULL;
                }
        }
 
-       i915_gem_context_unreference(dctx);
-       dev_priv->kernel_context = NULL;
-}
+       /* Force the GPU state to be restored on enabling */
+       if (!i915.enable_execlists) {
+               struct i915_gem_context *ctx;
 
-int i915_gem_context_enable(struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->engine;
-       int ret;
+               list_for_each_entry(ctx, &dev_priv->context_list, link) {
+                       if (!i915_gem_context_is_default(ctx))
+                               continue;
 
-       if (i915.enable_execlists) {
-               if (engine->init_context == NULL)
-                       return 0;
+                       for_each_engine(engine, dev_priv)
+                               ctx->engine[engine->id].initialised = false;
 
-               ret = engine->init_context(req);
-       } else
-               ret = i915_switch_context(req);
+                       ctx->remap_slice = ALL_L3_SLICES(dev_priv);
+               }
 
-       if (ret) {
-               DRM_ERROR("ring init context: %d\n", ret);
-               return ret;
+               for_each_engine(engine, dev_priv) {
+                       struct intel_context *kce =
+                               &dev_priv->kernel_context->engine[engine->id];
+
+                       kce->initialised = true;
+               }
        }
+}
 
-       return 0;
+void i915_gem_context_fini(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_gem_context *dctx = dev_priv->kernel_context;
+
+       lockdep_assert_held(&dev->struct_mutex);
+
+       i915_gem_context_unreference(dctx);
+       dev_priv->kernel_context = NULL;
+
+       ida_destroy(&dev_priv->context_hw_ida);
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
 {
-       struct intel_context *ctx = p;
+       struct i915_gem_context *ctx = p;
 
+       ctx->file_priv = ERR_PTR(-EBADF);
        i915_gem_context_unreference(ctx);
        return 0;
 }
@@ -471,7 +522,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
 
        idr_init(&file_priv->context_idr);
 
@@ -491,31 +542,22 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
+       lockdep_assert_held(&dev->struct_mutex);
+
        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
        idr_destroy(&file_priv->context_idr);
 }
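/*
 * Orientation sketch (standard idr API, not part of the patch):
 * idr_for_each() invokes the callback for every (id, pointer) pair and
 * aborts the walk on a non-zero return, which is why
 * context_idr_cleanup() above returns 0 unconditionally.
 * example_count_entry() is hypothetical.
 */
static int example_count_entry(int id, void *p, void *data)
{
	int *count = data;

	(*count)++;	/* p is the pointer stored under this id */
	return 0;	/* non-zero would stop the iteration early */
}
/* usage: idr_for_each(&file_priv->context_idr, example_count_entry, &count); */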
 
-struct intel_context *
-i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
-{
-       struct intel_context *ctx;
-
-       ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
-       if (!ctx)
-               return ERR_PTR(-ENOENT);
-
-       return ctx;
-}
-
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
+       struct drm_i915_private *dev_priv = req->i915;
        struct intel_engine_cs *engine = req->engine;
        u32 flags = hw_flags | MI_MM_SPACE_GTT;
        const int num_rings =
                /* Use an extended w/a on ivb+ if signalling from other rings */
-               i915_semaphore_is_enabled(engine->dev) ?
-               hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
+               i915_semaphore_is_enabled(dev_priv) ?
+               hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
                0;
        int len, ret;
 
@@ -524,21 +566,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
         * explicitly, so we rely on the value at ring init, stored in
         * itlb_before_ctx_switch.
         */
-       if (IS_GEN6(engine->dev)) {
+       if (IS_GEN6(dev_priv)) {
                ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
                if (ret)
                        return ret;
        }
 
        /* These flags are for resource streamer on HSW+ */
-       if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
+       if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
                flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-       else if (INTEL_INFO(engine->dev)->gen < 8)
+       else if (INTEL_GEN(dev_priv) < 8)
                flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
        len = 4;
-       if (INTEL_INFO(engine->dev)->gen >= 7)
+       if (INTEL_GEN(dev_priv) >= 7)
                len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
        ret = intel_ring_begin(req, len);
@@ -546,14 +588,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
                return ret;
 
        /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-       if (INTEL_INFO(engine->dev)->gen >= 7) {
+       if (INTEL_GEN(dev_priv) >= 7) {
                intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
                if (num_rings) {
                        struct intel_engine_cs *signaller;
 
                        intel_ring_emit(engine,
                                        MI_LOAD_REGISTER_IMM(num_rings));
-                       for_each_engine(signaller, to_i915(engine->dev)) {
+                       for_each_engine(signaller, dev_priv) {
                                if (signaller == engine)
                                        continue;
 
@@ -568,7 +610,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
        intel_ring_emit(engine, MI_NOOP);
        intel_ring_emit(engine, MI_SET_CONTEXT);
        intel_ring_emit(engine,
-                       i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+                       i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
                        flags);
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -576,14 +618,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
         */
        intel_ring_emit(engine, MI_NOOP);
 
-       if (INTEL_INFO(engine->dev)->gen >= 7) {
+       if (INTEL_GEN(dev_priv) >= 7) {
                if (num_rings) {
                        struct intel_engine_cs *signaller;
                        i915_reg_t last_reg = {}; /* keep gcc quiet */
 
                        intel_ring_emit(engine,
                                        MI_LOAD_REGISTER_IMM(num_rings));
-                       for_each_engine(signaller, to_i915(engine->dev)) {
+                       for_each_engine(signaller, dev_priv) {
                                if (signaller == engine)
                                        continue;
 
@@ -609,45 +651,83 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
        return ret;
 }
 
-static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
-                                  struct intel_context *to)
+static int remap_l3(struct drm_i915_gem_request *req, int slice)
+{
+       u32 *remap_info = req->i915->l3_parity.remap_info[slice];
+       struct intel_engine_cs *engine = req->engine;
+       int i, ret;
+
+       if (!remap_info)
+               return 0;
+
+       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+       if (ret)
+               return ret;
+
+       /*
+        * Note: We do not worry about the concurrent register cacheline hang
+        * here because no other code should access these registers other than
+        * at initialization time.
+        */
+       intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+       for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
+               intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+               intel_ring_emit(engine, remap_info[i]);
+       }
+       intel_ring_emit(engine, MI_NOOP);
+       intel_ring_advance(engine);
+
+       return 0;
+}
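/*
 * Worked arithmetic for the intel_ring_begin() length above (assuming
 * GEN7_L3LOG_SIZE is 0x80 bytes, i.e. 32 dwords of remap data): one
 * MI_LOAD_REGISTER_IMM header, a (register, value) dword pair per
 * entry, and a trailing MI_NOOP:
 *
 *	1 + 2 * (GEN7_L3LOG_SIZE / 4) + 1 == GEN7_L3LOG_SIZE / 4 * 2 + 2
 *
 * which is 66 dwords for 32 entries. The old i915_gem_l3_remap(),
 * removed earlier in this series, emitted a separate three-dword LRI
 * per entry (GEN7_L3LOG_SIZE / 4 * 3), so the batched form is shorter.
 */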
+
+static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
+                                  struct intel_engine_cs *engine,
+                                  struct i915_gem_context *to)
 {
        if (to->remap_slice)
                return false;
 
-       if (!to->legacy_hw_ctx.initialized)
+       if (!to->engine[RCS].initialised)
                return false;
 
-       if (to->ppgtt &&
-           !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+       if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
                return false;
 
        return to == engine->last_context;
 }
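/*
 * Illustrative summary of skip_rcs_switch() above: the MI_SET_CONTEXT
 * can be elided only when nothing would change, i.e. no L3 slices need
 * a remap, the target context has already been initialised on RCS, the
 * (possibly aliasing) ppgtt has no dirty page directories for this
 * engine, and the engine is already running this exact context.
 */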
 
 static bool
-needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
+                 struct intel_engine_cs *engine,
+                 struct i915_gem_context *to)
 {
-       if (!to->ppgtt)
+       if (!ppgtt)
                return false;
 
+       /* Always load the ppgtt on first use */
+       if (!engine->last_context)
+               return true;
+
+       /* Same context without new entries, skip */
        if (engine->last_context == to &&
-           !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+           !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
                return false;
 
        if (engine->id != RCS)
                return true;
 
-       if (INTEL_INFO(engine->dev)->gen < 8)
+       if (INTEL_GEN(engine->i915) < 8)
                return true;
 
        return false;
 }
 
 static bool
-needs_pd_load_post(struct intel_context *to, u32 hw_flags)
+needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
+                  struct i915_gem_context *to,
+                  u32 hw_flags)
 {
-       if (!to->ppgtt)
+       if (!ppgtt)
                return false;
 
        if (!IS_GEN8(to->i915))
@@ -661,18 +741,19 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags)
 
 static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
-       struct intel_context *to = req->ctx;
+       struct i915_gem_context *to = req->ctx;
        struct intel_engine_cs *engine = req->engine;
-       struct intel_context *from;
+       struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
+       struct i915_gem_context *from;
        u32 hw_flags;
        int ret, i;
 
-       if (skip_rcs_switch(engine, to))
+       if (skip_rcs_switch(ppgtt, engine, to))
                return 0;
 
        /* Trying to pin first makes error handling easier. */
-       ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-                                   get_context_alignment(engine->dev),
+       ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
+                                   to->ggtt_alignment,
                                    0);
        if (ret)
                return ret;
@@ -694,37 +775,32 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
         *
         * XXX: We need a real interface to do this instead of trickery.
         */
-       ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
+       ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
        if (ret)
                goto unpin_out;
 
-       if (needs_pd_load_pre(engine, to)) {
+       if (needs_pd_load_pre(ppgtt, engine, to)) {
                /* Older GENs and non render rings still want the load first,
                 * "PP_DCLV followed by PP_DIR_BASE register through Load
                 * Register Immediate commands in Ring Buffer before submitting
                 * a context."*/
                trace_switch_mm(engine, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, req);
+               ret = ppgtt->switch_mm(ppgtt, req);
                if (ret)
                        goto unpin_out;
        }
 
-       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+       if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
                /* NB: If we inhibit the restore, the context is not allowed to
                 * die because future work may end up depending on valid address
                 * space. This means we must enforce that a page table load
                 * occur when this occurs. */
                hw_flags = MI_RESTORE_INHIBIT;
-       else if (to->ppgtt &&
-                intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+       else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
                hw_flags = MI_FORCE_RESTORE;
        else
                hw_flags = 0;
 
-       /* We should never emit switch_mm more than once */
-       WARN_ON(needs_pd_load_pre(engine, to) &&
-               needs_pd_load_post(to, hw_flags));
-
        if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
                ret = mi_set_context(req, hw_flags);
                if (ret)
@@ -738,8 +814,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from != NULL) {
-               from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
+               from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
                /* As long as MI_SET_CONTEXT is serializing, i.e. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
@@ -747,10 +823,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
                 * able to defer doing this until we know the object would be
                 * swapped, but there is no way to do that yet.
                 */
-               from->legacy_hw_ctx.rcs_state->dirty = 1;
+               from->engine[RCS].state->dirty = 1;
 
                /* obj is kept alive until the next request by its active ref */
-               i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
+               i915_gem_object_ggtt_unpin(from->engine[RCS].state);
                i915_gem_context_unreference(from);
        }
        i915_gem_context_reference(to);
@@ -759,9 +835,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
        /* GEN8 does *not* require an explicit reload if the PDPs have been
         * setup, and we do not wish to move them.
         */
-       if (needs_pd_load_post(to, hw_flags)) {
+       if (needs_pd_load_post(ppgtt, to, hw_flags)) {
                trace_switch_mm(engine, to);
-               ret = to->ppgtt->switch_mm(to->ppgtt, req);
+               ret = ppgtt->switch_mm(ppgtt, req);
                /* The hardware context switch is emitted, but we haven't
                 * actually changed the state - so it's probably safe to bail
                 * here. Still, let the user know something dangerous has
@@ -771,33 +847,33 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
                        return ret;
        }
 
-       if (to->ppgtt)
-               to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+       if (ppgtt)
+               ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 
        for (i = 0; i < MAX_L3_SLICES; i++) {
                if (!(to->remap_slice & (1<<i)))
                        continue;
 
-               ret = i915_gem_l3_remap(req, i);
+               ret = remap_l3(req, i);
                if (ret)
                        return ret;
 
                to->remap_slice &= ~(1<<i);
        }
 
-       if (!to->legacy_hw_ctx.initialized) {
+       if (!to->engine[RCS].initialised) {
                if (engine->init_context) {
                        ret = engine->init_context(req);
                        if (ret)
                                return ret;
                }
-               to->legacy_hw_ctx.initialized = true;
+               to->engine[RCS].initialised = true;
        }
 
        return 0;
 
 unpin_out:
-       i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+       i915_gem_object_ggtt_unpin(to->engine[RCS].state);
        return ret;
 }
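/*
 * Note on the "to->ppgtt ?: req->i915->mm.aliasing_ppgtt" expression
 * used above (illustrative): "a ?: b" is the GNU C conditional with an
 * omitted middle operand, equivalent to "a ? a : b" except that a is
 * evaluated only once, so the ppgtt resolves to the context's own page
 * tables when it has them and falls back to the aliasing ppgtt
 * otherwise:
 *
 *	struct i915_hw_ppgtt *ppgtt =
 *		to->ppgtt ? to->ppgtt : req->i915->mm.aliasing_ppgtt;
 */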
 
@@ -817,25 +893,24 @@ unpin_out:
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = req->i915;
 
        WARN_ON(i915.enable_execlists);
-       WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+       lockdep_assert_held(&req->i915->drm.struct_mutex);
 
-       if (engine->id != RCS ||
-           req->ctx->legacy_hw_ctx.rcs_state == NULL) {
-               struct intel_context *to = req->ctx;
+       if (!req->ctx->engine[engine->id].state) {
+               struct i915_gem_context *to = req->ctx;
+               struct i915_hw_ppgtt *ppgtt =
+                       to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
 
-               if (needs_pd_load_pre(engine, to)) {
+               if (needs_pd_load_pre(ppgtt, engine, to)) {
                        int ret;
 
                        trace_switch_mm(engine, to);
-                       ret = to->ppgtt->switch_mm(to->ppgtt, req);
+                       ret = ppgtt->switch_mm(ppgtt, req);
                        if (ret)
                                return ret;
 
-                       /* Doing a PD load always reloads the page dirs */
-                       to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+                       ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
                }
 
                if (to != engine->last_context) {
@@ -861,7 +936,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_context_create *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret;
 
        if (!contexts_enabled(dev))
@@ -890,7 +965,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_context_destroy *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret;
 
        if (args->pad != 0)
@@ -903,13 +978,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       ctx = i915_gem_context_get(file_priv, args->ctx_id);
+       ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
        }
 
-       idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
+       idr_remove(&file_priv->context_idr, ctx->user_handle);
        i915_gem_context_unreference(ctx);
        mutex_unlock(&dev->struct_mutex);
 
@@ -922,14 +997,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret;
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
-       ctx = i915_gem_context_get(file_priv, args->ctx_id);
+       ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
@@ -951,6 +1026,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                else
                        args->value = to_i915(dev)->ggtt.base.total;
                break;
+       case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+               args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -965,14 +1043,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        int ret;
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
-       ctx = i915_gem_context_get(file_priv, args->ctx_id);
+       ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR(ctx);
@@ -996,6 +1074,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                        ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
                }
                break;
+       case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+               if (args->size) {
+                       ret = -EINVAL;
+               } else {
+                       if (args->value)
+                               ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
+                       else
+                               ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
+               }
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -1004,3 +1092,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 
        return ret;
 }
+
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
+                                      void *data, struct drm_file *file)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_reset_stats *args = data;
+       struct i915_ctx_hang_stats *hs;
+       struct i915_gem_context *ctx;
+       int ret;
+
+       if (args->flags || args->pad)
+               return -EINVAL;
+
+       if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               return ret;
+
+       ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
+       if (IS_ERR(ctx)) {
+               mutex_unlock(&dev->struct_mutex);
+               return PTR_ERR(ctx);
+       }
+       hs = &ctx->hang_stats;
+
+       if (capable(CAP_SYS_ADMIN))
+               args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+       else
+               args->reset_count = 0;
+
+       args->batch_active = hs->batch_active;
+       args->batch_pending = hs->batch_pending;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
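
As a usage illustration for the new I915_CONTEXT_PARAM_NO_ERROR_CAPTURE parameter handled by the getparam/setparam ioctls above, here is a minimal userspace sketch (not part of this commit). The helper name is hypothetical; it assumes libdrm's drmIoctl() and the i915 uapi header. Note that setparam rejects a nonzero size with -EINVAL, as the hunk above shows.

    #include <sys/ioctl.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    /* Hypothetical helper: opt a context out of GPU error-state capture. */
    static int context_set_no_error_capture(int fd, __u32 ctx_id, int enable)
    {
            struct drm_i915_gem_context_param p = {
                    .ctx_id = ctx_id,
                    .size = 0,      /* must be 0 for this parameter */
                    .param = I915_CONTEXT_PARAM_NO_ERROR_CAPTURE,
                    .value = enable,
            };

            return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
    }
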
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/i915_gem_dmabuf.h
new file mode 100644 (file)
index 0000000..9131555
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_GEM_DMABUF_H_
+#define _I915_GEM_DMABUF_H_
+
+#include <linux/dma-buf.h>
+
+static inline struct reservation_object *
+i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
+{
+       struct dma_buf *dma_buf;
+
+       if (obj->base.dma_buf)
+               dma_buf = obj->base.dma_buf;
+       else if (obj->base.import_attach)
+               dma_buf = obj->base.import_attach->dmabuf;
+       else
+               return NULL;
+
+       return dma_buf->resv;
+}
+
+#endif
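
To illustrate the intended use of the helper above, a sketch of a hypothetical caller (not from this commit): the helper returns the reservation object backing an imported or exported dma-buf, or NULL for ordinary shmem-backed objects, so callers must handle the NULL case before inspecting fences.

    /* Hypothetical caller: report whether fences on an attached dma-buf
     * are still outstanding.
     */
    static bool i915_dmabuf_is_busy(struct drm_i915_gem_object *obj)
    {
            struct reservation_object *resv;

            resv = i915_gem_object_get_dmabuf_resv(obj);
            if (!resv)
                    return false;   /* no dma-buf, nothing extra to wait on */

            /* test_all = true: consider shared as well as exclusive fences */
            return !reservation_object_test_signaled_rcu(resv, true);
    }
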
index ea1f8d1bd228e08b0a1c14238b9fea1b98182449..3c1280ec7ff648979ea96a17325a5aa6924e688d 100644 (file)
 #include "intel_drv.h"
 #include "i915_trace.h"
 
+static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+
+       if (i915.enable_execlists)
+               return 0;
+
+       for_each_engine(engine, dev_priv) {
+               struct drm_i915_gem_request *req;
+               int ret;
+
+               if (engine->last_context == NULL)
+                       continue;
+
+               if (engine->last_context == dev_priv->kernel_context)
+                       continue;
+
+               req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
+               if (IS_ERR(req))
+                       return PTR_ERR(req);
+
+               ret = i915_switch_context(req);
+               i915_add_request_no_flush(req);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
@@ -150,11 +181,19 @@ none:
 
        /* Only idle the GPU and repeat the search once */
        if (pass++ == 0) {
-               ret = i915_gpu_idle(dev);
+               struct drm_i915_private *dev_priv = to_i915(dev);
+
+               if (i915_is_ggtt(vm)) {
+                       ret = switch_to_pinned_context(dev_priv);
+                       if (ret)
+                               return ret;
+               }
+
+               ret = i915_gem_wait_for_idle(dev_priv);
                if (ret)
                        return ret;
 
-               i915_gem_retire_requests(dev);
+               i915_gem_retire_requests(dev_priv);
                goto search_again;
        }
 
@@ -261,11 +300,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
        trace_i915_gem_evict_vm(vm);
 
        if (do_idle) {
-               ret = i915_gpu_idle(vm->dev);
+               struct drm_i915_private *dev_priv = to_i915(vm->dev);
+
+               if (i915_is_ggtt(vm)) {
+                       ret = switch_to_pinned_context(dev_priv);
+                       if (ret)
+                               return ret;
+               }
+
+               ret = i915_gem_wait_for_idle(dev_priv);
                if (ret)
                        return ret;
 
-               i915_gem_retire_requests(vm->dev);
+               i915_gem_retire_requests(dev_priv);
 
                WARN_ON(!list_empty(&vm->active_list));
        }
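
Both eviction paths above now share the same idling sequence: park on the kernel context first, so no user context remains pinned in the GGTT, then wait for the GPU to idle. Condensed into a hypothetical helper for clarity (a sketch, not code from this commit):

    static int ggtt_idle(struct drm_i915_private *dev_priv,
                         struct i915_address_space *vm)
    {
            int ret;

            if (i915_is_ggtt(vm)) {
                    ret = switch_to_pinned_context(dev_priv);
                    if (ret)
                            return ret;
            }

            ret = i915_gem_wait_for_idle(dev_priv);
            if (ret)
                    return ret;

            i915_gem_retire_requests(dev_priv);
            return 0;
    }
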
index 33df74d98269c83bf6fa6d45f852839a959ff724..1978633e7549ad4e7c7fd0f0d4eacfe4fce229c9 100644 (file)
@@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 static int
 i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
                            struct list_head *vmas,
-                           struct intel_context *ctx,
+                           struct i915_gem_context *ctx,
                            bool *need_relocs)
 {
        struct drm_i915_gem_object *obj;
@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
        struct i915_address_space *vm;
        struct list_head ordered_vmas;
        struct list_head pinned_vmas;
-       bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
+       bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
        int retry;
 
        i915_gem_retire_requests_ring(engine);
@@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct intel_engine_cs *engine,
                                  struct eb_vmas *eb,
                                  struct drm_i915_gem_exec_object2 *exec,
-                                 struct intel_context *ctx)
+                                 struct i915_gem_context *ctx)
 {
        struct drm_i915_gem_relocation_entry *reloc;
        struct i915_address_space *vm;
@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
        }
 
        if (flush_chipset)
-               i915_gem_chipset_flush(req->engine->dev);
+               i915_gem_chipset_flush(req->engine->i915);
 
        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();
@@ -1063,17 +1063,17 @@ validate_exec_list(struct drm_device *dev,
        return 0;
 }
 
-static struct intel_context *
+static struct i915_gem_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
                          struct intel_engine_cs *engine, const u32 ctx_id)
 {
-       struct intel_context *ctx = NULL;
+       struct i915_gem_context *ctx = NULL;
        struct i915_ctx_hang_stats *hs;
 
        if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
                return ERR_PTR(-EINVAL);
 
-       ctx = i915_gem_context_get(file->driver_priv, ctx_id);
+       ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
        if (IS_ERR(ctx))
                return ctx;
 
@@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
                return ERR_PTR(-EIO);
        }
 
-       if (i915.enable_execlists && !ctx->engine[engine->id].state) {
-               int ret = intel_lr_context_deferred_alloc(ctx, engine);
-               if (ret) {
-                       DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
-                       return ERR_PTR(ret);
-               }
-       }
-
        return ctx;
 }
 
@@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
                        i915_gem_request_assign(&obj->last_fenced_req, req);
                        if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-                               struct drm_i915_private *dev_priv = to_i915(engine->dev);
+                               struct drm_i915_private *dev_priv = engine->i915;
                                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
                                               &dev_priv->mm.fence_list);
                        }
@@ -1150,7 +1142,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
                            struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret, i;
 
        if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
@@ -1233,7 +1225,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 {
        struct drm_device *dev = params->dev;
        struct intel_engine_cs *engine = params->engine;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u64 exec_start, exec_len;
        int instp_mode;
        u32 instp_mask;
@@ -1336,10 +1328,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
        /* Check whether the file_priv has already selected one ring. */
        if ((int)file_priv->bsd_ring < 0) {
                /* If not, use the ping-pong mechanism to select one. */
-               mutex_lock(&dev_priv->dev->struct_mutex);
+               mutex_lock(&dev_priv->drm.struct_mutex);
                file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
                dev_priv->mm.bsd_ring_dispatch_index ^= 1;
-               mutex_unlock(&dev_priv->dev->struct_mutex);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
        }
 
        return file_priv->bsd_ring;
@@ -1436,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *batch_obj;
        struct drm_i915_gem_exec_object2 shadow_exec_entry;
        struct intel_engine_cs *engine;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        struct i915_address_space *vm;
        struct i915_execbuffer_params params_master; /* XXX: will be removed later */
        struct i915_execbuffer_params *params = &params_master;
@@ -1454,7 +1446,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        dispatch_flags = 0;
        if (args->flags & I915_EXEC_SECURE) {
-               if (!file->is_master || !capable(CAP_SYS_ADMIN))
+               if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
                    return -EPERM;
 
                dispatch_flags |= I915_DISPATCH_SECURE;
@@ -1485,6 +1477,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                dispatch_flags |= I915_DISPATCH_RS;
        }
 
+       /* Take a local wakeref for preparing to dispatch the execbuf as
+        * we expect to access the hardware fairly frequently in the
+        * process. Upon first dispatch, we acquire another prolonged
+        * wakeref that we hold until the GPU has been idle for at least
+        * 100ms.
+        */
        intel_runtime_pm_get(dev_priv);
 
        ret = i915_mutex_lock_interruptible(dev);
@@ -1561,7 +1559,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                                                             batch_obj,
                                                             args->batch_start_offset,
                                                             args->batch_len,
-                                                            file->is_master);
+                                                            drm_is_current_master(file));
                if (IS_ERR(parsed_batch_obj)) {
                        ret = PTR_ERR(parsed_batch_obj);
                        goto err;
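
The wakeref comment added above describes the usual acquire/release discipline in i915_gem_do_execbuffer(). In outline (an illustrative condensation, with the function body elided and the helper name invented):

    static int dispatch_prepare(struct drm_i915_private *dev_priv,
                                struct drm_device *dev)
    {
            int ret;

            intel_runtime_pm_get(dev_priv);

            ret = i915_mutex_lock_interruptible(dev);
            if (ret)
                    goto out;

            /* ... validate, reserve and dispatch the execbuffer ... */

            mutex_unlock(&dev->struct_mutex);
    out:
            /* the local wakeref is always released; the prolonged GPU
             * wakeref taken on first dispatch outlives this function
             */
            intel_runtime_pm_put(dev_priv);
            return ret;
    }
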
index a2b938ec01a7ca6d4854a83213c56aa3f02f35ef..251d7a95af891dc5f4071196bc5168769cd74398 100644 (file)
@@ -58,7 +58,7 @@
 static void i965_write_fence_reg(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t fence_reg_lo, fence_reg_hi;
        int fence_pitch_shift;
 
@@ -117,7 +117,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
 static void i915_write_fence_reg(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
 
        if (obj) {
@@ -156,7 +156,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
 static void i830_write_fence_reg(struct drm_device *dev, int reg,
                                struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t val;
 
        if (obj) {
@@ -193,7 +193,7 @@ inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
 static void i915_gem_write_fence(struct drm_device *dev, int reg,
                                 struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Ensure that all CPU reads are completed before installing a fence
         * and all writes before removing the fence.
@@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int reg = fence_number(dev_priv, fence);
 
        i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
@@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct drm_i915_fence_reg *fence;
        int ret;
 
@@ -311,7 +311,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 static struct drm_i915_fence_reg *
 i915_find_fence_reg(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_fence_reg *reg, *avail;
        int i;
 
@@ -367,7 +367,7 @@ int
 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool enable = obj->tiling_mode != I915_TILING_NONE;
        struct drm_i915_fence_reg *reg;
        int ret;
@@ -433,7 +433,7 @@ bool
 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 {
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+               struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
                struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
 
                WARN_ON(!ggtt_vma ||
@@ -457,7 +457,7 @@ void
 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
 {
        if (obj->fence_reg != I915_FENCE_REG_NONE) {
-               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+               struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
                WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
                dev_priv->fence_regs[obj->fence_reg].pin_count--;
        }
@@ -472,7 +472,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
  */
 void i915_gem_restore_fences(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -549,7 +549,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
 void
 i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
        uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
+       struct page *page;
        int i;
 
        if (obj->bit_17 == NULL)
                return;
 
        i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_page_iter_page(&sg_iter);
+       for_each_sgt_page(page, sgt_iter, obj->pages) {
                char new_bit_17 = page_to_phys(page) >> 17;
                if ((new_bit_17 & 0x1) !=
                    (test_bit(i, obj->bit_17) != 0)) {
@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
+       struct page *page;
        int page_count = obj->base.size >> PAGE_SHIFT;
        int i;
 
@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
        }
 
        i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
+
+       for_each_sgt_page(page, sgt_iter, obj->pages) {
+               if (page_to_phys(page) & (1 << 17))
                        __set_bit(i, obj->bit_17);
                else
                        __clear_bit(i, obj->bit_17);
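
The conversions above follow one pattern: the sg_page_iter triple (sgl, nents, offset) is replaced by the new sgt_iter helpers, which hand back the page or DMA address directly. Schematically (a sketch; do_something() and do_something_else() are stand-ins):

    struct sgt_iter sgt_iter;
    struct page *page;
    dma_addr_t addr;

    /* walk the backing pages of an object's sg_table */
    for_each_sgt_page(page, sgt_iter, obj->pages)
            do_something(page);

    /* or walk the DMA addresses directly */
    for_each_sgt_dma(addr, sgt_iter, obj->pages)
            do_something_else(addr);
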
index 92acdff9dad31817960d83f9c02e2506f79509a2..10f1e32767e60c7f636161485658bc3f9f5ec99f 100644 (file)
  *
  */
 
+static inline struct i915_ggtt *
+i915_vm_to_ggtt(struct i915_address_space *vm)
+{
+       GEM_BUG_ON(!i915_is_ggtt(vm));
+       return container_of(vm, struct i915_ggtt, base);
+}
+
 static int
 i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
@@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
        .type = I915_GGTT_VIEW_ROTATED,
 };
 
-static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
+                               int enable_ppgtt)
 {
        bool has_aliasing_ppgtt;
        bool has_full_ppgtt;
        bool has_full_48bit_ppgtt;
 
-       has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
-       has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
-       has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
+       has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
+       has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
+       has_full_48bit_ppgtt =
+               IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
 
-       if (intel_vgpu_active(dev))
+       if (intel_vgpu_active(dev_priv))
                has_full_ppgtt = false; /* emulation is too hard */
 
+       if (!has_aliasing_ppgtt)
+               return 0;
+
        /*
         * We don't allow disabling PPGTT for gen9+ as it's a requirement for
         * execlists, the sole mechanism available to submit work.
         */
-       if (INTEL_INFO(dev)->gen < 9 &&
-           (enable_ppgtt == 0 || !has_aliasing_ppgtt))
+       if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
                return 0;
 
        if (enable_ppgtt == 1)
@@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 
 #ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
-       if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
+       if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
                DRM_INFO("Disabling PPGTT because VT-d is on\n");
                return 0;
        }
 #endif
 
        /* Early VLV doesn't have this */
-       if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
+       if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
                DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
                return 0;
        }
 
-       if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
+       if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
                return has_full_48bit_ppgtt ? 3 : 2;
        else
                return has_aliasing_ppgtt ? 1 : 0;
@@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev,
 static int gen8_init_scratch(struct i915_address_space *vm)
 {
        struct drm_device *dev = vm->dev;
+       int ret;
 
        vm->scratch_page = alloc_scratch_page(dev);
        if (IS_ERR(vm->scratch_page))
@@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 
        vm->scratch_pt = alloc_pt(dev);
        if (IS_ERR(vm->scratch_pt)) {
-               free_scratch_page(dev, vm->scratch_page);
-               return PTR_ERR(vm->scratch_pt);
+               ret = PTR_ERR(vm->scratch_pt);
+               goto free_scratch_page;
        }
 
        vm->scratch_pd = alloc_pd(dev);
        if (IS_ERR(vm->scratch_pd)) {
-               free_pt(dev, vm->scratch_pt);
-               free_scratch_page(dev, vm->scratch_page);
-               return PTR_ERR(vm->scratch_pd);
+               ret = PTR_ERR(vm->scratch_pd);
+               goto free_pt;
        }
 
        if (USES_FULL_48BIT_PPGTT(dev)) {
                vm->scratch_pdp = alloc_pdp(dev);
                if (IS_ERR(vm->scratch_pdp)) {
-                       free_pd(dev, vm->scratch_pd);
-                       free_pt(dev, vm->scratch_pt);
-                       free_scratch_page(dev, vm->scratch_page);
-                       return PTR_ERR(vm->scratch_pdp);
+                       ret = PTR_ERR(vm->scratch_pdp);
+                       goto free_pd;
                }
        }
 
@@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm)
                gen8_initialize_pdp(vm, vm->scratch_pdp);
 
        return 0;
+
+free_pd:
+       free_pd(dev, vm->scratch_pd);
+free_pt:
+       free_pt(dev, vm->scratch_pt);
+free_scratch_page:
+       free_scratch_page(dev, vm->scratch_page);
+
+       return ret;
 }
 
 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
@@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 
-       if (intel_vgpu_active(vm->dev))
+       if (intel_vgpu_active(to_i915(vm->dev)))
                gen8_ppgtt_notify_vgt(ppgtt, false);
 
        if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
                                                              0, 0,
                                                              GEN8_PML4E_SHIFT);
 
-               if (intel_vgpu_active(ppgtt->base.dev)) {
+               if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
                        ret = gen8_preallocate_top_level_pdps(ppgtt);
                        if (ret)
                                goto free_scratch;
                }
        }
 
-       if (intel_vgpu_active(ppgtt->base.dev))
+       if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
                gen8_ppgtt_notify_vgt(ppgtt, true);
 
        return 0;
@@ -1552,13 +1570,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
        struct i915_page_table *unused;
        gen6_pte_t scratch_pte;
        uint32_t pd_entry;
-       uint32_t  pte, pde, temp;
+       uint32_t  pte, pde;
        uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 
        scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
                                     I915_CACHE_LLC, true, 0);
 
-       gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
+       gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
                u32 expected;
                gen6_pte_t *pt_vaddr;
                const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
@@ -1622,9 +1640,9 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_page_table *pt;
-       uint32_t pde, temp;
+       uint32_t pde;
 
-       gen6_for_each_pde(pt, pd, start, length, temp, pde)
+       gen6_for_each_pde(pt, pd, start, length, pde)
                gen6_write_pde(pd, pde, pt);
 
        /* Make sure write is complete before other code can use this page
@@ -1665,17 +1683,6 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
        return 0;
 }
 
-static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
-
-       I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
-       I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
-       return 0;
-}
-
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
                          struct drm_i915_gem_request *req)
 {
@@ -1713,21 +1720,16 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
                          struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
+       struct drm_i915_private *dev_priv = req->i915;
 
        I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
        I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
-
-       POSTING_READ(RING_PP_DIR_DCLV(engine));
-
        return 0;
 }
 
 static void gen8_ppgtt_enable(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv) {
@@ -1739,7 +1741,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
 
 static void gen7_ppgtt_enable(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        uint32_t ecochk, ecobits;
 
@@ -1764,7 +1766,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
 
 static void gen6_ppgtt_enable(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t ecochk, gab_ctl, ecobits;
 
        ecobits = I915_READ(GAC_ECO_BITS);
@@ -1821,20 +1823,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                                      enum i915_cache_level cache_level, u32 flags)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-       gen6_pte_t *pt_vaddr;
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned act_pt = first_entry / GEN6_PTES;
        unsigned act_pte = first_entry % GEN6_PTES;
-       struct sg_page_iter sg_iter;
+       gen6_pte_t *pt_vaddr = NULL;
+       struct sgt_iter sgt_iter;
+       dma_addr_t addr;
 
-       pt_vaddr = NULL;
-       for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
+       for_each_sgt_dma(addr, sgt_iter, pages) {
                if (pt_vaddr == NULL)
                        pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
 
                pt_vaddr[act_pte] =
-                       vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
-                                      cache_level, true, flags);
+                       vm->pte_encode(addr, cache_level, true, flags);
 
                if (++act_pte == GEN6_PTES) {
                        kunmap_px(ppgtt, pt_vaddr);
@@ -1843,6 +1844,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                        act_pte = 0;
                }
        }
+
        if (pt_vaddr)
                kunmap_px(ppgtt, pt_vaddr);
 }
@@ -1857,7 +1859,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_page_table *pt;
        uint32_t start, length, start_save, length_save;
-       uint32_t pde, temp;
+       uint32_t pde;
        int ret;
 
        if (WARN_ON(start_in + length_in > ppgtt->base.total))
@@ -1873,7 +1875,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
         * need allocation. The second stage marks use ptes within the page
         * tables.
         */
-       gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+       gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
                if (pt != vm->scratch_pt) {
                        WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
                        continue;
@@ -1898,7 +1900,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
        start = start_save;
        length = length_save;
 
-       gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+       gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
                DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
 
                bitmap_zero(tmp_bitmap, GEN6_PTES);
@@ -1967,15 +1969,16 @@ static void gen6_free_scratch(struct i915_address_space *vm)
 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+       struct i915_page_directory *pd = &ppgtt->pd;
+       struct drm_device *dev = vm->dev;
        struct i915_page_table *pt;
        uint32_t pde;
 
        drm_mm_remove_node(&ppgtt->node);
 
-       gen6_for_all_pdes(pt, ppgtt, pde) {
+       gen6_for_all_pdes(pt, pd, pde)
                if (pt != vm->scratch_pt)
-                       free_pt(ppgtt->base.dev, pt);
-       }
+                       free_pt(dev, pt);
 
        gen6_free_scratch(vm);
 }
@@ -2041,9 +2044,9 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
                                  uint64_t start, uint64_t length)
 {
        struct i915_page_table *unused;
-       uint32_t pde, temp;
+       uint32_t pde;
 
-       gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
+       gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
                ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
 }
 
@@ -2055,18 +2058,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        int ret;
 
        ppgtt->base.pte_encode = ggtt->base.pte_encode;
-       if (IS_GEN6(dev)) {
+       if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
                ppgtt->switch_mm = gen6_mm_switch;
-       } else if (IS_HASWELL(dev)) {
+       else if (IS_HASWELL(dev))
                ppgtt->switch_mm = hsw_mm_switch;
-       } else if (IS_GEN7(dev)) {
+       else if (IS_GEN7(dev))
                ppgtt->switch_mm = gen7_mm_switch;
-       } else
+       else
                BUG();
 
-       if (intel_vgpu_active(dev))
-               ppgtt->switch_mm = vgpu_mm_switch;
-
        ret = gen6_ppgtt_alloc(ppgtt);
        if (ret)
                return ret;
@@ -2115,7 +2115,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
                                    struct drm_i915_private *dev_priv)
 {
        drm_mm_init(&vm->mm, vm->start, vm->total);
-       vm->dev = dev_priv->dev;
+       vm->dev = &dev_priv->drm;
        INIT_LIST_HEAD(&vm->active_list);
        INIT_LIST_HEAD(&vm->inactive_list);
        list_add_tail(&vm->global_link, &dev_priv->vm_list);
@@ -2123,7 +2123,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 
 static void gtt_write_workarounds(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* This function is for gtt related workarounds. This function is
         * called on driver load and after a GPU reset, so you can place
@@ -2140,9 +2140,9 @@ static void gtt_write_workarounds(struct drm_device *dev)
                I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
 }
 
-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
        ret = __hw_ppgtt_init(dev, ppgtt);
@@ -2179,20 +2179,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
        return 0;
 }
 
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
-{
-       struct drm_i915_private *dev_priv = req->i915;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-       if (i915.enable_execlists)
-               return 0;
-
-       if (!ppgtt)
-               return 0;
-
-       return ppgtt->switch_mm(ppgtt, req);
-}
-
 struct i915_hw_ppgtt *
 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
 {
@@ -2257,8 +2243,8 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
        if (unlikely(ggtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
-               if (i915_gpu_idle(dev_priv->dev)) {
-                       DRM_ERROR("Couldn't idle GPU\n");
+               if (i915_gem_wait_for_idle(dev_priv)) {
+                       DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
@@ -2275,12 +2261,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
                dev_priv->mm.interruptible = interruptible;
 }
 
-void i915_check_and_clear_faults(struct drm_device *dev)
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
 
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_INFO(dev_priv)->gen < 6)
                return;
 
        for_each_engine(engine, dev_priv) {
@@ -2324,7 +2309,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6)
                return;
 
-       i915_check_and_clear_faults(dev);
+       i915_check_and_clear_faults(dev_priv);
 
        ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
                             true);
@@ -2352,29 +2337,49 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
 #endif
 }
 
+static void gen8_ggtt_insert_page(struct i915_address_space *vm,
+                                 dma_addr_t addr,
+                                 uint64_t offset,
+                                 enum i915_cache_level level,
+                                 u32 unused)
+{
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       gen8_pte_t __iomem *pte =
+               (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+               (offset >> PAGE_SHIFT);
+       int rpm_atomic_seq;
+
+       rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
+       gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
+
+       I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+       assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+}
+
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
                                     uint64_t start,
                                     enum i915_cache_level level, u32 unused)
 {
        struct drm_i915_private *dev_priv = to_i915(vm->dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       unsigned first_entry = start >> PAGE_SHIFT;
-       gen8_pte_t __iomem *gtt_entries =
-               (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
-       int i = 0;
-       struct sg_page_iter sg_iter;
-       dma_addr_t addr = 0; /* shut up gcc */
+       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+       struct sgt_iter sgt_iter;
+       gen8_pte_t __iomem *gtt_entries;
+       gen8_pte_t gtt_entry;
+       dma_addr_t addr;
        int rpm_atomic_seq;
+       int i = 0;
 
        rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
-       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
-               addr = sg_dma_address(sg_iter.sg) +
-                       (sg_iter.sg_pgoffset << PAGE_SHIFT);
-               gen8_set_pte(&gtt_entries[i],
-                            gen8_pte_encode(addr, level, true));
-               i++;
+       gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+       for_each_sgt_dma(addr, sgt_iter, st) {
+               gtt_entry = gen8_pte_encode(addr, level, true);
+               gen8_set_pte(&gtt_entries[i++], gtt_entry);
        }
 
        /*
@@ -2385,8 +2390,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
         * hardware should work, we must keep this posting read for paranoia.
         */
        if (i != 0)
-               WARN_ON(readq(&gtt_entries[i-1])
-                       != gen8_pte_encode(addr, level, true));
+               WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
 
        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
@@ -2424,6 +2428,28 @@ static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
        stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
 }
 
+static void gen6_ggtt_insert_page(struct i915_address_space *vm,
+                                 dma_addr_t addr,
+                                 uint64_t offset,
+                                 enum i915_cache_level level,
+                                 u32 flags)
+{
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       gen6_pte_t __iomem *pte =
+               (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
+               (offset >> PAGE_SHIFT);
+       int rpm_atomic_seq;
+
+       rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
+       iowrite32(vm->pte_encode(addr, level, true, flags), pte);
+
+       I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+       assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+}
+
 /*
  * Binds an object into the global gtt with the specified cache level. The object
  * will be accessible to the GPU via commands whose operands reference offsets
@@ -2436,21 +2462,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     enum i915_cache_level level, u32 flags)
 {
        struct drm_i915_private *dev_priv = to_i915(vm->dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       unsigned first_entry = start >> PAGE_SHIFT;
-       gen6_pte_t __iomem *gtt_entries =
-               (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
-       int i = 0;
-       struct sg_page_iter sg_iter;
-       dma_addr_t addr = 0;
+       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+       struct sgt_iter sgt_iter;
+       gen6_pte_t __iomem *gtt_entries;
+       gen6_pte_t gtt_entry;
+       dma_addr_t addr;
        int rpm_atomic_seq;
+       int i = 0;
 
        rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
-       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
-               addr = sg_page_iter_dma_address(&sg_iter);
-               iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
-               i++;
+       gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+
+       for_each_sgt_dma(addr, sgt_iter, st) {
+               gtt_entry = vm->pte_encode(addr, level, true, flags);
+               iowrite32(gtt_entry, &gtt_entries[i++]);
        }
 
        /* XXX: This serves as a posting read to make sure that the PTE has
@@ -2459,10 +2485,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
         * of NUMA access patterns. Therefore, even with the way we assume
         * hardware should work, we must keep this posting read for paranoia.
         */
-       if (i != 0) {
-               unsigned long gtt = readl(&gtt_entries[i-1]);
-               WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
-       }
+       if (i != 0)
+               WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
 
        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
@@ -2474,13 +2498,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
        assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
+static void nop_clear_range(struct i915_address_space *vm,
+                           uint64_t start,
+                           uint64_t length,
+                           bool use_scratch)
+{
+}
+
 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                                  uint64_t start,
                                  uint64_t length,
                                  bool use_scratch)
 {
        struct drm_i915_private *dev_priv = to_i915(vm->dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned num_entries = length >> PAGE_SHIFT;
        gen8_pte_t scratch_pte, __iomem *gtt_base =
@@ -2512,7 +2543,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  bool use_scratch)
 {
        struct drm_i915_private *dev_priv = to_i915(vm->dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned num_entries = length >> PAGE_SHIFT;
        gen6_pte_t scratch_pte, __iomem *gtt_base =
@@ -2538,12 +2569,30 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
        assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
+static void i915_ggtt_insert_page(struct i915_address_space *vm,
+                                 dma_addr_t addr,
+                                 uint64_t offset,
+                                 enum i915_cache_level cache_level,
+                                 u32 unused)
+{
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
+       unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+               AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+       int rpm_atomic_seq;
+
+       rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
+       intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
+
+       assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+}
+
 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *pages,
                                     uint64_t start,
                                     enum i915_cache_level cache_level, u32 unused)
 {
-       struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
        unsigned int flags = (cache_level == I915_CACHE_NONE) ?
                AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
        int rpm_atomic_seq;
@@ -2561,7 +2610,7 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
                                  uint64_t length,
                                  bool unused)
 {
-       struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(vm->dev);
        unsigned first_entry = start >> PAGE_SHIFT;
        unsigned num_entries = length >> PAGE_SHIFT;
        int rpm_atomic_seq;
@@ -2642,7 +2691,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
        struct drm_device *dev = vma->vm->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = vma->obj;
        const uint64_t size = min_t(uint64_t,
                                    obj->base.size,
@@ -2668,7 +2717,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool interruptible;
 
        interruptible = do_idling(dev_priv);
@@ -2727,11 +2776,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
        i915_address_space_init(&ggtt->base, dev_priv);
        ggtt->base.total += PAGE_SIZE;
 
-       if (intel_vgpu_active(dev)) {
-               ret = intel_vgt_balloon(dev);
-               if (ret)
-                       return ret;
-       }
+       ret = intel_vgt_balloon(dev_priv);
+       if (ret)
+               return ret;
 
        if (!HAS_LLC(dev))
                ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
@@ -2831,8 +2878,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
        i915_gem_cleanup_stolen(dev);
 
        if (drm_mm_initialized(&ggtt->base.mm)) {
-               if (intel_vgpu_active(dev))
-                       intel_vgt_deballoon();
+               intel_vgt_deballoon(dev_priv);
 
                drm_mm_takedown(&ggtt->base.mm);
                list_del(&ggtt->base.global_link);
@@ -3069,13 +3115,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
        ret = ggtt_probe_common(dev, ggtt->size);
 
-       ggtt->base.clear_range = gen8_ggtt_clear_range;
-       if (IS_CHERRYVIEW(dev_priv))
-               ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
-       else
-               ggtt->base.insert_entries = gen8_ggtt_insert_entries;
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
+       ggtt->base.insert_page = gen8_ggtt_insert_page;
+       ggtt->base.clear_range = nop_clear_range;
+       if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+               ggtt->base.clear_range = gen8_ggtt_clear_range;
+
+       ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+       if (IS_CHERRYVIEW(dev_priv))
+               ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
 
        return ret;
 }
@@ -3108,6 +3157,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
        ret = ggtt_probe_common(dev, ggtt->size);
 
        ggtt->base.clear_range = gen6_ggtt_clear_range;
+       ggtt->base.insert_page = gen6_ggtt_insert_page;
        ggtt->base.insert_entries = gen6_ggtt_insert_entries;
        ggtt->base.bind_vma = ggtt_bind_vma;
        ggtt->base.unbind_vma = ggtt_unbind_vma;
@@ -3129,7 +3179,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
-       ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
+       ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
        if (!ret) {
                DRM_ERROR("failed to set up gmch\n");
                return -EIO;
@@ -3138,7 +3188,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
                      &ggtt->mappable_base, &ggtt->mappable_end);
 
-       ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
+       ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
+       ggtt->base.insert_page = i915_ggtt_insert_page;
        ggtt->base.insert_entries = i915_ggtt_insert_entries;
        ggtt->base.clear_range = i915_ggtt_clear_range;
        ggtt->base.bind_vma = ggtt_bind_vma;
@@ -3219,14 +3270,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
        if (intel_iommu_gfx_mapped)
                DRM_INFO("VT-d active for gfx access\n");
 #endif
-       /*
-        * i915.enable_ppgtt is read-only, so do an early pass to validate the
-        * user's requested state against the hardware/driver capabilities.  We
-        * do this now so that we can print out any log messages once rather
-        * than every time we check intel_enable_ppgtt().
-        */
-       i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
-       DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
        return 0;
 
@@ -3250,9 +3293,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
-       bool flush;
 
-       i915_check_and_clear_faults(dev);
+       i915_check_and_clear_faults(dev_priv);
 
        /* First fill our portion of the GTT with scratch pages */
        ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
@@ -3260,19 +3302,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
        /* Cache flush objects bound into GGTT and rebind them. */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               flush = false;
                list_for_each_entry(vma, &obj->vma_list, obj_link) {
                        if (vma->vm != &ggtt->base)
                                continue;
 
                        WARN_ON(i915_vma_bind(vma, obj->cache_level,
                                              PIN_UPDATE));
-
-                       flush = true;
                }
 
-               if (flush)
-                       i915_gem_clflush_object(obj, obj->pin_display);
+               if (obj->pin_display)
+                       WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
        }
 
        if (INTEL_INFO(dev)->gen >= 8) {
@@ -3398,9 +3437,11 @@ static struct sg_table *
 intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
                          struct drm_i915_gem_object *obj)
 {
+       const size_t n_pages = obj->base.size / PAGE_SIZE;
        unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
        unsigned int size_pages_uv;
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
+       dma_addr_t dma_addr;
        unsigned long i;
        dma_addr_t *page_addr_list;
        struct sg_table *st;
@@ -3409,7 +3450,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
        int ret = -ENOMEM;
 
        /* Allocate a temporary list of source pages for random access. */
-       page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+       page_addr_list = drm_malloc_gfp(n_pages,
                                        sizeof(dma_addr_t),
                                        GFP_TEMPORARY);
        if (!page_addr_list)
@@ -3432,11 +3473,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
 
        /* Populate source page list from the object. */
        i = 0;
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
-               i++;
-       }
+       for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
+               page_addr_list[i++] = dma_addr;
 
+       GEM_BUG_ON(i != n_pages);
        st->nents = 0;
        sg = st->sgl;
 
@@ -3634,3 +3674,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
                return obj->base.size;
        }
 }
+
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
+{
+       void __iomem *ptr;
+
+       lockdep_assert_held(&vma->vm->dev->struct_mutex);
+       if (WARN_ON(!vma->obj->map_and_fenceable))
+               return ERR_PTR(-ENODEV);
+
+       GEM_BUG_ON(!vma->is_ggtt);
+       GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+
+       ptr = vma->iomap;
+       if (ptr == NULL) {
+               ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+                                       vma->node.start,
+                                       vma->node.size);
+               if (ptr == NULL)
+                       return ERR_PTR(-ENOMEM);
+
+               vma->iomap = ptr;
+       }
+
+       vma->pin_count++;
+       return ptr;
+}
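
A usage sketch for the new iomap helper above (the caller below is hypothetical; i915_vma_unpin_iomap() is its inline counterpart declared in i915_gem_gtt.h, whose diff follows):

    /* Hypothetical caller: CPU write through the mappable aperture. */
    static int write_through_aperture(struct i915_vma *vma, u32 value)
    {
            void __iomem *ptr;

            /* caller must hold struct_mutex; VMA must be map_and_fenceable */
            ptr = i915_vma_pin_iomap(vma);
            if (IS_ERR(ptr))
                    return PTR_ERR(ptr);

            writel(value, ptr);     /* WC write into the GGTT mapping */

            i915_vma_unpin_iomap(vma);
            return 0;
    }
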
index 0008543d55f616f1ddf3e0be66c5e9265dc0c98f..aa5f31d1c2edaaa4d41ee220c7b4b33005016ae8 100644 (file)
@@ -34,6 +34,8 @@
 #ifndef __I915_GEM_GTT_H__
 #define __I915_GEM_GTT_H__
 
+#include <linux/io-mapping.h>
+
 struct drm_i915_file_private;
 
 typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
        struct drm_mm_node node;
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm;
+       void __iomem *iomap;
 
        /** Flags and address space this VMA is bound to */
 #define GLOBAL_BIND    (1<<0)
@@ -316,6 +319,11 @@ struct i915_address_space {
                            uint64_t start,
                            uint64_t length,
                            bool use_scratch);
+       void (*insert_page)(struct i915_address_space *vm,
+                           dma_addr_t addr,
+                           uint64_t offset,
+                           enum i915_cache_level cache_level,
+                           u32 flags);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct sg_table *st,
                               uint64_t start,
@@ -382,27 +390,27 @@ struct i915_hw_ppgtt {
        void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
 
-/* For each pde iterates over every pde between from start until start + length.
- * If start, and start+length are not perfectly divisible, the macro will round
- * down, and up as needed. The macro modifies pde, start, and length. Dev is
- * only used to differentiate shift values. Temp is temp.  On gen6/7, start = 0,
- * and length = 2G effectively iterates over every PDE in the system.
- *
- * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+/*
+ * gen6_for_each_pde() iterates over every pde from start until start+length.
+ * If start and start+length are not PDE-aligned, the macro will round
+ * down and up as needed. Start=0 and length=2G effectively iterate over
+ * every PDE in the system. The macro modifies ALL its parameters except 'pd',
+ * so each of the other parameters should preferably be a simple variable, or
+ * at most an lvalue with no side-effects!
  */
-#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
-       for (iter = gen6_pde_index(start); \
-            length > 0 && iter < I915_PDES ? \
-                       (pt = (pd)->page_table[iter]), 1 : 0; \
-            iter++, \
-            temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
-            temp = min_t(unsigned, temp, length), \
-            start += temp, length -= temp)
-
-#define gen6_for_all_pdes(pt, ppgtt, iter)  \
-       for (iter = 0;          \
-            pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
-            iter++)
+#define gen6_for_each_pde(pt, pd, start, length, iter)                 \
+       for (iter = gen6_pde_index(start);                              \
+            length > 0 && iter < I915_PDES &&                          \
+               (pt = (pd)->page_table[iter], true);                    \
+            ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);         \
+                   temp = min(temp - start, length);                   \
+                   start += temp, length -= temp; }), ++iter)
+
+#define gen6_for_all_pdes(pt, pd, iter)                                        \
+       for (iter = 0;                                                  \
+            iter < I915_PDES &&                                        \
+               (pt = (pd)->page_table[iter], true);                    \
+            ++iter)
 
 static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
 {
@@ -518,9 +526,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
 void i915_gem_init_ggtt(struct drm_device *dev);
 void i915_ggtt_cleanup_hw(struct drm_device *dev);
 
-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 int i915_ppgtt_init_hw(struct drm_device *dev);
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
                                        struct drm_i915_file_private *fpriv);
@@ -535,7 +541,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
                kref_put(&ppgtt->ref, i915_ppgtt_release);
 }
 
-void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 
@@ -560,4 +566,36 @@ size_t
 i915_ggtt_view_size(struct drm_i915_gem_object *obj,
                    const struct i915_ggtt_view *view);
 
+/**
+ * i915_vma_pin_iomap - calls io_mapping_map_wc() to map the GGTT VMA via the aperture
+ * @vma: VMA to iomap
+ *
+ * The passed-in VMA has to be pinned in the global GTT mappable region.
+ * An extra pin is acquired on the VMA for the returned mapping; the
+ * caller must call i915_vma_unpin_iomap() to relinquish that pin once
+ * the mapping is no longer required.
+ *
+ * Callers must hold the struct_mutex.
+ *
+ * Returns a valid iomapped pointer or an ERR_PTR on failure.
+ */
+void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
+
+/**
+ * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap()
+ * @vma: VMA to unpin
+ *
+ * Drops the pin taken by a previous call to i915_vma_pin_iomap().
+ *
+ * Callers must hold the struct_mutex. This function may only be called on
+ * a VMA previously iomapped by the caller with i915_vma_pin_iomap().
+ */
+static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+       lockdep_assert_held(&vma->vm->dev->struct_mutex);
+       GEM_BUG_ON(vma->pin_count == 0);
+       GEM_BUG_ON(vma->iomap == NULL);
+       vma->pin_count--;
+}
+
 #endif
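
As a hedged usage sketch of the rewritten iterator (example_walk_pdes and its debug message are hypothetical): because the macro clobbers all of its parameters except 'pd', the caller passes plain local variables.

	/* Illustrative only: visit each PDE covering [start, start + length).
	 * 'start' and 'length' are updated by the macro, so by-value copies
	 * are used here.
	 */
	static void example_walk_pdes(struct i915_page_directory *pd,
				      uint32_t start, uint32_t length)
	{
		struct i915_page_table *pt;
		uint32_t pde;

		gen6_for_each_pde(pt, pd, start, length, pde) {
			if (pt)
				DRM_DEBUG("pde %u -> pt %p\n", pde, pt);
		}
	}
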
index 71611bf21fcae34ed6f7869a2b464b25ece62f76..f75bbd67a13a2805768cb5ffb9735a3df49b8957 100644 (file)
@@ -29,7 +29,7 @@
 #include "intel_renderstate.h"
 
 static const struct intel_renderstate_rodata *
-render_state_get_rodata(struct drm_device *dev, const int gen)
+render_state_get_rodata(const int gen)
 {
        switch (gen) {
        case 6:
@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
        return NULL;
 }
 
-static int render_state_init(struct render_state *so, struct drm_device *dev)
+static int render_state_init(struct render_state *so,
+                            struct drm_i915_private *dev_priv)
 {
        int ret;
 
-       so->gen = INTEL_INFO(dev)->gen;
-       so->rodata = render_state_get_rodata(dev, so->gen);
+       so->gen = INTEL_GEN(dev_priv);
+       so->rodata = render_state_get_rodata(so->gen);
        if (so->rodata == NULL)
                return 0;
 
        if (so->rodata->batch_items * 4 > 4096)
                return -EINVAL;
 
-       so->obj = i915_gem_alloc_object(dev, 4096);
-       if (so->obj == NULL)
-               return -ENOMEM;
+       so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
+       if (IS_ERR(so->obj))
+               return PTR_ERR(so->obj);
 
        ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
        if (ret)
@@ -93,6 +94,7 @@ free_gem:
 
 static int render_state_setup(struct render_state *so)
 {
+       struct drm_device *dev = so->obj->base.dev;
        const struct intel_renderstate_rodata *rodata = so->rodata;
        unsigned int i = 0, reloc_index = 0;
        struct page *page;
@@ -134,6 +136,33 @@ static int render_state_setup(struct render_state *so)
 
        so->aux_batch_offset = i * sizeof(u32);
 
+       if (HAS_POOLED_EU(dev)) {
+               /*
+                * We always program the 3x6 pool config, but depending on
+                * which subslice is disabled the HW drops down to the
+                * appropriate config shown below.
+                *
+                * In the table below, a 2x6 config always refers to the
+                * fused-down version; native 2x6 is not available and can
+                * be ignored.
+                *
+                * SNo  subslices config                eu pool configuration
+                * -----------------------------------------------------------
+                * 1    3 subslices enabled (3x6)  -    0x00777000  (9+9)
+                * 2    ss0 disabled (2x6)         -    0x00777000  (3+9)
+                * 3    ss1 disabled (2x6)         -    0x00770000  (6+6)
+                * 4    ss2 disabled (2x6)         -    0x00007000  (9+3)
+                */
+               u32 eu_pool_config = 0x00777000;
+
+               OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
+               OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
+               OUT_BATCH(d, i, eu_pool_config);
+               OUT_BATCH(d, i, 0);
+               OUT_BATCH(d, i, 0);
+               OUT_BATCH(d, i, 0);
+       }
+
        OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
        so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
 
@@ -177,7 +206,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
        if (WARN_ON(engine->id != RCS))
                return -ENOENT;
 
-       ret = render_state_init(so, engine->dev);
+       ret = render_state_init(so, engine->i915);
        if (ret)
                return ret;
 
index 425e721aac58e5cd0d7dd3d80f984f118fcfc960..6f10b421487b843cc03f0defe666c06ef060fe81 100644 (file)
@@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
        if (!mutex_is_locked(mutex))
                return false;
 
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
        return mutex->owner == task;
 #else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
        unsigned long count = 0;
 
        trace_i915_gem_shrink(dev_priv, target, flags);
-       i915_gem_retire_requests(dev_priv->dev);
+       i915_gem_retire_requests(dev_priv);
+
+       /*
+        * Unbinding objects will require HW access, so let's not wake the
+        * device just to recover a little memory. If absolutely necessary,
+        * we will force the wake during the OOM notifier.
+        */
+       if ((flags & I915_SHRINK_BOUND) &&
+           !intel_runtime_pm_get_if_in_use(dev_priv))
+               flags &= ~I915_SHRINK_BOUND;
 
        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                list_splice(&still_in_list, phase->list);
        }
 
-       i915_gem_retire_requests(dev_priv->dev);
+       if (flags & I915_SHRINK_BOUND)
+               intel_runtime_pm_put(dev_priv);
+
+       i915_gem_retire_requests(dev_priv);
 
        return count;
 }
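
The pattern above (take a runtime-pm reference only if the device is already awake, and drop it when done) can be read in isolation as the sketch below; example_shrink_bound is a hypothetical wrapper, not part of this patch.

	/* Hedged sketch: avoid waking the device just to shrink bound objects. */
	static unsigned long example_shrink_bound(struct drm_i915_private *dev_priv,
						  unsigned long target,
						  unsigned flags)
	{
		unsigned long count;

		if ((flags & I915_SHRINK_BOUND) &&
		    !intel_runtime_pm_get_if_in_use(dev_priv))
			flags &= ~I915_SHRINK_BOUND;	/* device asleep: skip bound */

		count = i915_gem_shrink(dev_priv, target, flags);

		if (flags & I915_SHRINK_BOUND)
			intel_runtime_pm_put(dev_priv);	/* balance the wakeref */

		return count;
	}
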
@@ -245,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_i915_gem_object *obj;
        unsigned long count;
        bool unlock;
@@ -253,6 +265,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
        if (!i915_gem_shrinker_lock(dev, &unlock))
                return 0;
 
+       i915_gem_retire_requests(dev_priv);
+
        count = 0;
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (can_release_pages(obj))
@@ -274,7 +288,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
        struct drm_i915_private *dev_priv =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        unsigned long freed;
        bool unlock;
 
@@ -309,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
 {
        unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
 
-       while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
+       while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
                schedule_timeout_killable(1);
                if (fatal_signal_pending(current))
                        return false;
@@ -330,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
 {
        dev_priv->mm.interruptible = slu->was_interruptible;
        if (slu->unlock)
-               mutex_unlock(&dev_priv->dev->struct_mutex);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static int
@@ -345,7 +359,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;
 
+       intel_runtime_pm_get(dev_priv);
        freed_pages = i915_gem_shrink_all(dev_priv);
+       intel_runtime_pm_put(dev_priv);
 
        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
@@ -386,17 +402,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
        struct drm_i915_private *dev_priv =
                container_of(nb, struct drm_i915_private, mm.vmap_notifier);
        struct shrinker_lock_uninterruptible slu;
-       unsigned long freed_pages;
+       struct i915_vma *vma, *next;
+       unsigned long freed_pages = 0;
+       int ret;
 
        if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
                return NOTIFY_DONE;
 
-       freed_pages = i915_gem_shrink(dev_priv, -1UL,
-                                     I915_SHRINK_BOUND |
-                                     I915_SHRINK_UNBOUND |
-                                     I915_SHRINK_ACTIVE |
-                                     I915_SHRINK_VMAPS);
+       /* Force everything onto the inactive lists */
+       ret = i915_gem_wait_for_idle(dev_priv);
+       if (ret)
+               goto out;
+
+       intel_runtime_pm_get(dev_priv);
+       freed_pages += i915_gem_shrink(dev_priv, -1UL,
+                                      I915_SHRINK_BOUND |
+                                      I915_SHRINK_UNBOUND |
+                                      I915_SHRINK_ACTIVE |
+                                      I915_SHRINK_VMAPS);
+       intel_runtime_pm_put(dev_priv);
+
+       /* We also want to clear any cached iomaps as they wrap vmap */
+       list_for_each_entry_safe(vma, next,
+                                &dev_priv->ggtt.base.inactive_list, vm_link) {
+               unsigned long count = vma->node.size >> PAGE_SHIFT;
+               if (vma->iomap && i915_vma_unbind(vma) == 0)
+                       freed_pages += count;
+       }
 
+out:
        i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
 
        *(unsigned long *)ptr += freed_pages;
index b7ce963fb8f8d23345cab6cf0b6e4e446e6e1b3c..66be299a1486f4f656ba4e3970249a0e5e909aeb 100644 (file)
@@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
                return -ENODEV;
 
        /* See the comment at the drm_mm_init() call for more about this check.
-        * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
-       if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+        * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+        */
+       if (start < 4096 && (IS_GEN8(dev_priv) ||
+                            IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
                start = 4096;
 
        mutex_lock(&dev_priv->mm.stolen_lock);
@@ -109,9 +111,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen >= 3) {
                u32 bsm;
 
-               pci_read_config_dword(dev->pdev, BSM, &bsm);
+               pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
 
-               base = bsm & BSM_MASK;
+               base = bsm & INTEL_BSM_MASK;
        } else if (IS_I865G(dev)) {
                u16 toud = 0;
 
@@ -268,7 +270,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 
 void i915_gem_cleanup_stolen(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;
@@ -548,7 +550,7 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
        if (obj->stolen) {
                i915_gem_stolen_remove_node(dev_priv, obj->stolen);
@@ -599,7 +601,7 @@ cleanup:
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;
index b9bdb34032cdcd861528446aec4ab625e3894050..8030199731dbb5856bdce6f38b9cd386550e98cf 100644 (file)
@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
        if (INTEL_INFO(obj->base.dev)->gen >= 4)
                return true;
 
-       if (INTEL_INFO(obj->base.dev)->gen == 3) {
+       if (IS_GEN3(obj->base.dev)) {
                if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
                        return false;
        } else {
@@ -162,7 +162,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                   struct drm_file *file)
 {
        struct drm_i915_gem_set_tiling *args = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        int ret = 0;
 
@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 */
                if (obj->map_and_fenceable &&
                    !i915_gem_object_fence_ok(obj, args->tiling_mode))
-                       ret = i915_gem_object_ggtt_unbind(obj);
+                       ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
 
                if (ret == 0) {
                        if (obj->pages &&
@@ -294,7 +294,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
                   struct drm_file *file)
 {
        struct drm_i915_gem_get_tiling *args = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
 
        obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
index 32d9726e38b11878915ceef5c9f556fac1910459..2314c88323e39861152485ccfd1b8643ce8bc41f 100644 (file)
@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 static void
 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 {
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
+       struct page *page;
 
        BUG_ON(obj->userptr.work != NULL);
        __i915_gem_userptr_set_active(obj, false);
@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 
        i915_gem_gtt_finish_object(obj);
 
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_page_iter_page(&sg_iter);
-
+       for_each_sgt_page(page, sgt_iter, obj->pages) {
                if (obj->dirty)
                        set_page_dirty(page);
 
@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
        return 0;
 }
 
-int
-i915_gem_init_userptr(struct drm_device *dev)
+void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        mutex_init(&dev_priv->mm_lock);
        hash_init(dev_priv->mm_structs);
-       return 0;
 }
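
The conversions in this series all follow the same new scatterlist idiom; a hedged sketch (example_iterate is a hypothetical name):

	/* Illustrative only: the struct sgt_iter based iterators replace
	 * for_each_sg_page(). One walks CPU pages, the other DMA addresses.
	 */
	static void example_iterate(struct drm_i915_gem_object *obj)
	{
		struct sgt_iter iter;
		struct page *page;
		dma_addr_t dma;

		for_each_sgt_page(page, iter, obj->pages)
			set_page_dirty(page);

		for_each_sgt_dma(dma, iter, obj->pages)
			DRM_DEBUG("dma address %pad\n", &dma);
	}
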
index 89725c9efc2580c2adc061365456b3624bfbffdb..9d73d2216adc6925f09b97f76a71690daf213374 100644 (file)
@@ -332,7 +332,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                            const struct i915_error_state_file_priv *error_priv)
 {
        struct drm_device *dev = error_priv->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_error_state *error = error_priv->error;
        struct drm_i915_error_object *obj;
        int i, j, offset, elt;
@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
        }
 
-       if (INTEL_INFO(dev)->gen == 7)
+       if (IS_GEN7(dev))
                err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
        for (i = 0; i < ARRAY_SIZE(error->ring); i++)
@@ -463,6 +463,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                        }
                }
 
+               if (error->ring[i].num_waiters) {
+                       err_printf(m, "%s --- %d waiters\n",
+                                  dev_priv->engine[i].name,
+                                  error->ring[i].num_waiters);
+                       for (j = 0; j < error->ring[i].num_waiters; j++) {
+                               err_printf(m, " seqno 0x%08x for %s [%d]\n",
+                                          error->ring[i].waiters[j].seqno,
+                                          error->ring[i].waiters[j].comm,
+                                          error->ring[i].waiters[j].pid);
+                       }
+               }
+
                if ((obj = error->ring[i].ringbuffer)) {
                        err_printf(m, "%s --- ringbuffer = 0x%08x\n",
                                   dev_priv->engine[i].name,
@@ -488,7 +500,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                                           hws_page[elt+1],
                                           hws_page[elt+2],
                                           hws_page[elt+3]);
-                                       offset += 16;
+                               offset += 16;
                        }
                }
 
@@ -605,8 +617,9 @@ static void i915_error_state_free(struct kref *error_ref)
                i915_error_object_free(error->ring[i].ringbuffer);
                i915_error_object_free(error->ring[i].hws_page);
                i915_error_object_free(error->ring[i].ctx);
-               kfree(error->ring[i].requests);
                i915_error_object_free(error->ring[i].wa_ctx);
+               kfree(error->ring[i].requests);
+               kfree(error->ring[i].waiters);
        }
 
        i915_error_object_free(error->semaphore_obj);
@@ -824,19 +837,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
        return error_code;
 }
 
-static void i915_gem_record_fences(struct drm_device *dev,
+static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       if (IS_GEN3(dev) || IS_GEN2(dev)) {
+       if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ(FENCE_REG(i));
-       } else if (IS_GEN5(dev) || IS_GEN4(dev)) {
+       } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       } else if (INTEL_GEN(dev_priv) >= 6) {
                for (i = 0; i < dev_priv->num_fence_regs; i++)
                        error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
        }
@@ -851,7 +863,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
        struct intel_engine_cs *to;
        enum intel_engine_id id;
 
-       if (!i915_semaphore_is_enabled(dev_priv->dev))
+       if (!i915_semaphore_is_enabled(dev_priv))
                return;
 
        if (!error->semaphore_obj)
@@ -893,31 +905,71 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
        }
 }
 
-static void i915_record_ring_state(struct drm_device *dev,
+static void engine_record_waiters(struct intel_engine_cs *engine,
+                                 struct drm_i915_error_ring *ering)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct drm_i915_error_waiter *waiter;
+       struct rb_node *rb;
+       int count;
+
+       ering->num_waiters = 0;
+       ering->waiters = NULL;
+
+       spin_lock(&b->lock);
+       count = 0;
+       for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
+               count++;
+       spin_unlock(&b->lock);
+
+       waiter = NULL;
+       if (count)
+               waiter = kmalloc_array(count,
+                                      sizeof(struct drm_i915_error_waiter),
+                                      GFP_ATOMIC);
+       if (!waiter)
+               return;
+
+       ering->waiters = waiter;
+
+       spin_lock(&b->lock);
+       for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+               struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+               strcpy(waiter->comm, w->tsk->comm);
+               waiter->pid = w->tsk->pid;
+               waiter->seqno = w->seqno;
+               waiter++;
+
+               if (++ering->num_waiters == count)
+                       break;
+       }
+       spin_unlock(&b->lock);
+}
+
+static void i915_record_ring_state(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error,
                                   struct intel_engine_cs *engine,
                                   struct drm_i915_error_ring *ering)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (INTEL_INFO(dev)->gen >= 6) {
+       if (INTEL_GEN(dev_priv) >= 6) {
                ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
                ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
-               if (INTEL_INFO(dev)->gen >= 8)
+               if (INTEL_GEN(dev_priv) >= 8)
                        gen8_record_semaphore_state(dev_priv, error, engine,
                                                    ering);
                else
                        gen6_record_semaphore_state(dev_priv, engine, ering);
        }
 
-       if (INTEL_INFO(dev)->gen >= 4) {
+       if (INTEL_GEN(dev_priv) >= 4) {
                ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
                ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
                ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
                ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
                ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
                ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
-               if (INTEL_INFO(dev)->gen >= 8) {
+               if (INTEL_GEN(dev_priv) >= 8) {
                        ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
                        ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
                }
@@ -929,20 +981,20 @@ static void i915_record_ring_state(struct drm_device *dev,
                ering->instdone = I915_READ(GEN2_INSTDONE);
        }
 
-       ering->waiting = waitqueue_active(&engine->irq_queue);
+       ering->waiting = intel_engine_has_waiter(engine);
        ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
        ering->acthd = intel_ring_get_active_head(engine);
-       ering->seqno = engine->get_seqno(engine);
+       ering->seqno = intel_engine_get_seqno(engine);
        ering->last_seqno = engine->last_submitted_seqno;
        ering->start = I915_READ_START(engine);
        ering->head = I915_READ_HEAD(engine);
        ering->tail = I915_READ_TAIL(engine);
        ering->ctl = I915_READ_CTL(engine);
 
-       if (I915_NEED_GFX_HWS(dev)) {
+       if (I915_NEED_GFX_HWS(dev_priv)) {
                i915_reg_t mmio;
 
-               if (IS_GEN7(dev)) {
+               if (IS_GEN7(dev_priv)) {
                        switch (engine->id) {
                        default:
                        case RCS:
@@ -958,7 +1010,7 @@ static void i915_record_ring_state(struct drm_device *dev,
                                mmio = VEBOX_HWS_PGA_GEN7;
                                break;
                        }
-               } else if (IS_GEN6(engine->dev)) {
+               } else if (IS_GEN6(engine->i915)) {
                        mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
                } else {
                        /* XXX: gen8 returns to sanity */
@@ -971,18 +1023,18 @@ static void i915_record_ring_state(struct drm_device *dev,
        ering->hangcheck_score = engine->hangcheck.score;
        ering->hangcheck_action = engine->hangcheck.action;
 
-       if (USES_PPGTT(dev)) {
+       if (USES_PPGTT(dev_priv)) {
                int i;
 
                ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
 
-               if (IS_GEN6(dev))
+               if (IS_GEN6(dev_priv))
                        ering->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE_READ(engine));
-               else if (IS_GEN7(dev))
+               else if (IS_GEN7(dev_priv))
                        ering->vm_info.pp_dir_base =
                                I915_READ(RING_PP_DIR_BASE(engine));
-               else if (INTEL_INFO(dev)->gen >= 8)
+               else if (INTEL_GEN(dev_priv) >= 8)
                        for (i = 0; i < 4; i++) {
                                ering->vm_info.pdp[i] =
                                        I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +1050,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
                                           struct drm_i915_error_state *error,
                                           struct drm_i915_error_ring *ering)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_gem_object *obj;
 
        /* Currently render ring is the only HW context user */
@@ -1016,34 +1068,33 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
        }
 }
 
-static void i915_gem_record_rings(struct drm_device *dev,
+static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
                                  struct drm_i915_error_state *error)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_request *request;
        int i, count;
 
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct intel_engine_cs *engine = &dev_priv->engine[i];
-               struct intel_ringbuffer *rbuf;
 
                error->ring[i].pid = -1;
 
-               if (engine->dev == NULL)
+               if (!intel_engine_initialized(engine))
                        continue;
 
                error->ring[i].valid = true;
 
-               i915_record_ring_state(dev, error, engine, &error->ring[i]);
+               i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
+               engine_record_waiters(engine, &error->ring[i]);
 
                request = i915_gem_find_active_request(engine);
                if (request) {
                        struct i915_address_space *vm;
+                       struct intel_ringbuffer *rb;
 
-                       vm = request->ctx && request->ctx->ppgtt ?
-                               &request->ctx->ppgtt->base :
-                               &ggtt->base;
+                       vm = request->ctx->ppgtt ?
+                               &request->ctx->ppgtt->base : &ggtt->base;
 
                        /* We need to copy these to an anonymous buffer
                         * as the simplest method to avoid being overwritten
@@ -1070,26 +1121,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
                                }
                                rcu_read_unlock();
                        }
-               }
 
-               if (i915.enable_execlists) {
-                       /* TODO: This is only a small fix to keep basic error
-                        * capture working, but we need to add more information
-                        * for it to be useful (e.g. dump the context being
-                        * executed).
-                        */
-                       if (request)
-                               rbuf = request->ctx->engine[engine->id].ringbuf;
-                       else
-                               rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
-               } else
-                       rbuf = engine->buffer;
-
-               error->ring[i].cpu_ring_head = rbuf->head;
-               error->ring[i].cpu_ring_tail = rbuf->tail;
+                       error->simulated |=
+                               request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
 
-               error->ring[i].ringbuffer =
-                       i915_error_ggtt_object_create(dev_priv, rbuf->obj);
+                       rb = request->ringbuf;
+                       error->ring[i].cpu_ring_head = rb->head;
+                       error->ring[i].cpu_ring_tail = rb->tail;
+                       error->ring[i].ringbuffer =
+                               i915_error_ggtt_object_create(dev_priv,
+                                                             rb->obj);
+               }
 
                error->ring[i].hws_page =
                        i915_error_ggtt_object_create(dev_priv,
@@ -1234,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
 static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        int i;
 
        /* General organization
@@ -1301,15 +1343,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
        error->eir = I915_READ(EIR);
        error->pgtbl_er = I915_READ(PGTBL_ER);
 
-       i915_get_extra_instdone(dev, error->extra_instdone);
+       i915_get_extra_instdone(dev_priv, error->extra_instdone);
 }
 
-static void i915_error_capture_msg(struct drm_device *dev,
+static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
                                   struct drm_i915_error_state *error,
                                   u32 engine_mask,
                                   const char *error_msg)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 ecode;
        int ring_id = -1, len;
 
@@ -1317,7 +1358,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
 
        len = scnprintf(error->error_msg, sizeof(error->error_msg),
                        "GPU HANG: ecode %d:%d:0x%08x",
-                       INTEL_INFO(dev)->gen, ring_id, ecode);
+                       INTEL_GEN(dev_priv), ring_id, ecode);
 
        if (ring_id != -1 && error->ring[ring_id].pid != -1)
                len += scnprintf(error->error_msg + len,
@@ -1352,14 +1393,17 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
  * out a structure which becomes available in debugfs for user level tools
  * to pick up.
  */
-void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
+void i915_capture_error_state(struct drm_i915_private *dev_priv,
+                             u32 engine_mask,
                              const char *error_msg)
 {
        static bool warned;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_error_state *error;
        unsigned long flags;
 
+       if (READ_ONCE(dev_priv->gpu_error.first_error))
+               return;
+
        /* Account for pipe specific data like PIPE*STAT */
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (!error) {
@@ -1372,23 +1416,25 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
        i915_capture_gen_state(dev_priv, error);
        i915_capture_reg_state(dev_priv, error);
        i915_gem_capture_buffers(dev_priv, error);
-       i915_gem_record_fences(dev, error);
-       i915_gem_record_rings(dev, error);
+       i915_gem_record_fences(dev_priv, error);
+       i915_gem_record_rings(dev_priv, error);
 
        do_gettimeofday(&error->time);
 
-       error->overlay = intel_overlay_capture_error_state(dev);
-       error->display = intel_display_capture_error_state(dev);
+       error->overlay = intel_overlay_capture_error_state(dev_priv);
+       error->display = intel_display_capture_error_state(dev_priv);
 
-       i915_error_capture_msg(dev, error, engine_mask, error_msg);
+       i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
        DRM_INFO("%s\n", error->error_msg);
 
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       if (dev_priv->gpu_error.first_error == NULL) {
-               dev_priv->gpu_error.first_error = error;
-               error = NULL;
+       if (!error->simulated) {
+               spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+               if (!dev_priv->gpu_error.first_error) {
+                       dev_priv->gpu_error.first_error = error;
+                       error = NULL;
+               }
+               spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
        }
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
        if (error) {
                i915_error_state_free(&error->ref);
@@ -1400,7 +1446,8 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
                DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
                DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
                DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
-               DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+               DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+                        dev_priv->drm.primary->index);
                warned = true;
        }
 }
@@ -1408,7 +1455,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
 void i915_error_state_get(struct drm_device *dev,
                          struct i915_error_state_file_priv *error_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        spin_lock_irq(&dev_priv->gpu_error.lock);
        error_priv->error = dev_priv->gpu_error.first_error;
@@ -1426,7 +1473,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
 
 void i915_destroy_error_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_error_state *error;
 
        spin_lock_irq(&dev_priv->gpu_error.lock);
@@ -1450,17 +1497,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 }
 
 /* NB: please notice the memset */
-void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
+                            uint32_t *instdone)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
 
-       if (IS_GEN2(dev) || IS_GEN3(dev))
+       if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
                instdone[0] = I915_READ(GEN2_INSTDONE);
-       else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+       else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
                instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
                instdone[1] = I915_READ(GEN4_INSTDONE1);
-       } else if (INTEL_INFO(dev)->gen >= 7) {
+       } else if (INTEL_GEN(dev_priv) >= 7) {
                instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
                instdone[1] = I915_READ(GEN7_SC_INSTDONE);
                instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
index 80786d9f9ad365c83de4d12b2713608894051cfb..cf5a65be4fe0609247fdfc3147349230e5aa6c48 100644 (file)
 #define   GUC_WOPCM_OFFSET_VALUE         0x80000       /* 512KB */
 #define GUC_MAX_IDLE_COUNT             _MMIO(0xC3E4)
 
+/* Defines WOPCM space available to GuC firmware */
 #define GUC_WOPCM_SIZE                 _MMIO(0xc050)
-#define   GUC_WOPCM_SIZE_VALUE           (0x80 << 12)  /* 512KB */
-
 /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
-#define        GUC_WOPCM_TOP                   (GUC_WOPCM_SIZE_VALUE)
+#define   GUC_WOPCM_TOP                          (0x80 << 12)  /* 512KB */
+#define   BXT_GUC_WOPCM_RC6_RESERVED     (0x10 << 12)  /* 64KB  */
 
 #define GEN8_GT_PM_CONFIG              _MMIO(0x138140)
 #define GEN9LP_GT_PM_CONFIG            _MMIO(0x138140)
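
The shifted constants above encode page counts in units of 4KB; a hedged compile-time check of the arithmetic, placed in a hypothetical helper:

	/* Illustrative only: (count << 12) is count * 4KB. */
	static inline void example_check_wopcm_sizes(void)
	{
		BUILD_BUG_ON((0x80 << 12) != 512 * 1024);	/* GUC_WOPCM_TOP */
		BUILD_BUG_ON((0x10 << 12) != 64 * 1024);	/* BXT RC6 reserve */
	}
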
index d40c13fb6643773c41774b7f8519a804bf33b39b..2112e029db6a929ea7f55c4266ffbe24af860d83 100644 (file)
@@ -97,8 +97,14 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
 
        I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
 
-       /* No HOST2GUC command should take longer than 10ms */
-       ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10);
+       /*
+        * Fast commands should complete in less than 10us, so sample quickly
+        * up to that length of time, then switch to a slower sleep-wait loop.
+        * No HOST2GUC command should ever take longer than 10ms.
+        */
+       ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
+       if (ret)
+               ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
        if (status != GUC2HOST_STATUS_SUCCESS) {
                /*
                 * Either the GuC explicitly returned an error (which
@@ -153,13 +159,11 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
                                     struct i915_guc_client *client)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct drm_device *dev = dev_priv->dev;
        u32 data[2];
 
        data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,bxt */
-       if (!intel_enable_rc6(dev) ||
-           NEEDS_WaRsDisableCoarsePowerGating(dev))
+       if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                data[1] = 0;
        else
                /* bit 0 and 1 are for Render and Media domain separately */
@@ -175,94 +179,88 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
  * client object which contains the page being used for the doorbell
  */
 
-static void guc_init_doorbell(struct intel_guc *guc,
-                             struct i915_guc_client *client)
+static int guc_update_doorbell_id(struct intel_guc *guc,
+                                 struct i915_guc_client *client,
+                                 u16 new_id)
 {
+       struct sg_table *sg = guc->ctx_pool_obj->pages;
+       void *doorbell_bitmap = guc->doorbell_bitmap;
        struct guc_doorbell_info *doorbell;
+       struct guc_context_desc desc;
+       size_t len;
 
        doorbell = client->client_base + client->doorbell_offset;
 
-       doorbell->db_status = GUC_DOORBELL_ENABLED;
-       doorbell->cookie = 0;
-}
-
-static int guc_ring_doorbell(struct i915_guc_client *gc)
-{
-       struct guc_process_desc *desc;
-       union guc_doorbell_qw db_cmp, db_exc, db_ret;
-       union guc_doorbell_qw *db;
-       int attempt = 2, ret = -EAGAIN;
-
-       desc = gc->client_base + gc->proc_desc_offset;
-
-       /* Update the tail so it is visible to GuC */
-       desc->tail = gc->wq_tail;
-
-       /* current cookie */
-       db_cmp.db_status = GUC_DOORBELL_ENABLED;
-       db_cmp.cookie = gc->cookie;
-
-       /* cookie to be updated */
-       db_exc.db_status = GUC_DOORBELL_ENABLED;
-       db_exc.cookie = gc->cookie + 1;
-       if (db_exc.cookie == 0)
-               db_exc.cookie = 1;
-
-       /* pointer of current doorbell cacheline */
-       db = gc->client_base + gc->doorbell_offset;
-
-       while (attempt--) {
-               /* lets ring the doorbell */
-               db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
-                       db_cmp.value_qw, db_exc.value_qw);
-
-               /* if the exchange was successfully executed */
-               if (db_ret.value_qw == db_cmp.value_qw) {
-                       /* db was successfully rung */
-                       gc->cookie = db_exc.cookie;
-                       ret = 0;
-                       break;
-               }
+       if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
+           test_bit(client->doorbell_id, doorbell_bitmap)) {
+               /* Deactivate the old doorbell */
+               doorbell->db_status = GUC_DOORBELL_DISABLED;
+               (void)host2guc_release_doorbell(guc, client);
+               __clear_bit(client->doorbell_id, doorbell_bitmap);
+       }
 
-               /* XXX: doorbell was lost and need to acquire it again */
-               if (db_ret.db_status == GUC_DOORBELL_DISABLED)
-                       break;
+       /* Update the GuC's idea of the doorbell ID */
+       len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
+                            sizeof(desc) * client->ctx_index);
+       if (len != sizeof(desc))
+               return -EFAULT;
+       desc.db_id = new_id;
+       len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
+                            sizeof(desc) * client->ctx_index);
+       if (len != sizeof(desc))
+               return -EFAULT;
 
-               DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
-                         db_cmp.cookie, db_ret.cookie);
+       client->doorbell_id = new_id;
+       if (new_id == GUC_INVALID_DOORBELL_ID)
+               return 0;
 
-               /* update the cookie to newly read cookie from GuC */
-               db_cmp.cookie = db_ret.cookie;
-               db_exc.cookie = db_ret.cookie + 1;
-               if (db_exc.cookie == 0)
-                       db_exc.cookie = 1;
-       }
+       /* Activate the new doorbell */
+       __set_bit(new_id, doorbell_bitmap);
+       doorbell->cookie = 0;
+       doorbell->db_status = GUC_DOORBELL_ENABLED;
+       return host2guc_allocate_doorbell(guc, client);
+}
 
-       return ret;
+static int guc_init_doorbell(struct intel_guc *guc,
+                             struct i915_guc_client *client,
+                             uint16_t db_id)
+{
+       return guc_update_doorbell_id(guc, client, db_id);
 }
 
 static void guc_disable_doorbell(struct intel_guc *guc,
                                 struct i915_guc_client *client)
 {
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct guc_doorbell_info *doorbell;
-       i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
-       int value;
-
-       doorbell = client->client_base + client->doorbell_offset;
+       (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);
 
-       doorbell->db_status = GUC_DOORBELL_DISABLED;
+       /* XXX: wait for any interrupts */
+       /* XXX: wait for workqueue to drain */
+}
 
-       I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
+static uint16_t
+select_doorbell_register(struct intel_guc *guc, uint32_t priority)
+{
+       /*
+        * The bitmap tracks which doorbell registers are currently in use.
+        * It is split into two halves; the first half is used for normal
+        * priority contexts, the second half for high-priority ones.
+        * Note that logically higher priorities are numerically less than
+        * normal ones, so the test below means "is it high-priority?"
+        */
+       const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
+       const uint16_t half = GUC_MAX_DOORBELLS / 2;
+       const uint16_t start = hi_pri ? half : 0;
+       const uint16_t end = start + half;
+       uint16_t id;
 
-       value = I915_READ(drbreg);
-       WARN_ON((value & GEN8_DRB_VALID) != 0);
+       id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
+       if (id == end)
+               id = GUC_INVALID_DOORBELL_ID;
 
-       I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0);
-       I915_WRITE(drbreg, 0);
+       DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
+                       hi_pri ? "high" : "normal", id);
 
-       /* XXX: wait for any interrupts */
-       /* XXX: wait for workqueue to drain */
+       return id;
 }
 
 /*
@@ -289,37 +287,6 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
        return offset;
 }
 
-static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
-{
-       /*
-        * The bitmap is split into two halves; the first half is used for
-        * normal priority contexts, the second half for high-priority ones.
-        * Note that logically higher priorities are numerically less than
-        * normal ones, so the test below means "is it high-priority?"
-        */
-       const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
-       const uint16_t half = GUC_MAX_DOORBELLS / 2;
-       const uint16_t start = hi_pri ? half : 0;
-       const uint16_t end = start + half;
-       uint16_t id;
-
-       id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
-       if (id == end)
-               id = GUC_INVALID_DOORBELL_ID;
-       else
-               bitmap_set(guc->doorbell_bitmap, id, 1);
-
-       DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
-                       hi_pri ? "high" : "normal", id);
-
-       return id;
-}
-
-static void release_doorbell(struct intel_guc *guc, uint16_t id)
-{
-       bitmap_clear(guc->doorbell_bitmap, id, 1);
-}
-
 /*
  * Initialise the process descriptor shared with the GuC firmware.
  */
@@ -361,10 +328,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
        struct drm_i915_gem_object *client_obj = client->client_obj;
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct intel_engine_cs *engine;
-       struct intel_context *ctx = client->owner;
+       struct i915_gem_context *ctx = client->owner;
        struct guc_context_desc desc;
        struct sg_table *sg;
-       enum intel_engine_id id;
        u32 gfx_addr;
 
        memset(&desc, 0, sizeof(desc));
@@ -374,10 +340,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
        desc.priority = client->priority;
        desc.db_id = client->doorbell_id;
 
-       for_each_engine_id(engine, dev_priv, id) {
+       for_each_engine(engine, dev_priv) {
+               struct intel_context *ce = &ctx->engine[engine->id];
                struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
                struct drm_i915_gem_object *obj;
-               uint64_t ctx_desc;
 
                /* TODO: We have a design issue to be solved here. Only when we
                 * receive the first batch, we know which engine is used by the
@@ -386,20 +352,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                 * for now who owns a GuC client. But for future owner of GuC
                 * client, need to make sure lrc is pinned prior to enter here.
                 */
-               obj = ctx->engine[id].state;
-               if (!obj)
+               if (!ce->state)
                        break;  /* XXX: continue? */
 
-               ctx_desc = intel_lr_context_descriptor(ctx, engine);
-               lrc->context_desc = (u32)ctx_desc;
+               lrc->context_desc = lower_32_bits(ce->lrc_desc);
 
                /* The state page is after PPHWSP */
-               gfx_addr = i915_gem_obj_ggtt_offset(obj);
+               gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
                lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
                lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
                                (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-               obj = ctx->engine[id].ringbuf->obj;
+               obj = ce->ringbuf->obj;
                gfx_addr = i915_gem_obj_ggtt_offset(obj);
 
                lrc->ring_begin = gfx_addr;
@@ -427,7 +391,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
        desc.wq_size = client->wq_size;
 
        /*
-        * XXX: Take LRCs from an existing intel_context if this is not an
+        * XXX: Take LRCs from an existing context if this is not an
         * IsKMDCreatedContext client
         */
        desc.desc_private = (uintptr_t)client;
@@ -451,47 +415,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
                             sizeof(desc) * client->ctx_index);
 }
 
-int i915_guc_wq_check_space(struct i915_guc_client *gc)
+/**
+ * i915_guc_wq_check_space() - check that the GuC can accept a request
+ * @request:   request associated with the commands
+ *
+ * Return:     0 if space is available
+ *             -EAGAIN if space is not currently available
+ *
+ * This function must be called (and must return 0) before a request
+ * is submitted to the GuC via i915_guc_submit() below. Once a result
+ * of 0 has been returned, it remains valid until (but only until)
+ * the next call to submit().
+ *
+ * This precheck allows the caller to determine in advance that space
+ * will be available for the next submission before committing resources
+ * to it, and helps avoid late failures with complicated recovery paths.
+ */
+int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
 {
+       const size_t wqi_size = sizeof(struct guc_wq_item);
+       struct i915_guc_client *gc = request->i915->guc.execbuf_client;
        struct guc_process_desc *desc;
-       u32 size = sizeof(struct guc_wq_item);
-       int ret = -ETIMEDOUT, timeout_counter = 200;
+       u32 freespace;
 
-       if (!gc)
-               return 0;
+       GEM_BUG_ON(gc == NULL);
 
        desc = gc->client_base + gc->proc_desc_offset;
 
-       while (timeout_counter-- > 0) {
-               if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
-                       ret = 0;
-                       break;
-               }
+       freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+       if (likely(freespace >= wqi_size))
+               return 0;
 
-               if (timeout_counter)
-                       usleep_range(1000, 2000);
-       };
+       gc->no_wq_space += 1;
 
-       return ret;
+       return -EAGAIN;
 }
 
-static int guc_add_workqueue_item(struct i915_guc_client *gc,
-                                 struct drm_i915_gem_request *rq)
+static void guc_add_workqueue_item(struct i915_guc_client *gc,
+                                  struct drm_i915_gem_request *rq)
 {
+       /* wqi_len is in DWords, and does not include the one-word header */
+       const size_t wqi_size = sizeof(struct guc_wq_item);
+       const u32 wqi_len = wqi_size/sizeof(u32) - 1;
        struct guc_process_desc *desc;
        struct guc_wq_item *wqi;
        void *base;
-       u32 tail, wq_len, wq_off, space;
+       u32 freespace, tail, wq_off, wq_page;
 
        desc = gc->client_base + gc->proc_desc_offset;
-       space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
-       if (WARN_ON(space < sizeof(struct guc_wq_item)))
-               return -ENOSPC; /* shouldn't happen */
 
-       /* postincrement WQ tail for next time */
-       wq_off = gc->wq_tail;
-       gc->wq_tail += sizeof(struct guc_wq_item);
-       gc->wq_tail &= gc->wq_size - 1;
+       /* Free space is guaranteed, see i915_guc_wq_check_space() above */
+       freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+       GEM_BUG_ON(freespace < wqi_size);
+
+       /* The GuC firmware wants the tail index in QWords, not bytes */
+       tail = rq->tail;
+       GEM_BUG_ON(tail & 7);
+       tail >>= 3;
+       GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
 
        /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
         * should not have the case where structure wqi is across page, neither
@@ -500,19 +481,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
         * XXX: if not the case, we need save data to a temp wqi and copy it to
         * workqueue buffer dw by dw.
         */
-       WARN_ON(sizeof(struct guc_wq_item) != 16);
-       WARN_ON(wq_off & 3);
+       BUILD_BUG_ON(wqi_size != 16);
+
+       /* postincrement WQ tail for next time */
+       wq_off = gc->wq_tail;
+       gc->wq_tail += wqi_size;
+       gc->wq_tail &= gc->wq_size - 1;
+       GEM_BUG_ON(wq_off & (wqi_size - 1));
 
-       /* wq starts from the page after doorbell / process_desc */
-       base = kmap_atomic(i915_gem_object_get_page(gc->client_obj,
-                       (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
+       /* WQ starts from the page after doorbell / process_desc */
+       wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
        wq_off &= PAGE_SIZE - 1;
+       base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
        wqi = (struct guc_wq_item *)((char *)base + wq_off);
 
-       /* len does not include the header */
-       wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
+       /* Now fill in the 4-word work queue item */
        wqi->header = WQ_TYPE_INORDER |
-                       (wq_len << WQ_LEN_SHIFT) |
+                       (wqi_len << WQ_LEN_SHIFT) |
                        (rq->engine->guc_id << WQ_TARGET_SHIFT) |
                        WQ_NO_WCFLUSH_WAIT;
 
@@ -520,48 +505,105 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
        wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
                                                             rq->engine);
 
-       /* The GuC firmware wants the tail index in QWords, not bytes */
-       tail = rq->ringbuf->tail >> 3;
        wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
-       wqi->fence_id = 0; /*XXX: what fence to be here */
+       wqi->fence_id = rq->seqno;
 
        kunmap_atomic(base);
+}
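
The 4-DWord item built above can be modelled in user space as follows.
The WQ_* bit positions are illustrative placeholders, not the real GuC
firmware interface definitions, and the sample tail/seqno values are
invented:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WQ_TYPE_INORDER     (0u << 0)  /* placeholder value */
#define WQ_LEN_SHIFT        16         /* placeholder value */
#define WQ_TARGET_SHIFT     8          /* placeholder value */
#define WQ_RING_TAIL_SHIFT  0          /* placeholder value */

struct wq_item {                       /* mirrors struct guc_wq_item */
	uint32_t header;
	uint32_t context_desc;
	uint32_t ring_tail;
	uint32_t fence_id;
};

int main(void)
{
	const uint32_t wqi_size = sizeof(struct wq_item);          /* 16 */
	const uint32_t wqi_len = wqi_size / sizeof(uint32_t) - 1;  /* 3 */
	uint32_t rq_tail = 0x140;          /* sample ring offset in bytes */
	struct wq_item wqi;

	assert((rq_tail & 7) == 0);        /* must be QWord aligned... */
	uint32_t tail = rq_tail >> 3;      /* ...since GuC wants QWords */

	wqi.header = WQ_TYPE_INORDER |
		     (wqi_len << WQ_LEN_SHIFT) |
		     (0u /* engine guc_id */ << WQ_TARGET_SHIFT);
	wqi.context_desc = 0x1234;         /* sample descriptor */
	wqi.ring_tail = tail << WQ_RING_TAIL_SHIFT;
	wqi.fence_id = 42;                 /* the patch stores rq->seqno */

	printf("header=%#x tail(qw)=%u size=%u\n",
	       wqi.header, tail, wqi_size);
	return 0;
}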
 
-       return 0;
+static int guc_ring_doorbell(struct i915_guc_client *gc)
+{
+       struct guc_process_desc *desc;
+       union guc_doorbell_qw db_cmp, db_exc, db_ret;
+       union guc_doorbell_qw *db;
+       int attempt = 2, ret = -EAGAIN;
+
+       desc = gc->client_base + gc->proc_desc_offset;
+
+       /* Update the tail so it is visible to GuC */
+       desc->tail = gc->wq_tail;
+
+       /* current cookie */
+       db_cmp.db_status = GUC_DOORBELL_ENABLED;
+       db_cmp.cookie = gc->cookie;
+
+       /* cookie to be updated */
+       db_exc.db_status = GUC_DOORBELL_ENABLED;
+       db_exc.cookie = gc->cookie + 1;
+       if (db_exc.cookie == 0)
+               db_exc.cookie = 1;
+
+       /* pointer to the current doorbell cacheline */
+       db = gc->client_base + gc->doorbell_offset;
+
+       while (attempt--) {
+               /* let's ring the doorbell */
+               db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
+                       db_cmp.value_qw, db_exc.value_qw);
+
+               /* if the exchange was successfully executed */
+               if (db_ret.value_qw == db_cmp.value_qw) {
+                       /* db was successfully rung */
+                       gc->cookie = db_exc.cookie;
+                       ret = 0;
+                       break;
+               }
+
+               /* XXX: doorbell was lost; we need to acquire it again */
+               if (db_ret.db_status == GUC_DOORBELL_DISABLED)
+                       break;
+
+               DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
+                         db_cmp.cookie, db_ret.cookie);
+
+               /* update the cookie to newly read cookie from GuC */
+               db_cmp.cookie = db_ret.cookie;
+               db_exc.cookie = db_ret.cookie + 1;
+               if (db_exc.cookie == 0)
+                       db_exc.cookie = 1;
+       }
+
+       return ret;
 }
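
The cookie handshake above can be modelled in portable C11: status and
cookie are swapped as one 64-bit compare-and-swap, so a concurrent
update shows up as a mismatch. Everything below is an illustrative
stand-in, not the kernel code; atomic64_cmpxchg() on the mapped
doorbell becomes atomic_compare_exchange_strong() here:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DOORBELL_ENABLED 1u            /* placeholder status value */

union doorbell_qw {                    /* mirrors union guc_doorbell_qw */
	struct {
		uint32_t db_status;
		uint32_t cookie;
	};
	uint64_t value_qw;
};

static _Atomic uint64_t hw_doorbell;   /* stands in for the mapped cacheline */

static int ring_doorbell(uint32_t *my_cookie)
{
	union doorbell_qw cmp, exc;
	int attempt = 2;

	cmp.db_status = exc.db_status = DOORBELL_ENABLED;
	cmp.cookie = *my_cookie;
	exc.cookie = *my_cookie + 1;
	if (exc.cookie == 0)               /* cookie 0 is reserved */
		exc.cookie = 1;

	while (attempt--) {
		uint64_t seen = cmp.value_qw;

		/* swap status+cookie in one shot; failure = concurrent update */
		if (atomic_compare_exchange_strong(&hw_doorbell, &seen,
						   exc.value_qw)) {
			*my_cookie = exc.cookie;
			return 0;
		}
		/* resync to the observed cookie and retry once (the kernel
		 * version also gives up early if the doorbell reads back
		 * as disabled) */
		cmp.value_qw = seen;
		cmp.db_status = DOORBELL_ENABLED;
		exc.cookie = cmp.cookie + 1;
		if (exc.cookie == 0)
			exc.cookie = 1;
	}
	return -1;                         /* -EAGAIN in the driver */
}

int main(void)
{
	union doorbell_qw init = { .db_status = DOORBELL_ENABLED, .cookie = 0 };
	uint32_t cookie = 0;

	atomic_store(&hw_doorbell, init.value_qw);
	printf("ring -> %d, cookie now %u\n", ring_doorbell(&cookie), cookie);
	return 0;
}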
 
 /**
  * i915_guc_submit() - Submit commands through GuC
- * @client:    the guc client where commands will go through
  * @rq:                request associated with the commands
  *
- * Return:     0 if succeed
+ * Return:     0 on success, otherwise an errno.
+ *             (Note: nonzero really shouldn't happen!)
+ *
+ * The caller must have already called i915_guc_wq_check_space() above
+ * with a result of 0 (success) since the last request submission. This
+ * guarantees that there is space in the work queue for the new request,
+ * so enqueuing the item cannot fail.
+ *
+ * Bad Things Will Happen if the caller violates this protocol, e.g. calls
+ * submit() when check() says there's no space, or calls submit() multiple
+ * times with no intervening check().
+ *
+ * The only error here arises if the doorbell hardware isn't functioning
+ * as expected, which really shouldn't happen.
  */
-int i915_guc_submit(struct i915_guc_client *client,
-                   struct drm_i915_gem_request *rq)
+int i915_guc_submit(struct drm_i915_gem_request *rq)
 {
-       struct intel_guc *guc = client->guc;
-       unsigned int engine_id = rq->engine->guc_id;
-       int q_ret, b_ret;
+       unsigned int engine_id = rq->engine->id;
+       struct intel_guc *guc = &rq->i915->guc;
+       struct i915_guc_client *client = guc->execbuf_client;
+       int b_ret;
 
-       q_ret = guc_add_workqueue_item(client, rq);
-       if (q_ret == 0)
-               b_ret = guc_ring_doorbell(client);
+       guc_add_workqueue_item(client, rq);
+       b_ret = guc_ring_doorbell(client);
 
        client->submissions[engine_id] += 1;
-       if (q_ret) {
-               client->q_fail += 1;
-               client->retcode = q_ret;
-       } else if (b_ret) {
+       client->retcode = b_ret;
+       if (b_ret)
                client->b_fail += 1;
-               client->retcode = q_ret = b_ret;
-       } else {
-               client->retcode = 0;
-       }
+
        guc->submissions[engine_id] += 1;
        guc->last_seqno[engine_id] = rq->seqno;
 
-       return q_ret;
+       return b_ret;
 }
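
A minimal sketch of the reserve-then-submit protocol the kerneldoc
above mandates; the function names and empty signatures are
illustrative stand-ins (the real functions take request/client
arguments):

#include <stdio.h>

static int wq_check_space(void) { return 0; }  /* 0 or -EAGAIN         */
static int guc_submit(void)     { return 0; }  /* doorbell errors only */

int main(void)
{
	/* Every submit is paired with one successful space check, so
	 * the enqueue inside submit can no longer fail with -ENOSPC. */
	if (wq_check_space() == 0)
		printf("submit -> %d\n", guc_submit());
	return 0;
}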
 
 /*
@@ -572,7 +614,7 @@ int i915_guc_submit(struct i915_guc_client *client,
 
 /**
  * gem_allocate_guc_obj() - Allocate gem object for GuC usage
- * @dev:       drm device
+ * @dev_priv:  driver private data structure
  * @size:      size of object
  *
  * This is a wrapper to create a gem obj. In order to use it inside GuC, the
@@ -581,14 +623,13 @@ int i915_guc_submit(struct i915_guc_client *client,
  *
  * Return:     A drm_i915_gem_object if successful, otherwise NULL.
  */
-static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
-                                                       u32 size)
+static struct drm_i915_gem_object *
+gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 
-       obj = i915_gem_alloc_object(dev, size);
-       if (!obj)
+       obj = i915_gem_object_create(&dev_priv->drm, size);
+       if (IS_ERR(obj))
                return NULL;
 
        if (i915_gem_object_get_pages(obj)) {
@@ -623,10 +664,10 @@ static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
        drm_gem_object_unreference(&obj->base);
 }
 
-static void guc_client_free(struct drm_device *dev,
-                           struct i915_guc_client *client)
+static void
+guc_client_free(struct drm_i915_private *dev_priv,
+               struct i915_guc_client *client)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc *guc = &dev_priv->guc;
 
        if (!client)
@@ -639,17 +680,10 @@ static void guc_client_free(struct drm_device *dev,
 
        if (client->client_base) {
                /*
-                * If we got as far as setting up a doorbell, make sure
-                * we shut it down before unmapping & deallocating the
-                * memory. So first disable the doorbell, then tell the
-                * GuC that we've finished with it, finally deallocate
-                * it in our bitmap
+                * If we got as far as setting up a doorbell, make sure we
+                * shut it down before unmapping & deallocating the memory.
                 */
-               if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
-                       guc_disable_doorbell(guc, client);
-                       host2guc_release_doorbell(guc, client);
-                       release_doorbell(guc, client->doorbell_id);
-               }
+               guc_disable_doorbell(guc, client);
 
                kunmap(kmap_to_page(client->client_base));
        }
@@ -664,9 +698,51 @@ static void guc_client_free(struct drm_device *dev,
        kfree(client);
 }
 
+/*
+ * Borrow the first client to set up & tear down every doorbell
+ * in turn, to ensure that all doorbell h/w is (re)initialised.
+ */
+static void guc_init_doorbell_hw(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct i915_guc_client *client = guc->execbuf_client;
+       uint16_t db_id, i;
+       int err;
+
+       db_id = client->doorbell_id;
+
+       for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
+               i915_reg_t drbreg = GEN8_DRBREGL(i);
+               u32 value = I915_READ(drbreg);
+
+               err = guc_update_doorbell_id(guc, client, i);
+
+               /* Report update failure or unexpectedly active doorbell */
+               if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED)))
+                       DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n",
+                                         i, drbreg.reg, value, err);
+       }
+
+       /* Restore to original value */
+       err = guc_update_doorbell_id(guc, client, db_id);
+       if (err)
+               DRM_ERROR("Failed to restore doorbell to %d, err %d\n",
+                       db_id, err);
+
+       for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
+               i915_reg_t drbreg = GEN8_DRBREGL(i);
+               u32 value = I915_READ(drbreg);
+
+               if (i != db_id && (value & GUC_DOORBELL_ENABLED))
+                       DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n",
+                                         i, drbreg.reg, value);
+       }
+}
+
 /**
  * guc_client_alloc() - Allocate an i915_guc_client
- * @dev:       drm device
+ * @dev_priv:  driver private data structure
  * @priority:  four priority levels: _CRITICAL, _HIGH, _NORMAL and _LOW
  *             The kernel client to replace ExecList submission is created with
  *             NORMAL priority. Priority of a client for scheduler can be HIGH,
@@ -676,14 +752,15 @@ static void guc_client_free(struct drm_device *dev,
  *
  * Return:     An i915_guc_client object if success, else NULL.
  */
-static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
-                                               uint32_t priority,
-                                               struct intel_context *ctx)
+static struct i915_guc_client *
+guc_client_alloc(struct drm_i915_private *dev_priv,
+                uint32_t priority,
+                struct i915_gem_context *ctx)
 {
        struct i915_guc_client *client;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc *guc = &dev_priv->guc;
        struct drm_i915_gem_object *obj;
+       uint16_t db_id;
 
        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
@@ -702,7 +779,7 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
        }
 
        /* The first page is doorbell/proc_desc. The two following pages are wq. */
-       obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE);
+       obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
        if (!obj)
                goto err;
 
@@ -712,6 +789,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
        client->wq_offset = GUC_DB_SIZE;
        client->wq_size = GUC_WQ_SIZE;
 
+       db_id = select_doorbell_register(guc, client->priority);
+       if (db_id == GUC_INVALID_DOORBELL_ID)
+               /* XXX: evict a doorbell instead? */
+               goto err;
+
        client->doorbell_offset = select_doorbell_cacheline(guc);
 
        /*
@@ -724,29 +806,22 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
        else
                client->proc_desc_offset = (GUC_DB_SIZE / 2);
 
-       client->doorbell_id = assign_doorbell(guc, client->priority);
-       if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
-               /* XXX: evict a doorbell instead */
-               goto err;
-
        guc_init_proc_desc(guc, client);
        guc_init_ctx_desc(guc, client);
-       guc_init_doorbell(guc, client);
-
-       /* XXX: Any cache flushes needed? General domain mgmt calls? */
-
-       if (host2guc_allocate_doorbell(guc, client))
+       if (guc_init_doorbell(guc, client, db_id))
                goto err;
 
-       DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n",
-               priority, client, client->ctx_index, client->doorbell_id);
+       DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n",
+               priority, client, client->ctx_index);
+       DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
+               client->doorbell_id, client->doorbell_offset);
 
        return client;
 
 err:
        DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
 
-       guc_client_free(dev, client);
+       guc_client_free(dev_priv, client);
        return NULL;
 }
 
@@ -771,7 +846,7 @@ static void guc_create_log(struct intel_guc *guc)
 
        obj = guc->log_obj;
        if (!obj) {
-               obj = gem_allocate_guc_obj(dev_priv->dev, size);
+               obj = gem_allocate_guc_obj(dev_priv, size);
                if (!obj) {
                        /* logging will be off */
                        i915.guc_log_level = -1;
@@ -831,7 +906,7 @@ static void guc_create_ads(struct intel_guc *guc)
 
        obj = guc->ads_obj;
        if (!obj) {
-               obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+               obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size));
                if (!obj)
                        return;
 
@@ -885,66 +960,65 @@ static void guc_create_ads(struct intel_guc *guc)
  * Set up the memory resources to be shared with the GuC.  At this point,
  * we require just one object that can be mapped through the GGTT.
  */
-int i915_guc_submission_init(struct drm_device *dev)
+int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        const size_t ctxsize = sizeof(struct guc_context_desc);
        const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
        const size_t gemsize = round_up(poolsize, PAGE_SIZE);
        struct intel_guc *guc = &dev_priv->guc;
 
+       /* Wipe bitmap & delete client in case of reinitialisation */
+       bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
+       i915_guc_submission_disable(dev_priv);
+
        if (!i915.enable_guc_submission)
                return 0; /* not enabled  */
 
        if (guc->ctx_pool_obj)
                return 0; /* already allocated */
 
-       guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize);
+       guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize);
        if (!guc->ctx_pool_obj)
                return -ENOMEM;
 
        ida_init(&guc->ctx_ids);
-
        guc_create_log(guc);
-
        guc_create_ads(guc);
 
        return 0;
 }
 
-int i915_guc_submission_enable(struct drm_device *dev)
+int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc *guc = &dev_priv->guc;
-       struct intel_context *ctx = dev_priv->kernel_context;
        struct i915_guc_client *client;
 
        /* client for execbuf submission */
-       client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx);
+       client = guc_client_alloc(dev_priv,
+                                 GUC_CTX_PRIORITY_KMD_NORMAL,
+                                 dev_priv->kernel_context);
        if (!client) {
                DRM_ERROR("Failed to create execbuf guc_client\n");
                return -ENOMEM;
        }
 
        guc->execbuf_client = client;
-
        host2guc_sample_forcewake(guc, client);
+       guc_init_doorbell_hw(guc);
 
        return 0;
 }
 
-void i915_guc_submission_disable(struct drm_device *dev)
+void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc *guc = &dev_priv->guc;
 
-       guc_client_free(dev, guc->execbuf_client);
+       guc_client_free(dev_priv, guc->execbuf_client);
        guc->execbuf_client = NULL;
 }
 
-void i915_guc_submission_fini(struct drm_device *dev)
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc *guc = &dev_priv->guc;
 
        gem_release_guc_obj(dev_priv->guc.ads_obj);
@@ -965,12 +1039,12 @@ void i915_guc_submission_fini(struct drm_device *dev)
  */
 int intel_guc_suspend(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc *guc = &dev_priv->guc;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        u32 data[3];
 
-       if (!i915.enable_guc_submission)
+       if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
                return 0;
 
        ctx = dev_priv->kernel_context;
@@ -991,12 +1065,12 @@ int intel_guc_suspend(struct drm_device *dev)
  */
 int intel_guc_resume(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc *guc = &dev_priv->guc;
-       struct intel_context *ctx;
+       struct i915_gem_context *ctx;
        u32 data[3];
 
-       if (!i915.enable_guc_submission)
+       if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
                return 0;
 
        ctx = dev_priv->kernel_context;
index 2f6fd33c07ba2aaabbce9764cf4a66178637199c..1c2aec39241257fe63183a770053a135b68e2f61 100644 (file)
@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-       POSTING_READ(GTIMR);
 }
 
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
        ilk_update_gt_irq(dev_priv, mask, mask);
+       POSTING_READ_FW(GTIMR);
 }
 
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
@@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
        __gen6_disable_pm_irq(dev_priv, mask);
 }
 
-void gen6_reset_rps_interrupts(struct drm_device *dev)
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        i915_reg_t reg = gen6_pm_iir(dev_priv);
 
        spin_lock_irq(&dev_priv->irq_lock);
@@ -349,14 +348,11 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-void gen6_enable_rps_interrupts(struct drm_device *dev)
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        spin_lock_irq(&dev_priv->irq_lock);
-
-       WARN_ON(dev_priv->rps.pm_iir);
-       WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+       WARN_ON_ONCE(dev_priv->rps.pm_iir);
+       WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
                                dev_priv->pm_rps_events);
@@ -367,32 +363,13 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
 
 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
 {
-       /*
-        * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
-        * if GEN6_PM_UP_EI_EXPIRED is masked.
-        *
-        * TODO: verify if this can be reproduced on VLV,CHV.
-        */
-       if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
-               mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
-
-       if (INTEL_INFO(dev_priv)->gen >= 8)
-               mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
-       return mask;
+       return (mask & ~dev_priv->rps.pm_intr_keep);
 }
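
The one-line version works because the gen checks that used to live
here are folded into a mask computed once at init
(dev_priv->rps.pm_intr_keep, presumably filled in elsewhere in this
series). A user-space model with placeholder bit values:

#include <stdint.h>
#include <stdio.h>

#define PM_RP_UP_EI_EXPIRED      (1u << 2)   /* placeholder */
#define PMINTR_REDIRECT_NON_DISP (1u << 31)  /* placeholder */

static uint32_t pm_intr_keep;  /* bits that must never be unmasked */

static void init_pm_intr_keep(int gen, int is_haswell)
{
	pm_intr_keep = 0;
	if (gen <= 7 && !is_haswell)
		pm_intr_keep |= PM_RP_UP_EI_EXPIRED;
	if (gen >= 8)
		pm_intr_keep |= PMINTR_REDIRECT_NON_DISP;
}

static uint32_t sanitize_rps_pm_mask(uint32_t mask)
{
	return mask & ~pm_intr_keep;  /* one AND replaces the old branches */
}

int main(void)
{
	init_pm_intr_keep(8, 0);
	printf("%#x\n", sanitize_rps_pm_mask(~0u));
	return 0;
}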
 
-void gen6_disable_rps_interrupts(struct drm_device *dev)
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.interrupts_enabled = false;
-       spin_unlock_irq(&dev_priv->irq_lock);
-
-       cancel_work_sync(&dev_priv->rps.work);
-
-       spin_lock_irq(&dev_priv->irq_lock);
 
        I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 
@@ -401,8 +378,15 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
                                ~dev_priv->pm_rps_events);
 
        spin_unlock_irq(&dev_priv->irq_lock);
+       synchronize_irq(dev_priv->drm.irq);
 
-       synchronize_irq(dev->irq);
+       /* Now that we will not be generating any more work, flush any
+        * outstanding tasks. As we are called on the RPS idle path,
+        * we will reset the GPU to minimum frequencies, so the current
+        * state of the worker can be discarded.
+        */
+       cancel_work_sync(&dev_priv->rps.work);
+       gen6_reset_rps_interrupts(dev_priv);
 }
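
A stubbed model of the teardown ordering this hunk establishes: mask
first, drain in-flight handlers, only then flush the worker. The real
calls are spin_lock_irq()/synchronize_irq()/cancel_work_sync(); the
stubs below are illustrative:

#include <stdio.h>

static void mask_rps_interrupts(void)  { puts("1: no new work queued"); }
static void drain_irq_handlers(void)   { puts("2: synchronize_irq()"); }
static void flush_rps_worker(void)     { puts("3: cancel_work_sync()"); }
static void reset_rps_interrupts(void) { puts("4: clear stale IIR bits"); }

int main(void)
{
	mask_rps_interrupts();   /* under irq_lock in the driver       */
	drain_irq_handlers();    /* after this, no handler can requeue */
	flush_rps_worker();      /* worker is now truly quiescent      */
	reset_rps_interrupts();  /* safe on the RPS idle path          */
	return 0;
}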
 
 /**
@@ -582,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
        u32 enable_mask;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+               enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
@@ -596,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
        u32 enable_mask;
 
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
+               enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
                                                           status_mask);
        else
                enable_mask = status_mask << 16;
@@ -605,19 +589,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
 
 /**
  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
- * @dev: drm device
+ * @dev_priv: i915 device private
  */
-static void i915_enable_asle_pipestat(struct drm_device *dev)
+static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
+       if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
                return;
 
        spin_lock_irq(&dev_priv->irq_lock);
 
        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
-       if (INTEL_INFO(dev)->gen >= 4)
+       if (INTEL_GEN(dev_priv) >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_STATUS);
 
@@ -685,7 +667,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  */
 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
        struct intel_crtc *intel_crtc =
@@ -732,7 +714,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 
 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
 }
@@ -741,7 +723,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode = &crtc->base.hwmode;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;
@@ -750,7 +732,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;
 
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev_priv))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -767,7 +749,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
         * problem.  We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
         */
-       if (HAS_DDI(dev) && !position) {
+       if (HAS_DDI(dev_priv) && !position) {
                int i, temp;
 
                for (i = 0; i < 100; i++) {
@@ -793,7 +775,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
                                    ktime_t *stime, ktime_t *etime,
                                    const struct drm_display_mode *mode)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int position;
@@ -835,7 +817,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
        if (stime)
                *stime = ktime_get();
 
-       if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+       if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
@@ -897,7 +879,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
        else
                position += vtotal - vbl_end;
 
-       if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+       if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
@@ -914,7 +896,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 
 int intel_get_crtc_scanline(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        unsigned long irqflags;
        int position;
 
@@ -955,9 +937,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
                                                     &crtc->hwmode);
 }
 
-static void ironlake_rps_change_irq_handler(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;
 
@@ -986,7 +967,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
                        new_delay = dev_priv->ips.min_delay;
        }
 
-       if (ironlake_set_drps(dev, new_delay))
+       if (ironlake_set_drps(dev_priv, new_delay))
                dev_priv->ips.cur_delay = new_delay;
 
        spin_unlock(&mchdev_lock);
@@ -996,13 +977,11 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 
 static void notify_ring(struct intel_engine_cs *engine)
 {
-       if (!intel_engine_initialized(engine))
-               return;
-
-       trace_i915_gem_request_notify(engine);
-       engine->user_interrupts++;
-
-       wake_up_all(&engine->irq_queue);
+       smp_store_mb(engine->breadcrumbs.irq_posted, true);
+       if (intel_engine_wakeup(engine)) {
+               trace_i915_gem_request_notify(engine);
+               engine->breadcrumbs.irq_wakeups++;
+       }
 }
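
A C11 model of the new notify path (names are illustrative): the
irq_posted flag is published with a full barrier, the smp_store_mb()
analogue, before the waiter check, so a sleeper either observes the
flag or receives the wakeup:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool irq_posted;

static void notify_ring_model(void)
{
	atomic_store(&irq_posted, true);   /* store + full barrier */
	/* intel_engine_wakeup() analogue would wake a waiter here */
}

static bool waiter_consume(void)
{
	/* waiter clears the flag; true means an interrupt was posted */
	return atomic_exchange(&irq_posted, false);
}

int main(void)
{
	notify_ring_model();
	printf("posted=%d\n", waiter_consume());
	return 0;
}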
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1083,7 +1062,7 @@ static bool any_waiters(struct drm_i915_private *dev_priv)
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv)
-               if (engine->irq_refcount)
+               if (intel_engine_has_waiter(engine))
                        return true;
 
        return false;
@@ -1104,13 +1083,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
                return;
        }
 
-       /*
-        * The RPS work is synced during runtime suspend, we don't require a
-        * wakeref. TODO: instead of disabling the asserts make sure that we
-        * always hold an RPM reference while the work is running.
-        */
-       DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
-
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@@ -1123,7 +1095,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
        WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
 
        if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
-               goto out;
+               return;
 
        mutex_lock(&dev_priv->rps.hw_lock);
 
@@ -1175,11 +1147,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
        new_delay += adj;
        new_delay = clamp_t(int, new_delay, min, max);
 
-       intel_set_rps(dev_priv->dev, new_delay);
+       intel_set_rps(dev_priv, new_delay);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
-out:
-       ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 }
 
 
@@ -1205,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work)
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
-       mutex_lock(&dev_priv->dev->struct_mutex);
+       mutex_lock(&dev_priv->drm.struct_mutex);
 
        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
@@ -1241,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work)
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;
 
-               kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+               kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);
 
                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
@@ -1261,7 +1231,7 @@ out:
        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -1287,8 +1257,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv
 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
 {
-       if (gt_iir &
-           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+       if (gt_iir & GT_RENDER_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[VCS]);
@@ -1297,9 +1266,7 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
                               u32 gt_iir)
 {
-
-       if (gt_iir &
-           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+       if (gt_iir & GT_RENDER_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(&dev_priv->engine[VCS]);
@@ -1506,27 +1473,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
 
 }
 
-static void gmbus_irq_handler(struct drm_device *dev)
+static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-static void dp_aux_irq_handler(struct drm_device *dev)
+static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
 #if defined(CONFIG_DEBUG_FS)
-static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+                                        enum pipe pipe,
                                         uint32_t crc0, uint32_t crc1,
                                         uint32_t crc2, uint32_t crc3,
                                         uint32_t crc4)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_pipe_crc_entry *entry;
        int head, tail;
@@ -1550,7 +1513,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
 
        entry = &pipe_crc->entries[head];
 
-       entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+       entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
+                                                                pipe);
        entry->crc[0] = crc0;
        entry->crc[1] = crc1;
        entry->crc[2] = crc2;
@@ -1566,27 +1530,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
 }
 #else
 static inline void
-display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+                            enum pipe pipe,
                             uint32_t crc0, uint32_t crc1,
                             uint32_t crc2, uint32_t crc3,
                             uint32_t crc4) {}
 #endif
 
 
-static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+                                    enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       display_pipe_crc_irq_handler(dev, pipe,
+       display_pipe_crc_irq_handler(dev_priv, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     0, 0, 0, 0);
 }
 
-static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+                                    enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       display_pipe_crc_irq_handler(dev, pipe,
+       display_pipe_crc_irq_handler(dev_priv, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
@@ -1594,22 +1557,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
                                     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
 }
 
-static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
+                                     enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t res1, res2;
 
-       if (INTEL_INFO(dev)->gen >= 3)
+       if (INTEL_GEN(dev_priv) >= 3)
                res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
        else
                res1 = 0;
 
-       if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
        else
                res2 = 0;
 
-       display_pipe_crc_irq_handler(dev, pipe,
+       display_pipe_crc_irq_handler(dev_priv, pipe,
                                     I915_READ(PIPE_CRC_RES_RED(pipe)),
                                     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
                                     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
@@ -1626,7 +1589,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
                gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
                if (dev_priv->rps.interrupts_enabled) {
                        dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-                       queue_work(dev_priv->wq, &dev_priv->rps.work);
+                       schedule_work(&dev_priv->rps.work);
                }
                spin_unlock(&dev_priv->irq_lock);
        }
@@ -1643,18 +1606,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
        }
 }
 
-static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
+static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
+                                    enum pipe pipe)
 {
-       if (!drm_handle_vblank(dev, pipe))
-               return false;
+       bool ret;
 
-       return true;
+       ret = drm_handle_vblank(&dev_priv->drm, pipe);
+       if (ret)
+               intel_finish_page_flip_mmio(dev_priv, pipe);
+
+       return ret;
 }
 
-static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
-                                       u32 pipe_stats[I915_MAX_PIPES])
+static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
+                                       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
 
        spin_lock(&dev_priv->irq_lock);
@@ -1710,31 +1676,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
        spin_unlock(&dev_priv->irq_lock);
 }
 
-static void valleyview_pipestat_irq_handler(struct drm_device *dev,
+static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
                                            u32 pipe_stats[I915_MAX_PIPES])
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
 
        for_each_pipe(dev_priv, pipe) {
                if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
-                   intel_pipe_handle_vblank(dev, pipe))
-                       intel_check_page_flip(dev, pipe);
+                   intel_pipe_handle_vblank(dev_priv, pipe))
+                       intel_check_page_flip(dev_priv, pipe);
 
-               if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
-                       intel_prepare_page_flip(dev, pipe);
-                       intel_finish_page_flip(dev, pipe);
-               }
+               if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
+                       intel_finish_page_flip_cs(dev_priv, pipe);
 
                if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-                       i9xx_pipe_crc_irq_handler(dev, pipe);
+                       i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
        }
 
        if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
-               gmbus_irq_handler(dev);
+               gmbus_irq_handler(dev_priv);
 }
 
 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
@@ -1747,12 +1710,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
        return hotplug_status;
 }
 
-static void i9xx_hpd_irq_handler(struct drm_device *dev,
+static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
                                 u32 hotplug_status)
 {
        u32 pin_mask = 0, long_mask = 0;
 
-       if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+       if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
+           IS_CHERRYVIEW(dev_priv)) {
                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
 
                if (hotplug_trigger) {
@@ -1760,11 +1724,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
                                           hotplug_trigger, hpd_status_g4x,
                                           i9xx_port_hotplug_long_detect);
 
-                       intel_hpd_irq_handler(dev, pin_mask, long_mask);
+                       intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
                }
 
                if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
-                       dp_aux_irq_handler(dev);
+                       dp_aux_irq_handler(dev_priv);
        } else {
                u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
 
@@ -1772,7 +1736,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
                        intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
                                           hotplug_trigger, hpd_status_i915,
                                           i9xx_port_hotplug_long_detect);
-                       intel_hpd_irq_handler(dev, pin_mask, long_mask);
+                       intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
                }
        }
 }
@@ -1780,7 +1744,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;
 
        if (!intel_irqs_enabled(dev_priv))
@@ -1831,7 +1795,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 
                /* Call regardless, as some status bits might not be
                 * signalled in iir */
-               valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+               valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
                /*
                 * VLV_IIR is single buffered, and reflects the level
@@ -1850,9 +1814,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                        gen6_rps_irq_handler(dev_priv, pm_iir);
 
                if (hotplug_status)
-                       i9xx_hpd_irq_handler(dev, hotplug_status);
+                       i9xx_hpd_irq_handler(dev_priv, hotplug_status);
 
-               valleyview_pipestat_irq_handler(dev, pipe_stats);
+               valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
        } while (0);
 
        enable_rpm_wakeref_asserts(dev_priv);
@@ -1863,7 +1827,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        irqreturn_t ret = IRQ_NONE;
 
        if (!intel_irqs_enabled(dev_priv))
@@ -1911,7 +1875,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
 
                /* Call regardless, as some status bits might not be
                 * signalled in iir */
-               valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+               valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
 
                /*
                 * VLV_IIR is single buffered, and reflects the level
@@ -1927,9 +1891,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
                gen8_gt_irq_handler(dev_priv, gt_iir);
 
                if (hotplug_status)
-                       i9xx_hpd_irq_handler(dev, hotplug_status);
+                       i9xx_hpd_irq_handler(dev_priv, hotplug_status);
 
-               valleyview_pipestat_irq_handler(dev, pipe_stats);
+               valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
        } while (0);
 
        enable_rpm_wakeref_asserts(dev_priv);
@@ -1937,10 +1901,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
        return ret;
 }
 
-static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
+                               u32 hotplug_trigger,
                                const u32 hpd[HPD_NUM_PINS])
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
        /*
@@ -1966,16 +1930,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
                           dig_hotplug_reg, hpd,
                           pch_port_hotplug_long_detect);
 
-       intel_hpd_irq_handler(dev, pin_mask, long_mask);
+       intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 }
 
-static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
 
-       ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+       ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
 
        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1985,10 +1948,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
        }
 
        if (pch_iir & SDE_AUX_MASK)
-               dp_aux_irq_handler(dev);
+               dp_aux_irq_handler(dev_priv);
 
        if (pch_iir & SDE_GMBUS)
-               gmbus_irq_handler(dev);
+               gmbus_irq_handler(dev_priv);
 
        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -2018,9 +1981,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
                intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
 }
 
-static void ivb_err_int_handler(struct drm_device *dev)
+static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 err_int = I915_READ(GEN7_ERR_INT);
        enum pipe pipe;
 
@@ -2032,19 +1994,18 @@ static void ivb_err_int_handler(struct drm_device *dev)
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
                if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
-                       if (IS_IVYBRIDGE(dev))
-                               ivb_pipe_crc_irq_handler(dev, pipe);
+                       if (IS_IVYBRIDGE(dev_priv))
+                               ivb_pipe_crc_irq_handler(dev_priv, pipe);
                        else
-                               hsw_pipe_crc_irq_handler(dev, pipe);
+                               hsw_pipe_crc_irq_handler(dev_priv, pipe);
                }
        }
 
        I915_WRITE(GEN7_ERR_INT, err_int);
 }
 
-static void cpt_serr_int_handler(struct drm_device *dev)
+static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 serr_int = I915_READ(SERR_INT);
 
        if (serr_int & SERR_INT_POISON)
@@ -2062,13 +2023,12 @@ static void cpt_serr_int_handler(struct drm_device *dev)
        I915_WRITE(SERR_INT, serr_int);
 }
 
-static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
 
-       ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+       ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
 
        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2078,10 +2038,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
        }
 
        if (pch_iir & SDE_AUX_MASK_CPT)
-               dp_aux_irq_handler(dev);
+               dp_aux_irq_handler(dev_priv);
 
        if (pch_iir & SDE_GMBUS_CPT)
-               gmbus_irq_handler(dev);
+               gmbus_irq_handler(dev_priv);
 
        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -2096,12 +2056,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
                                         I915_READ(FDI_RX_IIR(pipe)));
 
        if (pch_iir & SDE_ERROR_CPT)
-               cpt_serr_int_handler(dev);
+               cpt_serr_int_handler(dev_priv);
 }
 
-static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
                ~SDE_PORTE_HOTPLUG_SPT;
        u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -2130,16 +2089,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
        }
 
        if (pin_mask)
-               intel_hpd_irq_handler(dev, pin_mask, long_mask);
+               intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 
        if (pch_iir & SDE_GMBUS_CPT)
-               gmbus_irq_handler(dev);
+               gmbus_irq_handler(dev_priv);
 }
 
-static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
+                               u32 hotplug_trigger,
                                const u32 hpd[HPD_NUM_PINS])
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
        dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
@@ -2149,97 +2108,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
                           dig_hotplug_reg, hpd,
                           ilk_port_hotplug_long_detect);
 
-       intel_hpd_irq_handler(dev, pin_mask, long_mask);
+       intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 }
 
-static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
+static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
+                                   u32 de_iir)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
 
        if (hotplug_trigger)
-               ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
+               ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
 
        if (de_iir & DE_AUX_CHANNEL_A)
-               dp_aux_irq_handler(dev);
+               dp_aux_irq_handler(dev_priv);
 
        if (de_iir & DE_GSE)
-               intel_opregion_asle_intr(dev);
+               intel_opregion_asle_intr(dev_priv);
 
        if (de_iir & DE_POISON)
                DRM_ERROR("Poison interrupt\n");
 
        for_each_pipe(dev_priv, pipe) {
                if (de_iir & DE_PIPE_VBLANK(pipe) &&
-                   intel_pipe_handle_vblank(dev, pipe))
-                       intel_check_page_flip(dev, pipe);
+                   intel_pipe_handle_vblank(dev_priv, pipe))
+                       intel_check_page_flip(dev_priv, pipe);
 
                if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
                if (de_iir & DE_PIPE_CRC_DONE(pipe))
-                       i9xx_pipe_crc_irq_handler(dev, pipe);
+                       i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
                /* plane/pipes map 1:1 on ilk+ */
-               if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
-                       intel_prepare_page_flip(dev, pipe);
-                       intel_finish_page_flip_plane(dev, pipe);
-               }
+               if (de_iir & DE_PLANE_FLIP_DONE(pipe))
+                       intel_finish_page_flip_cs(dev_priv, pipe);
        }
 
        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
                u32 pch_iir = I915_READ(SDEIIR);
 
-               if (HAS_PCH_CPT(dev))
-                       cpt_irq_handler(dev, pch_iir);
+               if (HAS_PCH_CPT(dev_priv))
+                       cpt_irq_handler(dev_priv, pch_iir);
                else
-                       ibx_irq_handler(dev, pch_iir);
+                       ibx_irq_handler(dev_priv, pch_iir);
 
                /* should clear PCH hotplug event before clear CPU irq */
                I915_WRITE(SDEIIR, pch_iir);
        }
 
-       if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
-               ironlake_rps_change_irq_handler(dev);
+       if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+               ironlake_rps_change_irq_handler(dev_priv);
 }
 
-static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
+                                   u32 de_iir)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
 
        if (hotplug_trigger)
-               ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
+               ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
 
        if (de_iir & DE_ERR_INT_IVB)
-               ivb_err_int_handler(dev);
+               ivb_err_int_handler(dev_priv);
 
        if (de_iir & DE_AUX_CHANNEL_A_IVB)
-               dp_aux_irq_handler(dev);
+               dp_aux_irq_handler(dev_priv);
 
        if (de_iir & DE_GSE_IVB)
-               intel_opregion_asle_intr(dev);
+               intel_opregion_asle_intr(dev_priv);
 
        for_each_pipe(dev_priv, pipe) {
                if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
-                   intel_pipe_handle_vblank(dev, pipe))
-                       intel_check_page_flip(dev, pipe);
+                   intel_pipe_handle_vblank(dev_priv, pipe))
+                       intel_check_page_flip(dev_priv, pipe);
 
                /* plane/pipes map 1:1 on ilk+ */
-               if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
-                       intel_prepare_page_flip(dev, pipe);
-                       intel_finish_page_flip_plane(dev, pipe);
-               }
+               if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
+                       intel_finish_page_flip_cs(dev_priv, pipe);
        }
 
        /* check event from PCH */
-       if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+       if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
                u32 pch_iir = I915_READ(SDEIIR);
 
-               cpt_irq_handler(dev, pch_iir);
+               cpt_irq_handler(dev_priv, pch_iir);
 
                /* clear PCH hotplug event before clear CPU irq */
                I915_WRITE(SDEIIR, pch_iir);
@@ -2257,7 +2212,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
 
@@ -2277,7 +2232,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
         * able to process them after we restore SDEIER (as soon as we restore
         * it, we'll get an interrupt if SDEIIR still has something to process
         * due to its back queue). */
-       if (!HAS_PCH_NOP(dev)) {
+       if (!HAS_PCH_NOP(dev_priv)) {
                sde_ier = I915_READ(SDEIER);
                I915_WRITE(SDEIER, 0);
                POSTING_READ(SDEIER);
@@ -2289,7 +2244,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        if (gt_iir) {
                I915_WRITE(GTIIR, gt_iir);
                ret = IRQ_HANDLED;
-               if (INTEL_INFO(dev)->gen >= 6)
+               if (INTEL_GEN(dev_priv) >= 6)
                        snb_gt_irq_handler(dev_priv, gt_iir);
                else
                        ilk_gt_irq_handler(dev_priv, gt_iir);
@@ -2299,13 +2254,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        if (de_iir) {
                I915_WRITE(DEIIR, de_iir);
                ret = IRQ_HANDLED;
-               if (INTEL_INFO(dev)->gen >= 7)
-                       ivb_display_irq_handler(dev, de_iir);
+               if (INTEL_GEN(dev_priv) >= 7)
+                       ivb_display_irq_handler(dev_priv, de_iir);
                else
-                       ilk_display_irq_handler(dev, de_iir);
+                       ilk_display_irq_handler(dev_priv, de_iir);
        }
 
-       if (INTEL_INFO(dev)->gen >= 6) {
+       if (INTEL_GEN(dev_priv) >= 6) {
                u32 pm_iir = I915_READ(GEN6_PMIIR);
                if (pm_iir) {
                        I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -2316,7 +2271,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
-       if (!HAS_PCH_NOP(dev)) {
+       if (!HAS_PCH_NOP(dev_priv)) {
                I915_WRITE(SDEIER, sde_ier);
                POSTING_READ(SDEIER);
        }
@@ -2327,10 +2282,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        return ret;
 }
 
-static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
+static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
+                               u32 hotplug_trigger,
                                const u32 hpd[HPD_NUM_PINS])
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
 
        dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
@@ -2340,13 +2295,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
                           dig_hotplug_reg, hpd,
                           bxt_port_hotplug_long_detect);
 
-       intel_hpd_irq_handler(dev, pin_mask, long_mask);
+       intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
 }
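
bxt_hpd_irq_handler() reduces the raw hotplug trigger bits to a pin mask plus a long-pulse mask before handing off to intel_hpd_irq_handler(). A toy model of that split; the bit layout here is invented, and the real per-port decode lives in bxt_port_hotplug_long_detect():

    #include <stdio.h>

    #define NUM_PINS 3

    int main(void)
    {
        unsigned int trigger = 0x5;  /* pins 0 and 2 fired */
        unsigned int status  = 0x4;  /* pin 2 reports a long pulse */
        unsigned int pin_mask = 0, long_mask = 0;

        for (int pin = 0; pin < NUM_PINS; pin++) {
            if (!(trigger & (1u << pin)))
                continue;
            pin_mask |= 1u << pin;
            if (status & (1u << pin))   /* invented long-pulse bit */
                long_mask |= 1u << pin;
        }

        printf("pin_mask=0x%x long_mask=0x%x\n", pin_mask, long_mask);
        return 0;
    }
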
 
 static irqreturn_t
 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 {
-       struct drm_device *dev = dev_priv->dev;
        irqreturn_t ret = IRQ_NONE;
        u32 iir;
        enum pipe pipe;
@@ -2357,7 +2311,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                        I915_WRITE(GEN8_DE_MISC_IIR, iir);
                        ret = IRQ_HANDLED;
                        if (iir & GEN8_DE_MISC_GSE)
-                               intel_opregion_asle_intr(dev);
+                               intel_opregion_asle_intr(dev_priv);
                        else
                                DRM_ERROR("Unexpected DE Misc interrupt\n");
                }
@@ -2381,26 +2335,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                                            GEN9_AUX_CHANNEL_D;
 
                        if (iir & tmp_mask) {
-                               dp_aux_irq_handler(dev);
+                               dp_aux_irq_handler(dev_priv);
                                found = true;
                        }
 
                        if (IS_BROXTON(dev_priv)) {
                                tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
                                if (tmp_mask) {
-                                       bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
+                                       bxt_hpd_irq_handler(dev_priv, tmp_mask,
+                                                           hpd_bxt);
                                        found = true;
                                }
                        } else if (IS_BROADWELL(dev_priv)) {
                                tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
                                if (tmp_mask) {
-                                       ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
+                                       ilk_hpd_irq_handler(dev_priv,
+                                                           tmp_mask, hpd_bdw);
                                        found = true;
                                }
                        }
 
-                       if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
-                               gmbus_irq_handler(dev);
+                       if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
+                               gmbus_irq_handler(dev_priv);
                                found = true;
                        }
 
@@ -2427,8 +2383,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
 
                if (iir & GEN8_PIPE_VBLANK &&
-                   intel_pipe_handle_vblank(dev, pipe))
-                       intel_check_page_flip(dev, pipe);
+                   intel_pipe_handle_vblank(dev_priv, pipe))
+                       intel_check_page_flip(dev_priv, pipe);
 
                flip_done = iir;
                if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2436,13 +2392,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                else
                        flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
 
-               if (flip_done) {
-                       intel_prepare_page_flip(dev, pipe);
-                       intel_finish_page_flip_plane(dev, pipe);
-               }
+               if (flip_done)
+                       intel_finish_page_flip_cs(dev_priv, pipe);
 
                if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
-                       hsw_pipe_crc_irq_handler(dev, pipe);
+                       hsw_pipe_crc_irq_handler(dev_priv, pipe);
 
                if (iir & GEN8_PIPE_FIFO_UNDERRUN)
                        intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2459,7 +2413,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                                  fault_errors);
        }
 
-       if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
+       if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
            master_ctl & GEN8_DE_PCH_IRQ) {
                /*
                 * FIXME(BDW): Assume for now that the new interrupt handling
@@ -2471,10 +2425,10 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                        I915_WRITE(SDEIIR, iir);
                        ret = IRQ_HANDLED;
 
-                       if (HAS_PCH_SPT(dev_priv))
-                               spt_irq_handler(dev, iir);
+                       if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
+                               spt_irq_handler(dev_priv, iir);
                        else
-                               cpt_irq_handler(dev, iir);
+                               cpt_irq_handler(dev_priv, iir);
                } else {
                        /*
                         * Like on previous PCH there seems to be something
@@ -2490,7 +2444,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 static irqreturn_t gen8_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 master_ctl;
        u32 gt_iir[4] = {};
        irqreturn_t ret;
@@ -2521,11 +2475,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        return ret;
 }
 
-static void i915_error_wake_up(struct drm_i915_private *dev_priv,
-                              bool reset_completed)
+static void i915_error_wake_up(struct drm_i915_private *dev_priv)
 {
-       struct intel_engine_cs *engine;
-
        /*
         * Notify all waiters for GPU completion events that reset state has
         * been changed, and that they need to restart their wait after
@@ -2534,36 +2485,28 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
         */
 
        /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-       for_each_engine(engine, dev_priv)
-               wake_up_all(&engine->irq_queue);
+       wake_up_all(&dev_priv->gpu_error.wait_queue);
 
        /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
        wake_up_all(&dev_priv->pending_flip_queue);
-
-       /*
-        * Signal tasks blocked in i915_gem_wait_for_error that the pending
-        * reset state is cleared.
-        */
-       if (reset_completed)
-               wake_up_all(&dev_priv->gpu_error.reset_queue);
 }
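
With the per-engine irq_queue gone, error wakeups now funnel through two wait queues. As a rough user-space analogue of this publish-then-broadcast pattern (the kernel primitives are wait_queue_head_t and wake_up_all(), not pthreads):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool reset_pending;

    static void *waiter(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        while (!reset_pending)          /* re-check after every wakeup */
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        printf("waiter: saw reset state, restarting its wait\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);

        /* Publish the state change first, then wake every waiter at once. */
        pthread_mutex_lock(&lock);
        reset_pending = true;
        pthread_mutex_unlock(&lock);
        pthread_cond_broadcast(&cond);

        pthread_join(t, NULL);
        return 0;
    }
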
 
 /**
  * i915_reset_and_wakeup - do process context error handling work
- * @dev: drm device
+ * @dev_priv: i915 device private
  *
  * Fire an error uevent so userspace can see that a hang or error
  * was detected.
  */
-static void i915_reset_and_wakeup(struct drm_device *dev)
+static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
        char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
        char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
        int ret;
 
-       kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
+       kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
 
        /*
         * Note that there's only one work item which does gpu resets, so we
@@ -2577,8 +2520,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
         */
        if (i915_reset_in_progress(&dev_priv->gpu_error)) {
                DRM_DEBUG_DRIVER("resetting chip\n");
-               kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
-                                  reset_event);
+               kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
 
                /*
                 * In most cases it's guaranteed that we get here with an RPM
@@ -2589,7 +2531,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
                 */
                intel_runtime_pm_get(dev_priv);
 
-               intel_prepare_reset(dev);
+               intel_prepare_reset(dev_priv);
 
                /*
                 * All state reset _must_ be completed before we update the
@@ -2597,27 +2539,26 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
                 * pending state and not properly drop locks, resulting in
                 * deadlocks with the reset work.
                 */
-               ret = i915_reset(dev);
+               ret = i915_reset(dev_priv);
 
-               intel_finish_reset(dev);
+               intel_finish_reset(dev_priv);
 
                intel_runtime_pm_put(dev_priv);
 
                if (ret == 0)
-                       kobject_uevent_env(&dev->primary->kdev->kobj,
+                       kobject_uevent_env(kobj,
                                           KOBJ_CHANGE, reset_done_event);
 
                /*
                 * Note: The wake_up also serves as a memory barrier so that
                 * waiters see the update value of the reset counter atomic_t.
                 */
-               i915_error_wake_up(dev_priv, true);
+               wake_up_all(&dev_priv->gpu_error.reset_queue);
        }
 }
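
Userspace sees these KOBJ_CHANGE notifications as uevents on the DRM card device. A hypothetical libudev listener is sketched below; the "ERROR" and "RESET" property names are assumptions based on the I915_ERROR_UEVENT/I915_RESET_UEVENT strings used above (compile with -ludev):

    #include <libudev.h>
    #include <poll.h>
    #include <stdio.h>

    int main(void)
    {
        struct udev *udev = udev_new();
        struct udev_monitor *mon =
                udev_monitor_new_from_netlink(udev, "kernel");
        struct pollfd pfd;

        udev_monitor_enable_receiving(mon);
        pfd.fd = udev_monitor_get_fd(mon);
        pfd.events = POLLIN;

        while (poll(&pfd, 1, -1) > 0) {
            struct udev_device *dev = udev_monitor_receive_device(mon);
            const char *err, *rst;

            if (!dev)
                continue;
            err = udev_device_get_property_value(dev, "ERROR");
            rst = udev_device_get_property_value(dev, "RESET");
            if (err || rst)
                printf("gpu uevent: ERROR=%s RESET=%s\n",
                       err ? err : "-", rst ? rst : "-");
            udev_device_unref(dev);
        }

        udev_monitor_unref(mon);
        udev_unref(udev);
        return 0;
    }
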
 
-static void i915_report_and_clear_eir(struct drm_device *dev)
+static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t instdone[I915_NUM_INSTDONE_REG];
        u32 eir = I915_READ(EIR);
        int pipe, i;
@@ -2627,9 +2568,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 
        pr_err("render error detected, EIR: 0x%08x\n", eir);
 
-       i915_get_extra_instdone(dev, instdone);
+       i915_get_extra_instdone(dev_priv, instdone);
 
-       if (IS_G4X(dev)) {
+       if (IS_G4X(dev_priv)) {
                if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
                        u32 ipeir = I915_READ(IPEIR_I965);
 
@@ -2651,7 +2592,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
                }
        }
 
-       if (!IS_GEN2(dev)) {
+       if (!IS_GEN2(dev_priv)) {
                if (eir & I915_ERROR_PAGE_TABLE) {
                        u32 pgtbl_err = I915_READ(PGTBL_ER);
                        pr_err("page table error\n");
@@ -2673,7 +2614,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
                pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
                for (i = 0; i < ARRAY_SIZE(instdone); i++)
                        pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
-               if (INTEL_INFO(dev)->gen < 4) {
+               if (INTEL_GEN(dev_priv) < 4) {
                        u32 ipeir = I915_READ(IPEIR);
 
                        pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
@@ -2709,18 +2650,19 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 
 /**
  * i915_handle_error - handle a gpu error
- * @dev: drm device
+ * @dev_priv: i915 device private
  * @engine_mask: mask representing engines that are hung
+ * @fmt: Error message format string
  * Do some basic checking of register state at error time and
  * dump it to the syslog.  Also call i915_capture_error_state() to make
  * sure we get a record and make it available in debugfs.  Fire a uevent
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
  */
-void i915_handle_error(struct drm_device *dev, u32 engine_mask,
+void i915_handle_error(struct drm_i915_private *dev_priv,
+                      u32 engine_mask,
                       const char *fmt, ...)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        va_list args;
        char error_msg[80];
 
@@ -2728,8 +2670,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
        vscnprintf(error_msg, sizeof(error_msg), fmt, args);
        va_end(args);
 
-       i915_capture_error_state(dev, engine_mask, error_msg);
-       i915_report_and_clear_eir(dev);
+       i915_capture_error_state(dev_priv, engine_mask, error_msg);
+       i915_report_and_clear_eir(dev_priv);
 
        if (engine_mask) {
                atomic_or(I915_RESET_IN_PROGRESS_FLAG,
@@ -2748,10 +2690,10 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
                 * ensure that the waiters see the updated value of the reset
                 * counter atomic_t.
                 */
-               i915_error_wake_up(dev_priv, false);
+               i915_error_wake_up(dev_priv);
        }
 
-       i915_reset_and_wakeup(dev);
+       i915_reset_and_wakeup(dev_priv);
 }
 
 /* Called from drm generic code, passed 'crtc' which
@@ -2759,7 +2701,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
  */
 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2776,7 +2718,7 @@ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
        uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
                                                     DE_PIPE_VBLANK(pipe);
@@ -2790,7 +2732,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2803,7 +2745,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2818,7 +2760,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
  */
 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2830,7 +2772,7 @@ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
        uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
                                                     DE_PIPE_VBLANK(pipe);
@@ -2842,7 +2784,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2853,7 +2795,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
 
 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned long irqflags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2869,9 +2811,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
 }
 
 static bool
-ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
 {
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_GEN(engine->i915) >= 8) {
                return (ipehr >> 23) == 0x1c;
        } else {
                ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2884,10 +2826,10 @@ static struct intel_engine_cs *
 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
                                 u64 offset)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        struct intel_engine_cs *signaller;
 
-       if (INTEL_INFO(dev_priv)->gen >= 8) {
+       if (INTEL_GEN(dev_priv) >= 8) {
                for_each_engine(signaller, dev_priv) {
                        if (engine == signaller)
                                continue;
@@ -2916,7 +2858,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
 static struct intel_engine_cs *
 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u32 cmd, ipehr, head;
        u64 offset = 0;
        int i, backwards;
@@ -2942,7 +2884,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
                return NULL;
 
        ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-       if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
+       if (!ipehr_is_semaphore_wait(engine, ipehr))
                return NULL;
 
        /*
@@ -2954,7 +2896,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
         * ringbuffer itself.
         */
        head = I915_READ_HEAD(engine) & HEAD_ADDR;
-       backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
+       backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
 
        for (i = backwards; i; --i) {
                /*
@@ -2976,7 +2918,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
                return NULL;
 
        *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
-       if (INTEL_INFO(engine->dev)->gen >= 8) {
+       if (INTEL_GEN(dev_priv) >= 8) {
                offset = ioread32(engine->buffer->virtual_start + head + 12);
                offset <<= 32;
                offset = ioread32(engine->buffer->virtual_start + head + 8);
@@ -2986,7 +2928,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 
 static int semaphore_passed(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        struct intel_engine_cs *signaller;
        u32 seqno;
 
@@ -3000,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
        if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
                return -1;
 
-       if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
+       if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
                return 1;
 
        /* cursory check for an unkickable deadlock */
@@ -3028,7 +2970,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
        if (engine->id != RCS)
                return true;
 
-       i915_get_extra_instdone(engine->dev, instdone);
+       i915_get_extra_instdone(engine->i915, instdone);
 
        /* There might be unstable subunit states even when
         * actual head is not moving. Filter out the unstable ones by
@@ -3069,8 +3011,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
 static enum intel_ring_hangcheck_action
 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        enum intel_ring_hangcheck_action ha;
        u32 tmp;
 
@@ -3078,7 +3019,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
        if (ha != HANGCHECK_HUNG)
                return ha;
 
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev_priv))
                return HANGCHECK_HUNG;
 
        /* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3088,19 +3029,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
         */
        tmp = I915_READ_CTL(engine);
        if (tmp & RING_WAIT) {
-               i915_handle_error(dev, 0,
+               i915_handle_error(dev_priv, 0,
                                  "Kicking stuck wait on %s",
                                  engine->name);
                I915_WRITE_CTL(engine, tmp);
                return HANGCHECK_KICK;
        }
 
-       if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
+       if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
                switch (semaphore_passed(engine)) {
                default:
                        return HANGCHECK_HUNG;
                case 1:
-                       i915_handle_error(dev, 0,
+                       i915_handle_error(dev_priv, 0,
                                          "Kicking stuck semaphore on %s",
                                          engine->name);
                        I915_WRITE_CTL(engine, tmp);
@@ -3113,23 +3054,21 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
        return HANGCHECK_HUNG;
 }
 
-static unsigned kick_waiters(struct intel_engine_cs *engine)
+static unsigned long kick_waiters(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *i915 = to_i915(engine->dev);
-       unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
+       struct drm_i915_private *i915 = engine->i915;
+       unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
 
-       if (engine->hangcheck.user_interrupts == user_interrupts &&
+       if (engine->hangcheck.user_interrupts == irq_count &&
            !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
-               if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
+               if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
                        DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
                                  engine->name);
-               else
-                       DRM_INFO("Fake missed irq on %s\n",
-                                engine->name);
-               wake_up_all(&engine->irq_queue);
+
+               intel_engine_enable_fake_irq(engine);
        }
 
-       return user_interrupts;
+       return irq_count;
 }
 /*
  * This is called when the chip hasn't reported back with completed
@@ -3144,11 +3083,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             gpu_error.hangcheck_work.work);
-       struct drm_device *dev = dev_priv->dev;
        struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int busy_count = 0, rings_hung = 0;
-       bool stuck[I915_NUM_ENGINES] = { 0 };
+       unsigned int hung = 0, stuck = 0;
+       int busy_count = 0;
 #define BUSY 1
 #define KICK 5
 #define HUNG 20
@@ -3157,12 +3094,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
        if (!i915.enable_hangcheck)
                return;
 
-       /*
-        * The hangcheck work is synced during runtime suspend, we don't
-        * require a wakeref. TODO: instead of disabling the asserts make
-        * sure that we hold a reference when this work is running.
-        */
-       DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
+       if (!READ_ONCE(dev_priv->gt.awake))
+               return;
 
        /* As enabling the GPU requires fairly extensive mmio access,
         * periodically arm the mmio checker to see if we are triggering
@@ -3170,11 +3103,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
         */
        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
-       for_each_engine_id(engine, dev_priv, id) {
+       for_each_engine(engine, dev_priv) {
+               bool busy = intel_engine_has_waiter(engine);
                u64 acthd;
                u32 seqno;
                unsigned user_interrupts;
-               bool busy = true;
 
                semaphore_clear_deadlocks(dev_priv);
 
@@ -3189,7 +3122,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                        engine->irq_seqno_barrier(engine);
 
                acthd = intel_ring_get_active_head(engine);
-               seqno = engine->get_seqno(engine);
+               seqno = intel_engine_get_seqno(engine);
 
                /* Reset stuck interrupts between batch advances */
                user_interrupts = 0;
@@ -3197,12 +3130,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                if (engine->hangcheck.seqno == seqno) {
                        if (ring_idle(engine, seqno)) {
                                engine->hangcheck.action = HANGCHECK_IDLE;
-                               if (waitqueue_active(&engine->irq_queue)) {
+                               if (busy) {
                                        /* Safeguard against driver failure */
                                        user_interrupts = kick_waiters(engine);
                                        engine->hangcheck.score += BUSY;
-                               } else
-                                       busy = false;
+                               }
                        } else {
                                /* We always increment the hangcheck score
                                 * if the ring is busy and still processing
@@ -3234,10 +3166,15 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                                        break;
                                case HANGCHECK_HUNG:
                                        engine->hangcheck.score += HUNG;
-                                       stuck[id] = true;
                                        break;
                                }
                        }
+
+                       if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+                               hung |= intel_engine_flag(engine);
+                               if (engine->hangcheck.action != HANGCHECK_HUNG)
+                                       stuck |= intel_engine_flag(engine);
+                       }
                } else {
                        engine->hangcheck.action = HANGCHECK_ACTIVE;
 
@@ -3262,48 +3199,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                busy_count += busy;
        }
 
-       for_each_engine_id(engine, dev_priv, id) {
-               if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
-                       DRM_INFO("%s on %s\n",
-                                stuck[id] ? "stuck" : "no progress",
-                                engine->name);
-                       rings_hung |= intel_engine_flag(engine);
-               }
-       }
+       if (hung) {
+               char msg[80];
+               int len;
 
-       if (rings_hung) {
-               i915_handle_error(dev, rings_hung, "Engine(s) hung");
-               goto out;
+               /* If some rings hung but others were still busy, only
+                * blame the hanging rings in the synopsis.
+                */
+               if (stuck != hung)
+                       hung &= ~stuck;
+               len = scnprintf(msg, sizeof(msg),
+                               "%s on ", stuck == hung ? "No progress" : "Hang");
+               for_each_engine_masked(engine, dev_priv, hung)
+                       len += scnprintf(msg + len, sizeof(msg) - len,
+                                        "%s, ", engine->name);
+               msg[len-2] = '\0';
+
+               return i915_handle_error(dev_priv, hung, msg);
        }
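
The synopsis construction above is a bounded string build: scnprintf() cannot overrun msg, and the trailing ", " is chopped by writing a NUL two bytes back. The same idea in plain user-space C, with invented engine names (snprintf() differs from scnprintf() only in its return value, which a large enough buffer makes moot):

    #include <stdio.h>

    int main(void)
    {
        static const char *names[] = { "rcs", "bcs", "vcs" };
        unsigned int hung = 0x5;        /* bits 0 and 2 set */
        char msg[80];
        int len;

        len = snprintf(msg, sizeof(msg), "Hang on ");
        for (unsigned int i = 0; i < 3; i++)
            if (hung & (1u << i))
                len += snprintf(msg + len, sizeof(msg) - len,
                                "%s, ", names[i]);
        msg[len - 2] = '\0';            /* drop the trailing ", " */

        printf("%s\n", msg);            /* "Hang on rcs, vcs" */
        return 0;
    }
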
 
+       /* Reset timer in case GPU hangs without another request being added */
        if (busy_count)
-               /* Reset timer case chip hangs without another request
-                * being added */
-               i915_queue_hangcheck(dev);
-
-out:
-       ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
-}
-
-void i915_queue_hangcheck(struct drm_device *dev)
-{
-       struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
-
-       if (!i915.enable_hangcheck)
-               return;
-
-       /* Don't continually defer the hangcheck so that it is always run at
-        * least once after work has been scheduled on any ring. Otherwise,
-        * we will ignore a hung ring if a second ring is kept busy.
-        */
-
-       queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
-                          round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
+               i915_queue_hangcheck(dev_priv);
 }
 
 static void ibx_irq_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (HAS_PCH_NOP(dev))
                return;
@@ -3324,7 +3246,7 @@ static void ibx_irq_reset(struct drm_device *dev)
  */
 static void ibx_irq_pre_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (HAS_PCH_NOP(dev))
                return;
@@ -3336,7 +3258,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
 
 static void gen5_gt_irq_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        GEN5_IRQ_RESET(GT);
        if (INTEL_INFO(dev)->gen >= 6)
@@ -3396,7 +3318,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 */
 static void ironlake_irq_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(HWSTAM, 0xffffffff);
 
@@ -3411,7 +3333,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
 
 static void valleyview_irq_preinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(VLV_MASTER_IER, 0);
        POSTING_READ(VLV_MASTER_IER);
@@ -3434,7 +3356,7 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
 
 static void gen8_irq_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        I915_WRITE(GEN8_MASTER_IRQ, 0);
@@ -3480,12 +3402,12 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
        spin_unlock_irq(&dev_priv->irq_lock);
 
        /* make sure we're done processing display irqs */
-       synchronize_irq(dev_priv->dev->irq);
+       synchronize_irq(dev_priv->drm.irq);
 }
 
 static void cherryview_irq_preinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(GEN8_MASTER_IRQ, 0);
        POSTING_READ(GEN8_MASTER_IRQ);
@@ -3500,31 +3422,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
-static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
+static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
                                  const u32 hpd[HPD_NUM_PINS])
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        u32 enabled_irqs = 0;
 
-       for_each_intel_encoder(dev, encoder)
+       for_each_intel_encoder(&dev_priv->drm, encoder)
                if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
                        enabled_irqs |= hpd[encoder->hpd_pin];
 
        return enabled_irqs;
 }
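
intel_hpd_enabled_irqs() is a simple fold: each software-enabled HPD pin contributes its hardware trigger bit to the returned mask. A standalone sketch with invented bit values:

    #include <stdio.h>

    #define HPD_NUM_PINS 3

    /* Per-pin hardware trigger bits, analogous to the hpd[] tables. */
    static const unsigned int hpd_bits[HPD_NUM_PINS] = { 0x1, 0x8, 0x40 };

    /* Per-pin software state: nonzero stands in for HPD_ENABLED. */
    static const int hpd_enabled[HPD_NUM_PINS] = { 1, 0, 1 };

    int main(void)
    {
        unsigned int enabled_irqs = 0;

        for (int pin = 0; pin < HPD_NUM_PINS; pin++)
            if (hpd_enabled[pin])
                enabled_irqs |= hpd_bits[pin];

        printf("enabled_irqs = 0x%x\n", enabled_irqs);  /* 0x41 */
        return 0;
    }
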
 
-static void ibx_hpd_irq_setup(struct drm_device *dev)
+static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_irqs, hotplug, enabled_irqs;
 
-       if (HAS_PCH_IBX(dev)) {
+       if (HAS_PCH_IBX(dev_priv)) {
                hotplug_irqs = SDE_HOTPLUG_MASK;
-               enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
+               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
        } else {
                hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
-               enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
+               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
        }
 
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3543,18 +3463,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
         * When CPU and PCH are on the same package, port A
         * HPD must be enabled in both north and south.
         */
-       if (HAS_PCH_LPT_LP(dev))
+       if (HAS_PCH_LPT_LP(dev_priv))
                hotplug |= PORTA_HOTPLUG_ENABLE;
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 }
 
-static void spt_hpd_irq_setup(struct drm_device *dev)
+static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_irqs, hotplug, enabled_irqs;
 
        hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
-       enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
 
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
@@ -3569,24 +3488,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev)
        I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
 }
 
-static void ilk_hpd_irq_setup(struct drm_device *dev)
+static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_irqs, hotplug, enabled_irqs;
 
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_GEN(dev_priv) >= 8) {
                hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
-               enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
+               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
 
                bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
-       } else if (INTEL_INFO(dev)->gen >= 7) {
+       } else if (INTEL_GEN(dev_priv) >= 7) {
                hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
-               enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
+               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
 
                ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
        } else {
                hotplug_irqs = DE_DP_A_HOTPLUG;
-               enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
+               enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
 
                ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
        }
@@ -3601,15 +3519,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev)
        hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
        I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
 
-       ibx_hpd_irq_setup(dev);
+       ibx_hpd_irq_setup(dev_priv);
 }
 
-static void bxt_hpd_irq_setup(struct drm_device *dev)
+static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_irqs, hotplug, enabled_irqs;
 
-       enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
+       enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
        hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
 
        bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3642,7 +3559,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
 
 static void ibx_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 mask;
 
        if (HAS_PCH_NOP(dev))
@@ -3659,7 +3576,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
 
 static void gen5_gt_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pm_irqs, gt_irqs;
 
        pm_irqs = gt_irqs = 0;
@@ -3673,8 +3590,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 
        gt_irqs |= GT_RENDER_USER_INTERRUPT;
        if (IS_GEN5(dev)) {
-               gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
-                          ILK_BSD_USER_INTERRUPT;
+               gt_irqs |= ILK_BSD_USER_INTERRUPT;
        } else {
                gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
        }
@@ -3696,7 +3612,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 display_mask, extra_mask;
 
        if (INTEL_INFO(dev)->gen >= 7) {
@@ -3775,7 +3691,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
 
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        gen5_gt_irq_postinstall(dev);
 
@@ -3827,6 +3743,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
        uint32_t de_pipe_enables;
        u32 de_port_masked = GEN8_AUX_CHANNEL_A;
        u32 de_port_enables;
+       u32 de_misc_masked = GEN8_DE_MISC_GSE;
        enum pipe pipe;
 
        if (INTEL_INFO(dev_priv)->gen >= 9) {
@@ -3862,11 +3779,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
                                          de_pipe_enables);
 
        GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+       GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
 }
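
GEN5_IRQ_INIT() pairs an interrupt mask register with an enable register. A deliberately simplified toy of the IMR/IER relationship as used here, programming IMR = ~wanted and IER = wanted (real hardware additionally latches IIR bits and has posting-read ordering rules this ignores):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t wanted = 0x1;          /* e.g. the GSE misc bit */
        uint32_t imr = ~wanted;         /* mask everything else */
        uint32_t ier = wanted;          /* enable only what we want */
        uint32_t raw = 0x101;           /* pretend pending sources */

        /* Only enabled, unmasked sources should surface to the CPU. */
        printf("delivered = 0x%x\n", raw & ier & ~imr);
        return 0;
    }
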
 
 static int gen8_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (HAS_PCH_SPLIT(dev))
                ibx_irq_pre_postinstall(dev);
@@ -3885,7 +3803,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
 
 static int cherryview_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        gen8_gt_irq_postinstall(dev_priv);
 
@@ -3902,7 +3820,7 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
 
 static void gen8_irq_uninstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!dev_priv)
                return;
@@ -3912,7 +3830,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
 
 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!dev_priv)
                return;
@@ -3932,7 +3850,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
 
 static void cherryview_irq_uninstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!dev_priv)
                return;
@@ -3952,7 +3870,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
 
 static void ironlake_irq_uninstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!dev_priv)
                return;
@@ -3962,7 +3880,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
 
 static void i8xx_irq_preinstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        for_each_pipe(dev_priv, pipe)
@@ -3974,7 +3892,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
 
 static int i8xx_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4006,13 +3924,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
 /*
  * Returns true when a page flip has completed.
  */
-static bool i8xx_handle_vblank(struct drm_device *dev,
+static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
                               int plane, int pipe, u32 iir)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
-       if (!intel_pipe_handle_vblank(dev, pipe))
+       if (!intel_pipe_handle_vblank(dev_priv, pipe))
                return false;
 
        if ((iir & flip_pending) == 0)
@@ -4027,19 +3944,18 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
        if (I915_READ16(ISR) & flip_pending)
                goto check_page_flip;
 
-       intel_prepare_page_flip(dev, plane);
-       intel_finish_page_flip(dev, pipe);
+       intel_finish_page_flip_cs(dev_priv, pipe);
        return true;
 
 check_page_flip:
-       intel_check_page_flip(dev, pipe);
+       intel_check_page_flip(dev_priv, pipe);
        return false;
 }
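
Flip completion here hinges on a latched-then-cleared test: the flip-pending bit must appear in the latched IIR yet no longer be live in ISR. A compact model of that predicate, with invented register values:

    #include <stdbool.h>
    #include <stdio.h>

    static bool flip_completed(unsigned int iir, unsigned int isr,
                               unsigned int flip_pending)
    {
        if ((iir & flip_pending) == 0)
            return false;               /* no flip was in flight */
        if (isr & flip_pending)
            return false;               /* still pending: re-check later */
        return true;
    }

    int main(void)
    {
        printf("%d\n", flip_completed(0x4, 0x0, 0x4));  /* 1: done */
        printf("%d\n", flip_completed(0x4, 0x4, 0x4));  /* 0: pending */
        return 0;
    }
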
 
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u16 iir, new_iir;
        u32 pipe_stats[2];
        int pipe;
@@ -4089,15 +4005,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 
                for_each_pipe(dev_priv, pipe) {
                        int plane = pipe;
-                       if (HAS_FBC(dev))
+                       if (HAS_FBC(dev_priv))
                                plane = !plane;
 
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
-                           i8xx_handle_vblank(dev, plane, pipe, iir))
+                           i8xx_handle_vblank(dev_priv, plane, pipe, iir))
                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-                               i9xx_pipe_crc_irq_handler(dev, pipe);
+                               i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
                        if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4116,7 +4032,7 @@ out:
 
 static void i8xx_irq_uninstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        for_each_pipe(dev_priv, pipe) {
@@ -4131,7 +4047,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
 
 static void i915_irq_preinstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        if (I915_HAS_HOTPLUG(dev)) {
@@ -4149,7 +4065,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
 
 static int i915_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;
 
        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4182,7 +4098,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);
 
-       i915_enable_asle_pipestat(dev);
+       i915_enable_asle_pipestat(dev_priv);
 
        /* Interrupt setup is already guaranteed to be single-threaded; this is
         * just to make the assert_spin_locked check happy. */
@@ -4197,13 +4113,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
 /*
  * Returns true when a page flip has completed.
  */
-static bool i915_handle_vblank(struct drm_device *dev,
+static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
                               int plane, int pipe, u32 iir)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
 
-       if (!intel_pipe_handle_vblank(dev, pipe))
+       if (!intel_pipe_handle_vblank(dev_priv, pipe))
                return false;
 
        if ((iir & flip_pending) == 0)
@@ -4218,19 +4133,18 @@ static bool i915_handle_vblank(struct drm_device *dev,
        if (I915_READ(ISR) & flip_pending)
                goto check_page_flip;
 
-       intel_prepare_page_flip(dev, plane);
-       intel_finish_page_flip(dev, pipe);
+       intel_finish_page_flip_cs(dev_priv, pipe);
        return true;
 
 check_page_flip:
-       intel_check_page_flip(dev, pipe);
+       intel_check_page_flip(dev_priv, pipe);
        return false;
 }
 
 static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4273,11 +4187,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                        break;
 
                /* Consume port.  Then clear IIR or we'll miss events */
-               if (I915_HAS_HOTPLUG(dev) &&
+               if (I915_HAS_HOTPLUG(dev_priv) &&
                    iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
                        if (hotplug_status)
-                               i9xx_hpd_irq_handler(dev, hotplug_status);
+                               i9xx_hpd_irq_handler(dev_priv, hotplug_status);
                }
 
                I915_WRITE(IIR, iir & ~flip_mask);
@@ -4288,18 +4202,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
                for_each_pipe(dev_priv, pipe) {
                        int plane = pipe;
-                       if (HAS_FBC(dev))
+                       if (HAS_FBC(dev_priv))
                                plane = !plane;
 
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
-                           i915_handle_vblank(dev, plane, pipe, iir))
+                           i915_handle_vblank(dev_priv, plane, pipe, iir))
                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
 
                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
 
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-                               i9xx_pipe_crc_irq_handler(dev, pipe);
+                               i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
                        if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4307,7 +4221,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                }
 
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
-                       intel_opregion_asle_intr(dev);
+                       intel_opregion_asle_intr(dev_priv);
 
                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero.  If another bit got
@@ -4335,7 +4249,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
 static void i915_irq_uninstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        if (I915_HAS_HOTPLUG(dev)) {
@@ -4357,7 +4271,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
 
 static void i965_irq_preinstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4373,7 +4287,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
 
 static int i965_irq_postinstall(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 enable_mask;
        u32 error_mask;
 
@@ -4391,7 +4305,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
                         I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
        enable_mask |= I915_USER_INTERRUPT;
 
-       if (IS_G4X(dev))
+       if (IS_G4X(dev_priv))
                enable_mask |= I915_BSD_USER_INTERRUPT;
 
        /* Interrupt setup is already guaranteed to be single-threaded, this is
@@ -4406,7 +4320,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
         * Enable some error detection, note the instruction error mask
         * bit is reserved, so we leave it masked.
         */
-       if (IS_G4X(dev)) {
+       if (IS_G4X(dev_priv)) {
                error_mask = ~(GM45_ERROR_PAGE_TABLE |
                               GM45_ERROR_MEM_PRIV |
                               GM45_ERROR_CP_PRIV |
@@ -4424,26 +4338,25 @@ static int i965_irq_postinstall(struct drm_device *dev)
        i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
        POSTING_READ(PORT_HOTPLUG_EN);
 
-       i915_enable_asle_pipestat(dev);
+       i915_enable_asle_pipestat(dev_priv);
 
        return 0;
 }
 
-static void i915_hpd_irq_setup(struct drm_device *dev)
+static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_en;
 
        assert_spin_locked(&dev_priv->irq_lock);
 
        /* Note HDMI and DP share hotplug bits */
        /* enable bits are the same for all generations */
-       hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
+       hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
        /* Programming the CRT detection parameters tends
           to generate a spurious hotplug event about three
           seconds later.  So just do it once.
        */
-       if (IS_G4X(dev))
+       if (IS_G4X(dev_priv))
                hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
        hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
 
@@ -4458,7 +4371,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
 static irqreturn_t i965_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
        int ret = IRQ_NONE, pipe;
@@ -4510,7 +4423,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
                        if (hotplug_status)
-                               i9xx_hpd_irq_handler(dev, hotplug_status);
+                               i9xx_hpd_irq_handler(dev_priv, hotplug_status);
                }
 
                I915_WRITE(IIR, iir & ~flip_mask);
@@ -4523,24 +4436,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 
                for_each_pipe(dev_priv, pipe) {
                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
-                           i915_handle_vblank(dev, pipe, pipe, iir))
+                           i915_handle_vblank(dev_priv, pipe, pipe, iir))
                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
 
                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
 
                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
-                               i9xx_pipe_crc_irq_handler(dev, pipe);
+                               i9xx_pipe_crc_irq_handler(dev_priv, pipe);
 
                        if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
                }
 
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
-                       intel_opregion_asle_intr(dev);
+                       intel_opregion_asle_intr(dev_priv);
 
                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
-                       gmbus_irq_handler(dev);
+                       gmbus_irq_handler(dev_priv);
 
                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero.  If another bit got
@@ -4567,7 +4480,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 
 static void i965_irq_uninstall(struct drm_device * dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
 
        if (!dev_priv)
@@ -4597,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
  */
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        intel_hpd_init_work(dev_priv);
 
@@ -4611,6 +4524,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
        else
                dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
+       dev_priv->rps.pm_intr_keep = 0;
+
+       /*
+        * SNB,IVB can hard hang (and VLV,CHV may) on a looping batchbuffer
+        * if GEN6_PM_UP_EI_EXPIRED is masked.
+        *
+        * TODO: verify if this can be reproduced on VLV,CHV.
+        */
+       if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+               dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
+
+       if (INTEL_INFO(dev_priv)->gen >= 8)
+               dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
        INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
                          i915_hangcheck_elapsed);
 
@@ -4661,7 +4588,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev->driver->disable_vblank = gen8_disable_vblank;
                if (IS_BROXTON(dev))
                        dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
-               else if (HAS_PCH_SPT(dev))
+               else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
                        dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
                else
                        dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
@@ -4674,12 +4601,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
        } else {
-               if (INTEL_INFO(dev_priv)->gen == 2) {
+               if (IS_GEN2(dev_priv)) {
                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_uninstall;
-               } else if (INTEL_INFO(dev_priv)->gen == 3) {
+               } else if (IS_GEN3(dev_priv)) {
                        dev->driver->irq_preinstall = i915_irq_preinstall;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4717,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
         */
        dev_priv->pm.irqs_enabled = true;
 
-       return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
+       return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
 }
 
 /**
@@ -4729,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
  */
 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
 {
-       drm_irq_uninstall(dev_priv->dev);
+       drm_irq_uninstall(&dev_priv->drm);
        intel_hpd_cancel_work(dev_priv);
        dev_priv->pm.irqs_enabled = false;
 }
@@ -4743,9 +4670,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
-       dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+       dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
        dev_priv->pm.irqs_enabled = false;
-       synchronize_irq(dev_priv->dev->irq);
+       synchronize_irq(dev_priv->drm.irq);
 }
 
 /**
@@ -4758,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
 {
        dev_priv->pm.irqs_enabled = true;
-       dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
-       dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
+       dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
+       dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
 }
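
The repeated dev_priv->dev => &dev_priv->drm and dev->dev_private =>
to_i915(dev) substitutions in this file all follow from one structural
change: struct drm_i915_private now embeds its struct drm_device rather than
keeping a back-pointer. A condensed sketch of the layout this assumes:

    struct drm_i915_private {
            struct drm_device drm; /* embedded; replaces the old 'dev' pointer */
            /* ... driver-private state ... */
    };

    static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
    {
            return container_of(dev, struct drm_i915_private, drm);
    }

Both conversions then reduce to compile-time pointer arithmetic, with no
dev->dev_private field left to keep in sync.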
index 1779f02e6df8fd3c406034b4bad1e3f9577e47b2..b6e404c91eed5827f6c8692a57a32f374ee350b9 100644 (file)
@@ -54,10 +54,13 @@ struct i915_params i915 __read_mostly = {
        .verbose_state_checks = 1,
        .nuclear_pageflip = 0,
        .edp_vswing = 0,
-       .enable_guc_submission = false,
+       .enable_guc_loading = 0,
+       .enable_guc_submission = 0,
        .guc_log_level = -1,
        .enable_dp_mst = true,
        .inject_load_failure = 0,
+       .enable_dpcd_backlight = false,
+       .enable_gvt = false,
 };
 
 module_param_named(modeset, i915.modeset, int, 0400);
@@ -197,8 +200,15 @@ MODULE_PARM_DESC(edp_vswing,
                 "(0=use value from vbt [default], 1=low power swing(200mV),"
                 "2=default swing(400mV))");
 
-module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400);
-MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)");
+module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
+MODULE_PARM_DESC(enable_guc_loading,
+               "Enable GuC firmware loading "
+               "(-1=auto, 0=never [default], 1=if available, 2=required)");
+
+module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
+MODULE_PARM_DESC(enable_guc_submission,
+               "Enable GuC submission "
+               "(-1=auto, 0=never [default], 1=if available, 2=required)");
 
 module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
 MODULE_PARM_DESC(guc_log_level,
@@ -210,3 +220,10 @@ MODULE_PARM_DESC(enable_dp_mst,
 module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
 MODULE_PARM_DESC(inject_load_failure,
        "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
+module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
+MODULE_PARM_DESC(enable_dpcd_backlight,
+       "Enable support for DPCD backlight control (default:false)");
+
+module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
+MODULE_PARM_DESC(enable_gvt,
+       "Enable support for Intel GVT-g graphics virtualization host support(default:false)");
index 02bc27804291296a9303088af9c6cc082f2299fd..0ad020b4a925e51a9cea6325b1541c58d2709ea3 100644 (file)
@@ -45,6 +45,8 @@ struct i915_params {
        int enable_ips;
        int invert_brightness;
        int enable_cmd_parser;
+       int enable_guc_loading;
+       int enable_guc_submission;
        int guc_log_level;
        int use_mmio_flip;
        int mmio_debug;
@@ -57,10 +59,11 @@ struct i915_params {
        bool load_detect_test;
        bool reset;
        bool disable_display;
-       bool enable_guc_submission;
        bool verbose_state_checks;
        bool nuclear_pageflip;
        bool enable_dp_mst;
+       bool enable_dpcd_backlight;
+       bool enable_gvt;
 };
 
 extern struct i915_params i915 __read_mostly;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
new file mode 100644 (file)
index 0000000..949c016
--- /dev/null
@@ -0,0 +1,503 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+
+#include "i915_drv.h"
+
+#define GEN_DEFAULT_PIPEOFFSETS \
+       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+                         PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+                          TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+#define GEN_CHV_PIPEOFFSETS \
+       .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+                         CHV_PIPE_C_OFFSET }, \
+       .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+                          CHV_TRANSCODER_C_OFFSET, }, \
+       .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
+                            CHV_PALETTE_C_OFFSET }
+
+#define CURSOR_OFFSETS \
+       .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+
+#define IVB_CURSOR_OFFSETS \
+       .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+
+#define BDW_COLORS \
+       .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define CHV_COLORS \
+       .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+
+static const struct intel_device_info intel_i830_info = {
+       .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_845g_info = {
+       .gen = 2, .num_pipes = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+       .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+       .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+       .gen = 2, .num_pipes = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+       .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i915gm_info = {
+       .gen = 3, .is_mobile = 1, .num_pipes = 2,
+       .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .supports_tv = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945g_info = {
+       .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945gm_info = {
+       .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+       .has_hotplug = 1, .cursor_needs_physical = 1,
+       .has_overlay = 1, .overlay_needs_physical = 1,
+       .supports_tv = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+       .gen = 4, .is_broadwater = 1, .num_pipes = 2,
+       .has_hotplug = 1,
+       .has_overlay = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+       .gen = 4, .is_crestline = 1, .num_pipes = 2,
+       .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+       .supports_tv = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g33_info = {
+       .gen = 3, .is_g33 = 1, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+       .ring_mask = RENDER_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g45_info = {
+       .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
+       .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+       .gen = 4, .is_g4x = 1, .num_pipes = 2,
+       .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+       .has_pipe_cxsr = 1, .has_hotplug = 1,
+       .supports_tv = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+       .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_overlay = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+       .gen = 5, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+       .gen = 5, .is_mobile = 1, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+       .gen = 6, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+       .has_llc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+       .gen = 6, .is_mobile = 1, .num_pipes = 2,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .has_fbc = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+       .has_llc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+};
+
+#define GEN7_FEATURES  \
+       .gen = 7, .num_pipes = 3, \
+       .need_gfx_hws = 1, .has_hotplug = 1, \
+       .has_fbc = 1, \
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+       .has_llc = 1, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       IVB_CURSOR_OFFSETS
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+       GEN7_FEATURES,
+       .is_ivybridge = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+       GEN7_FEATURES,
+       .is_ivybridge = 1,
+       .is_mobile = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info = {
+       GEN7_FEATURES,
+       .is_ivybridge = 1,
+       .num_pipes = 0, /* legal, last one wins */
+};
+
+#define VLV_FEATURES  \
+       .gen = 7, .num_pipes = 2, \
+       .need_gfx_hws = 1, .has_hotplug = 1, \
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+       .display_mmio_offset = VLV_DISPLAY_BASE, \
+       GEN_DEFAULT_PIPEOFFSETS, \
+       CURSOR_OFFSETS
+
+static const struct intel_device_info intel_valleyview_m_info = {
+       VLV_FEATURES,
+       .is_valleyview = 1,
+       .is_mobile = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+       VLV_FEATURES,
+       .is_valleyview = 1,
+};
+
+#define HSW_FEATURES  \
+       GEN7_FEATURES, \
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+       .has_ddi = 1, \
+       .has_fpga_dbg = 1
+
+static const struct intel_device_info intel_haswell_d_info = {
+       HSW_FEATURES,
+       .is_haswell = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+       HSW_FEATURES,
+       .is_haswell = 1,
+       .is_mobile = 1,
+};
+
+#define BDW_FEATURES \
+       HSW_FEATURES, \
+       BDW_COLORS
+
+static const struct intel_device_info intel_broadwell_d_info = {
+       BDW_FEATURES,
+       .gen = 8,
+       .is_broadwell = 1,
+};
+
+static const struct intel_device_info intel_broadwell_m_info = {
+       BDW_FEATURES,
+       .gen = 8, .is_mobile = 1,
+       .is_broadwell = 1,
+};
+
+static const struct intel_device_info intel_broadwell_gt3d_info = {
+       BDW_FEATURES,
+       .gen = 8,
+       .is_broadwell = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_broadwell_gt3m_info = {
+       BDW_FEATURES,
+       .gen = 8, .is_mobile = 1,
+       .is_broadwell = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_cherryview_info = {
+       .gen = 8, .num_pipes = 3,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       .is_cherryview = 1,
+       .display_mmio_offset = VLV_DISPLAY_BASE,
+       GEN_CHV_PIPEOFFSETS,
+       CURSOR_OFFSETS,
+       CHV_COLORS,
+};
+
+static const struct intel_device_info intel_skylake_info = {
+       BDW_FEATURES,
+       .is_skylake = 1,
+       .gen = 9,
+};
+
+static const struct intel_device_info intel_skylake_gt3_info = {
+       BDW_FEATURES,
+       .is_skylake = 1,
+       .gen = 9,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_broxton_info = {
+       .is_broxton = 1,
+       .gen = 9,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+       .num_pipes = 3,
+       .has_ddi = 1,
+       .has_fpga_dbg = 1,
+       .has_fbc = 1,
+       .has_pooled_eu = 0,
+       GEN_DEFAULT_PIPEOFFSETS,
+       IVB_CURSOR_OFFSETS,
+       BDW_COLORS,
+};
+
+static const struct intel_device_info intel_kabylake_info = {
+       BDW_FEATURES,
+       .is_kabylake = 1,
+       .gen = 9,
+};
+
+static const struct intel_device_info intel_kabylake_gt3_info = {
+       BDW_FEATURES,
+       .is_kabylake = 1,
+       .gen = 9,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+/*
+ * Make sure all device matches here are ordered from most specific to most
+ * general.  For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+static const struct pci_device_id pciidlist[] = {
+       INTEL_I830_IDS(&intel_i830_info),
+       INTEL_I845G_IDS(&intel_845g_info),
+       INTEL_I85X_IDS(&intel_i85x_info),
+       INTEL_I865G_IDS(&intel_i865g_info),
+       INTEL_I915G_IDS(&intel_i915g_info),
+       INTEL_I915GM_IDS(&intel_i915gm_info),
+       INTEL_I945G_IDS(&intel_i945g_info),
+       INTEL_I945GM_IDS(&intel_i945gm_info),
+       INTEL_I965G_IDS(&intel_i965g_info),
+       INTEL_G33_IDS(&intel_g33_info),
+       INTEL_I965GM_IDS(&intel_i965gm_info),
+       INTEL_GM45_IDS(&intel_gm45_info),
+       INTEL_G45_IDS(&intel_g45_info),
+       INTEL_PINEVIEW_IDS(&intel_pineview_info),
+       INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
+       INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
+       INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
+       INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
+       INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
+       INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
+       INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
+       INTEL_HSW_D_IDS(&intel_haswell_d_info),
+       INTEL_HSW_M_IDS(&intel_haswell_m_info),
+       INTEL_VLV_M_IDS(&intel_valleyview_m_info),
+       INTEL_VLV_D_IDS(&intel_valleyview_d_info),
+       INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
+       INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
+       INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
+       INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+       INTEL_CHV_IDS(&intel_cherryview_info),
+       INTEL_SKL_GT1_IDS(&intel_skylake_info),
+       INTEL_SKL_GT2_IDS(&intel_skylake_info),
+       INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
+       INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
+       INTEL_BXT_IDS(&intel_broxton_info),
+       INTEL_KBL_GT1_IDS(&intel_kabylake_info),
+       INTEL_KBL_GT2_IDS(&intel_kabylake_info),
+       INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
+       INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+       {0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+extern int i915_driver_load(struct pci_dev *pdev,
+                           const struct pci_device_id *ent);
+
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct intel_device_info *intel_info =
+               (struct intel_device_info *) ent->driver_data;
+
+       if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
+               DRM_INFO("This hardware requires preliminary hardware support.\n"
+                        "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+               return -ENODEV;
+       }
+
+       /* Only bind to function 0 of the device. Early generations
+        * used function 1 as a placeholder for multi-head. This only
+        * causes confusion, especially on systems where both
+        * functions have the same PCI ID!
+        */
+       if (PCI_FUNC(pdev->devfn))
+               return -ENODEV;
+
+       /*
+        * apple-gmux is needed on dual GPU MacBook Pro
+        * to probe the panel if we're the inactive GPU.
+        */
+       if (vga_switcheroo_client_probe_defer(pdev))
+               return -EPROBE_DEFER;
+
+       return i915_driver_load(pdev, ent);
+}
+
+extern void i915_driver_unload(struct drm_device *dev);
+
+static void i915_pci_remove(struct pci_dev *pdev)
+{
+       struct drm_device *dev = pci_get_drvdata(pdev);
+
+       i915_driver_unload(dev);
+       drm_dev_unref(dev);
+}
+
+extern const struct dev_pm_ops i915_pm_ops;
+
+static struct pci_driver i915_pci_driver = {
+       .name = DRIVER_NAME,
+       .id_table = pciidlist,
+       .probe = i915_pci_probe,
+       .remove = i915_pci_remove,
+       .driver.pm = &i915_pm_ops,
+};
+
+static int __init i915_init(void)
+{
+       bool use_kms = true;
+
+       /*
+        * Enable KMS by default, unless explicitly overridden by
+        * either the i915.modeset parameter or by the
+        * vga_text_mode_force boot option.
+        */
+
+       if (i915.modeset == 0)
+               use_kms = false;
+
+       if (vgacon_text_force() && i915.modeset == -1)
+               use_kms = false;
+
+       if (!use_kms) {
+               /* Silently fail loading to not upset userspace. */
+               DRM_DEBUG_DRIVER("KMS disabled.\n");
+               return 0;
+       }
+
+       return pci_register_driver(&i915_pci_driver);
+}
+
+static void __exit i915_exit(void)
+{
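+       /* If KMS was disabled, i915_init() never registered the driver. */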
+       if (!i915_pci_driver.driver.owner)
+               return;
+
+       pci_unregister_driver(&i915_pci_driver);
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR("Tungsten Graphics, Inc.");
+MODULE_AUTHOR("Intel Corporation");
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
new file mode 100644 (file)
index 0000000..c0cb297
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _I915_PVINFO_H_
+#define _I915_PVINFO_H_
+
+/* The MMIO offset of the shared info between guest and host emulator */
+#define VGT_PVINFO_PAGE        0x78000
+#define VGT_PVINFO_SIZE        0x1000
+
+/*
+ * The following structure pages are defined in GEN MMIO space
+ * for virtualization. (One page for now)
+ */
+#define VGT_MAGIC         0x4776544776544776ULL        /* 'vGTvGTvG' */
+#define VGT_VERSION_MAJOR 1
+#define VGT_VERSION_MINOR 0
+
+#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
+#define INTEL_VGT_IF_VERSION \
+       INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
+
+/*
+ * notifications from guest to vgpu device model
+ */
+enum vgt_g2v_type {
+       VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
+       VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
+       VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
+       VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
+       VGT_G2V_EXECLIST_CONTEXT_CREATE,
+       VGT_G2V_EXECLIST_CONTEXT_DESTROY,
+       VGT_G2V_MAX,
+};
+
+struct vgt_if {
+       u64 magic;              /* VGT_MAGIC */
+       uint16_t version_major;
+       uint16_t version_minor;
+       u32 vgt_id;             /* ID of vGT instance */
+       u32 rsv1[12];           /* pad to offset 0x40 */
+       /*
+        *  Data structure to describe the ballooning info of resources.
+        *  Each VM can only have one contiguous area for now.
+        *  (Scattered resources may be supported in the future.)
+        *  (starting from offset 0x40)
+        */
+       struct {
+               /* Aperture register ballooning */
+               struct {
+                       u32 base;
+                       u32 size;
+               } mappable_gmadr;       /* aperture */
+               /* GMADR register ballooning */
+               struct {
+                       u32 base;
+                       u32 size;
+               } nonmappable_gmadr;    /* non aperture */
+               /* allowed fence registers */
+               u32 fence_num;
+               u32 rsv2[3];
+       } avail_rs;             /* available/assigned resource */
+       u32 rsv3[0x200 - 24];   /* pad to half page */
+       /*
+        * The bottom half page is for response from Gfx driver to hypervisor.
+        */
+       u32 rsv4;
+       u32 display_ready;      /* ready for display owner switch */
+
+       u32 rsv5[4];
+
+       u32 g2v_notify;
+       u32 rsv6[7];
+
+       struct {
+               u32 lo;
+               u32 hi;
+       } pdp[4];
+
+       u32 execlist_context_descriptor_lo;
+       u32 execlist_context_descriptor_hi;
+
+       u32  rsv7[0x200 - 24];    /* pad to one page */
+} __packed;
+
+#define vgtif_reg(x) \
+       _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
+
+/* vGPU display status to be used by the host side */
+#define VGT_DRV_DISPLAY_NOT_READY 0
+#define VGT_DRV_DISPLAY_READY     1  /* ready for display switch */
+
+#endif /* _I915_PVINFO_H_ */
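
The handshake this page enables is shown later in this patch in
i915_check_vgpu(): the guest reads the magic and version fields through
vgtif_reg() and only then trusts the remaining PVINFO contents. Condensed:

    u64 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
    if (magic != VGT_MAGIC)
            return; /* bare metal */

    u32 version = INTEL_VGT_IF_VERSION_ENCODE(
                    __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
                    __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
    if (version != INTEL_VGT_IF_VERSION)
            return; /* incompatible interface */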
index b407411e31ba8e649ccbec2255888a976fffde2c..ce14fe09d96236a32af675b5320aaefb9d508f8c 100644 (file)
@@ -220,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   ECOCHK_PPGTT_WT_HSW          (0x2<<3)
 #define   ECOCHK_PPGTT_WB_HSW          (0x3<<3)
 
+#define GEN8_CONFIG0                   _MMIO(0xD00)
+#define  GEN9_DEFAULT_FIXES            (1 << 3 | 1 << 2 | 1 << 1)
+
 #define GAC_ECO_BITS                   _MMIO(0x14090)
 #define   ECOBITS_SNB_BIT              (1<<13)
 #define   ECOBITS_PPGTT_CACHE64B       (3<<8)
@@ -442,6 +445,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
  */
 #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
 
+#define GEN9_MEDIA_POOL_STATE     ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
+#define   GEN9_MEDIA_POOL_ENABLE  (1 << 31)
 #define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
 #define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
 #define   SC_UPDATE_SCISSOR       (0x1<<1)
@@ -713,6 +718,9 @@ enum skl_disp_power_wells {
        /* Not actual bit groups. Used as IDs for lookup_power_well() */
        SKL_DISP_PW_ALWAYS_ON,
        SKL_DISP_PW_DC_OFF,
+
+       BXT_DPIO_CMN_A,
+       BXT_DPIO_CMN_BC,
 };
 
 #define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
@@ -886,7 +894,7 @@ enum skl_disp_power_wells {
  * PLLs can be routed to any transcoder A/B/C.
  *
  * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is
- * digital port D (CHV) or port A (BXT).
+ * digital port D (CHV) or port A (BXT). ::
  *
  *
  *     Dual channel PHY (VLV/CHV/BXT)
@@ -1273,6 +1281,15 @@ enum skl_disp_power_wells {
 #define BXT_P_CR_GT_DISP_PWRON         _MMIO(0x138090)
 #define   GT_DISPLAY_POWER_ON(phy)     (1 << (phy))
 
+#define _BXT_PHY_CTL_DDI_A             0x64C00
+#define _BXT_PHY_CTL_DDI_B             0x64C10
+#define _BXT_PHY_CTL_DDI_C             0x64C20
+#define   BXT_PHY_CMNLANE_POWERDOWN_ACK        (1 << 10)
+#define   BXT_PHY_LANE_POWERDOWN_ACK   (1 << 9)
+#define   BXT_PHY_LANE_ENABLED         (1 << 8)
+#define BXT_PHY_CTL(port)              _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
+                                                        _BXT_PHY_CTL_DDI_B)
+
 #define _PHY_CTL_FAMILY_EDP            0x64C80
 #define _PHY_CTL_FAMILY_DDI            0x64C90
 #define   COMMON_RESET_DIS             (1 << 31)
@@ -1669,6 +1686,12 @@ enum skl_disp_power_wells {
 
 #define GEN7_TLB_RD_ADDR       _MMIO(0x4700)
 
+#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
+#define   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS  (1<<18)
+
+#define GAMT_CHKN_BIT_REG      _MMIO(0x4ab8)
+#define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING     (1<<28)
+
 #if 0
 #define PRB0_TAIL      _MMIO(0x2030)
 #define PRB0_HEAD      _MMIO(0x2034)
@@ -1804,6 +1827,10 @@ enum skl_disp_power_wells {
 #define   GEN9_IZ_HASHING_MASK(slice)                  (0x3 << ((slice) * 2))
 #define   GEN9_IZ_HASHING(slice, val)                  ((val) << ((slice) * 2))
 
+/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
+#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
+#define   GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+
 /* WaClearTdlStateAckDirtyBits */
 #define GEN8_STATE_ACK         _MMIO(0x20F0)
 #define GEN9_STATE_ACK_SLICE1  _MMIO(0x20F8)
@@ -2161,6 +2188,9 @@ enum skl_disp_power_wells {
 
 #define FBC_LL_SIZE            (1536)
 
+#define FBC_LLC_READ_CTRL      _MMIO(0x9044)
+#define   FBC_LLC_FULLY_OPEN   (1<<30)
+
 /* Framebuffer compression for GM45+ */
 #define DPFC_CB_BASE           _MMIO(0x3200)
 #define DPFC_CONTROL           _MMIO(0x3208)
@@ -2200,6 +2230,8 @@ enum skl_disp_power_wells {
 #define ILK_DPFC_STATUS                _MMIO(0x43210)
 #define ILK_DPFC_FENCE_YOFF    _MMIO(0x43218)
 #define ILK_DPFC_CHICKEN       _MMIO(0x43224)
+#define   ILK_DPFC_DISABLE_DUMMY0 (1<<8)
+#define   ILK_DPFC_NUKE_ON_ANY_MODIFICATION    (1<<23)
 #define ILK_FBC_RT_BASE                _MMIO(0x2128)
 #define   ILK_FBC_RT_VALID     (1<<0)
 #define   SNB_FBC_FRONT_BUFFER (1<<1)
@@ -2449,6 +2481,8 @@ enum skl_disp_power_wells {
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK      0x0000003f
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT     0
 
+#define RAWCLK_FREQ_VLV                _MMIO(VLV_DISPLAY_BASE + 0x6024)
+
 #define _FPA0  0x6040
 #define _FPA1  0x6044
 #define _FPB0  0x6048
@@ -3020,6 +3054,18 @@ enum skl_disp_power_wells {
 /* Same as Haswell, but 72064 bytes now. */
 #define GEN8_CXT_TOTAL_SIZE            (18 * PAGE_SIZE)
 
+enum {
+       INTEL_ADVANCED_CONTEXT = 0,
+       INTEL_LEGACY_32B_CONTEXT,
+       INTEL_ADVANCED_AD_CONTEXT,
+       INTEL_LEGACY_64B_CONTEXT
+};
+
+#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
+#define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
+                               INTEL_LEGACY_64B_CONTEXT : \
+                               INTEL_LEGACY_32B_CONTEXT)
+
 #define CHV_CLK_CTL1                   _MMIO(0x101100)
 #define VLV_CLK_CTL2                   _MMIO(0x101104)
 #define   CLK_CTL2_CZCOUNT_30NS_SHIFT  28
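
GEN8_CTX_ADDRESSING_MODE picks the legacy 32b or 64b context format depending
on whether full 48-bit PPGTT is in use. A sketch of the assumed use when
composing an execlist context descriptor; the valid-bit value below is an
assumption, not defined in this hunk:

    u64 desc = BIT(0); /* assumed context-valid bit */
    desc |= (u64)GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
            GEN8_CTX_ADDRESSING_MODE_SHIFT;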
@@ -6031,6 +6077,10 @@ enum skl_disp_power_wells {
 #define CHICKEN_PAR1_1         _MMIO(0x42080)
 #define  DPA_MASK_VBLANK_SRD   (1 << 15)
 #define  FORCE_ARB_IDLE_PLANES (1 << 14)
+#define  SKL_EDP_PSR_FIX_RDWRAP        (1 << 3)
+
+#define CHICKEN_PAR2_1         _MMIO(0x42090)
+#define  KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
 
 #define _CHICKEN_PIPESL_1_A    0x420b0
 #define _CHICKEN_PIPESL_1_B    0x420b4
@@ -6039,6 +6089,7 @@ enum skl_disp_power_wells {
 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
 #define DISP_ARB_CTL   _MMIO(0x45000)
+#define  DISP_FBC_MEMORY_WAKE          (1<<31)
 #define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
 #define  DISP_FBC_WM_DIS               (1<<15)
 #define DISP_ARB_CTL2  _MMIO(0x45004)
@@ -6052,6 +6103,9 @@ enum skl_disp_power_wells {
 #define HSW_NDE_RSTWRN_OPT     _MMIO(0x46408)
 #define  RESET_PCH_HANDSHAKE_ENABLE    (1<<4)
 
+#define GEN8_CHICKEN_DCPR_1            _MMIO(0x46430)
+#define   MASK_WAKEMEM                 (1<<13)
+
 #define SKL_DFSM                       _MMIO(0x51000)
 #define SKL_DFSM_CDCLK_LIMIT_MASK      (3 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_675       (0 << 23)
@@ -6067,8 +6121,10 @@ enum skl_disp_power_wells {
 
 #define FF_SLICE_CS_CHICKEN2                   _MMIO(0x20e4)
 #define  GEN9_TSG_BARRIER_ACK_DISABLE          (1<<8)
+#define  GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE  (1<<10)
 
 #define GEN9_CS_DEBUG_MODE1            _MMIO(0x20ec)
+#define GEN9_CTX_PREEMPT_REG           _MMIO(0x2248)
 #define GEN8_CS_CHICKEN1               _MMIO(0x2580)
 
 /* GEN7 chicken */
@@ -6076,6 +6132,7 @@ enum skl_disp_power_wells {
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC     ((1<<10) | (1<<26))
 # define GEN9_RHWO_OPTIMIZATION_DISABLE                (1<<14)
 #define COMMON_SLICE_CHICKEN2                  _MMIO(0x7014)
+# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE  (1<<0)
 
 #define HIZ_CHICKEN                                    _MMIO(0x7018)
@@ -6089,7 +6146,14 @@ enum skl_disp_power_wells {
 #define  VLV_B0_WA_L3SQCREG1_VALUE             0x00D30000
 
 #define GEN8_L3SQCREG1                         _MMIO(0xB100)
-#define  BDW_WA_L3SQCREG1_DEFAULT              0x784000
+/*
+ * Note that on CHV the following has an off-by-one error with respect to BSpec.
+ * Using the formula in BSpec leads to a hang, while the formula here works
+ * fine and matches the formulas for all other platforms. A BSpec change
+ * request has been filed to clarify this.
+ */
+#define  L3_GENERAL_PRIO_CREDITS(x)            (((x) >> 1) << 19)
+#define  L3_HIGH_PRIO_CREDITS(x)               (((x) >> 1) << 14)
 
 #define GEN7_L3CNTLREG1                                _MMIO(0xB01C)
 #define  GEN7_WA_FOR_GEN7_L3_CONTROL                   0x3C47FF8C
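
The deleted BDW_WA_L3SQCREG1_DEFAULT decomposes exactly into the new credit
macros, which is worth checking once:

    0x784000 == (15 << 19) | (1 << 14)
             == L3_GENERAL_PRIO_CREDITS(30) | L3_HIGH_PRIO_CREDITS(2)

i.e. 30 general-priority and 2 high-priority credits, the register field
counting in units of two credits (hence the >> 1 in the macros). A caller can
now write, for example:

    I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(30) |
                               L3_HIGH_PRIO_CREDITS(2));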
@@ -6921,6 +6985,7 @@ enum skl_disp_power_wells {
 #define    EDRAM_SETS_IDX(cap)                 (((cap) >> 8) & 0x3)
 
 #define GEN6_UCGCTL1                           _MMIO(0x9400)
+# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE               (1 << 22)
 # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE             (1 << 16)
 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE               (1 << 5)
 # define GEN6_CSUNIT_CLOCK_GATE_DISABLE                        (1 << 7)
@@ -6937,6 +7002,7 @@ enum skl_disp_power_wells {
 
 #define GEN7_UCGCTL4                           _MMIO(0x940c)
 #define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE      (1<<25)
+#define  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE     (1<<14)
 
 #define GEN6_RCGCTL1                           _MMIO(0x9410)
 #define GEN6_RCGCTL2                           _MMIO(0x9414)
@@ -7007,7 +7073,8 @@ enum skl_disp_power_wells {
 #define GEN6_RPDEUC                            _MMIO(0xA084)
 #define GEN6_RPDEUCSW                          _MMIO(0xA088)
 #define GEN6_RC_STATE                          _MMIO(0xA094)
-#define   RC6_STATE                            (1 << 18)
+#define   RC_SW_TARGET_STATE_SHIFT             16
+#define   RC_SW_TARGET_STATE_MASK              (7 << RC_SW_TARGET_STATE_SHIFT)
 #define GEN6_RC1_WAKE_RATE_LIMIT               _MMIO(0xA098)
 #define GEN6_RC6_WAKE_RATE_LIMIT               _MMIO(0xA09C)
 #define GEN6_RC6pp_WAKE_RATE_LIMIT             _MMIO(0xA0A0)
@@ -7021,13 +7088,17 @@ enum skl_disp_power_wells {
 #define VLV_RCEDATA                            _MMIO(0xA0BC)
 #define GEN6_RC6pp_THRESHOLD                   _MMIO(0xA0C0)
 #define GEN6_PMINTRMSK                         _MMIO(0xA168)
-#define GEN8_PMINTR_REDIRECT_TO_NON_DISP       (1<<31)
+#define   GEN8_PMINTR_REDIRECT_TO_NON_DISP     (1<<31)
+#define GEN8_MISC_CTRL0                                _MMIO(0xA180)
 #define VLV_PWRDWNUPCTL                                _MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS          _MMIO(0xA0C4)
 #define GEN9_RENDER_PG_IDLE_HYSTERESIS         _MMIO(0xA0C8)
 #define GEN9_PG_ENABLE                         _MMIO(0xA210)
 #define GEN9_RENDER_PG_ENABLE                  (1<<0)
 #define GEN9_MEDIA_PG_ENABLE                   (1<<1)
+#define GEN8_PUSHBUS_CONTROL                   _MMIO(0xA248)
+#define GEN8_PUSHBUS_ENABLE                    _MMIO(0xA250)
+#define GEN8_PUSHBUS_SHIFT                     _MMIO(0xA25C)
 
 #define VLV_CHICKEN_3                          _MMIO(VLV_DISPLAY_BASE + 0x7040C)
 #define  PIXEL_OVERLAP_CNT_MASK                        (3 << 30)
@@ -7557,14 +7628,15 @@ enum skl_disp_power_wells {
 #define  CDCLK_FREQ_540                        (1<<26)
 #define  CDCLK_FREQ_337_308            (2<<26)
 #define  CDCLK_FREQ_675_617            (3<<26)
-#define  CDCLK_FREQ_DECIMAL_MASK       (0x7ff)
-
 #define  BXT_CDCLK_CD2X_DIV_SEL_MASK   (3<<22)
 #define  BXT_CDCLK_CD2X_DIV_SEL_1      (0<<22)
 #define  BXT_CDCLK_CD2X_DIV_SEL_1_5    (1<<22)
 #define  BXT_CDCLK_CD2X_DIV_SEL_2      (2<<22)
 #define  BXT_CDCLK_CD2X_DIV_SEL_4      (3<<22)
+#define  BXT_CDCLK_CD2X_PIPE(pipe)     ((pipe)<<20)
+#define  BXT_CDCLK_CD2X_PIPE_NONE      BXT_CDCLK_CD2X_PIPE(3)
 #define  BXT_CDCLK_SSA_PRECHARGE_ENABLE        (1<<16)
+#define  CDCLK_FREQ_DECIMAL_MASK       (0x7ff)
 
 /* LCPLL_CTL */
 #define LCPLL1_CTL             _MMIO(0x46010)
@@ -8140,6 +8212,8 @@ enum skl_disp_power_wells {
 #define _MIPIA_EOT_DISABLE             (dev_priv->mipi_mmio_base + 0xb05c)
 #define _MIPIC_EOT_DISABLE             (dev_priv->mipi_mmio_base + 0xb85c)
 #define MIPI_EOT_DISABLE(port)         _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
+#define  BXT_DEFEATURE_DPI_FIFO_CTR                    (1 << 9)
+#define  BXT_DPHY_DEFEATURE_EN                         (1 << 8)
 #define  LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE          (1 << 7)
 #define  HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE          (1 << 6)
 #define  LOW_CONTENTION_RECOVERY_DISABLE               (1 << 5)
index 34e061a9ef0673fde527c9ecbd14aabf532bb926..5cfe4c7716b4ec8a73a94ab66bdd27e9ae0abe76 100644 (file)
@@ -31,7 +31,7 @@
 
 static void i915_save_display(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Display arbitration control */
        if (INTEL_INFO(dev)->gen <= 4)
@@ -63,7 +63,7 @@ static void i915_save_display(struct drm_device *dev)
 
 static void i915_restore_display(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 mask = 0xffffffff;
 
        /* Display arbitration */
@@ -103,7 +103,7 @@ static void i915_restore_display(struct drm_device *dev)
 
 int i915_save_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        mutex_lock(&dev->struct_mutex);
@@ -148,7 +148,7 @@ int i915_save_state(struct drm_device *dev)
 
 int i915_restore_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        mutex_lock(&dev->struct_mutex);
index 2d576b7ff2999ae65ab5008ed624bd2b70dd18b6..d61829e54f93d74b4b1b7aab81c9c704c472d38f 100644 (file)
 static u32 calc_residency(struct drm_device *dev,
                          i915_reg_t reg)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u64 raw_time; /* 32b value may overflow during fixed point math */
        u64 units = 128ULL, div = 100000ULL;
        u32 ret;
 
-       if (!intel_enable_rc6(dev))
+       if (!intel_enable_rc6())
                return 0;
 
        intel_runtime_pm_get(dev_priv);
@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
 static ssize_t
 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = dev_to_drm_minor(kdev);
-       return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
+       return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
 }
 
 static ssize_t
@@ -167,7 +166,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
        struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
-       struct drm_i915_private *dev_priv = drm_dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(drm_dev);
        int slice = (int)(uintptr_t)attr->private;
        int ret;
 
@@ -203,8 +202,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
        struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
-       struct drm_i915_private *dev_priv = drm_dev->dev_private;
-       struct intel_context *ctx;
+       struct drm_i915_private *dev_priv = to_i915(drm_dev);
+       struct i915_gem_context *ctx;
        u32 *temp = NULL; /* Just here to make handling failures easy */
        int slice = (int)(uintptr_t)attr->private;
        int ret;
@@ -228,13 +227,6 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
                }
        }
 
-       ret = i915_gpu_idle(drm_dev);
-       if (ret) {
-               kfree(temp);
-               mutex_unlock(&drm_dev->struct_mutex);
-               return ret;
-       }
-
        /* TODO: Ideally we really want a GPU reset here to make sure errors
         * aren't propagated. Since I cannot find a stable way to reset the GPU
         * at this point it is left as a TODO.
@@ -276,7 +268,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -310,7 +302,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -331,7 +323,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        return snprintf(buf, PAGE_SIZE,
                        "%d\n",
@@ -342,7 +334,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -360,7 +352,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
        ssize_t ret;
 
@@ -397,7 +389,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
        /* We still need *_set_rps to process the new max_delay and
         * update the interrupt limits and PMINTRMSK even though
         * frequency request may be unchanged. */
-       intel_set_rps(dev, val);
+       intel_set_rps(dev_priv, val);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -410,7 +402,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -428,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
        ssize_t ret;
 
@@ -461,7 +453,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
        /* We still need *_set_rps to process the new min_delay and
         * update the interrupt limits and PMINTRMSK even though
         * frequency request may be unchanged. */
-       intel_set_rps(dev, val);
+       intel_set_rps(dev_priv, val);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -488,7 +480,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 {
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
 
        if (attr == &dev_attr_gt_RP0_freq_mhz)
index dc0def210097eeae4311b2777b0776473d69cfc3..534154e05fbe47b70870e4149e8265caf4a89572 100644 (file)
@@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = i915->dev->primary->index;
+                          __entry->dev = i915->drm.primary->index;
                           __entry->target = target;
                           __entry->flags = flags;
                           ),
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = from->dev->primary->index;
+                          __entry->dev = from->i915->drm.primary->index;
                           __entry->sync_from = from->id;
                           __entry->sync_to = to_req->engine->id;
                           __entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
                             ),
 
            TP_fast_assign(
-                          struct intel_engine_cs *engine =
-                                               i915_gem_request_get_engine(req);
-                          __entry->dev = engine->dev->primary->index;
-                          __entry->ring = engine->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->dev = req->i915->drm.primary->index;
+                          __entry->ring = req->engine->id;
+                          __entry->seqno = req->seqno;
                           __entry->flags = flags;
-                          i915_trace_irq_get(engine, req);
+                          intel_engine_enable_signaling(req);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = req->engine->dev->primary->index;
+                          __entry->dev = req->i915->drm.primary->index;
                           __entry->ring = req->engine->id;
                           __entry->invalidate = invalidate;
                           __entry->flush = flush;
@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
                             ),
 
            TP_fast_assign(
-                          struct intel_engine_cs *engine =
-                                               i915_gem_request_get_engine(req);
-                          __entry->dev = engine->dev->primary->index;
-                          __entry->ring = engine->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->dev = req->i915->drm.primary->index;
+                          __entry->ring = req->engine->id;
+                          __entry->seqno = req->seqno;
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,9 +556,9 @@ TRACE_EVENT(i915_gem_request_notify,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = engine->dev->primary->index;
+                          __entry->dev = engine->i915->drm.primary->index;
                           __entry->ring = engine->id;
-                          __entry->seqno = engine->get_seqno(engine);
+                          __entry->seqno = intel_engine_get_seqno(engine);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
             * less desirable.
             */
            TP_fast_assign(
-                          struct intel_engine_cs *engine =
-                                               i915_gem_request_get_engine(req);
-                          __entry->dev = engine->dev->primary->index;
-                          __entry->ring = engine->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->dev = req->i915->drm.primary->index;
+                          __entry->ring = req->engine->id;
+                          __entry->seqno = req->seqno;
                           __entry->blocking =
-                                    mutex_is_locked(&engine->dev->struct_mutex);
+                                    mutex_is_locked(&req->i915->drm.struct_mutex);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -740,19 +734,19 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
  * the context.
  */
 DECLARE_EVENT_CLASS(i915_context,
-       TP_PROTO(struct intel_context *ctx),
+       TP_PROTO(struct i915_gem_context *ctx),
        TP_ARGS(ctx),
 
        TP_STRUCT__entry(
                        __field(u32, dev)
-                       __field(struct intel_context *, ctx)
+                       __field(struct i915_gem_context *, ctx)
                        __field(struct i915_address_space *, vm)
        ),
 
        TP_fast_assign(
                        __entry->ctx = ctx;
                        __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
-                       __entry->dev = ctx->i915->dev->primary->index;
+                       __entry->dev = ctx->i915->drm.primary->index;
        ),
 
        TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
@@ -760,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context,
 )
 
 DEFINE_EVENT(i915_context, i915_context_create,
-       TP_PROTO(struct intel_context *ctx),
+       TP_PROTO(struct i915_gem_context *ctx),
        TP_ARGS(ctx)
 );
 
 DEFINE_EVENT(i915_context, i915_context_free,
-       TP_PROTO(struct intel_context *ctx),
+       TP_PROTO(struct i915_gem_context *ctx),
        TP_ARGS(ctx)
 );
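
Each DEFINE_EVENT above generates a trace_<name>() entry point, so after the
rename call sites simply take the new context type. An illustrative caller,
not part of this patch:

    static void example_context_notify(struct i915_gem_context *ctx,
                                       bool created)
    {
            if (created)
                    trace_i915_context_create(ctx);
            else
                    trace_i915_context_free(ctx);
    }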
 
@@ -777,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free,
  * called only if full ppgtt is enabled.
  */
 TRACE_EVENT(switch_mm,
-       TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
+       TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
 
        TP_ARGS(engine, to),
 
        TP_STRUCT__entry(
                        __field(u32, ring)
-                       __field(struct intel_context *, to)
+                       __field(struct i915_gem_context *, to)
                        __field(struct i915_address_space *, vm)
                        __field(u32, dev)
        ),
@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
                        __entry->ring = engine->id;
                        __entry->to = to;
                        __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-                       __entry->dev = engine->dev->primary->index;
+                       __entry->dev = engine->i915->drm.primary->index;
        ),
 
        TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
index d02efb8cad4dccd7779e3d9897646cc0e7c5f049..f6acb5a0e7015f8112277c26002f1c4cbbcc85c3 100644 (file)
 
 /**
  * i915_check_vgpu - detect virtual GPU
- * @dev: drm device *
+ * @dev_priv: i915 device private
  *
  * This function is called at the initialization stage to detect whether the
  * driver is running on a vGPU.
  */
-void i915_check_vgpu(struct drm_device *dev)
+void i915_check_vgpu(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        uint64_t magic;
        uint32_t version;
 
        BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-       if (!IS_HASWELL(dev))
+       if (!IS_HASWELL(dev_priv))
                return;
 
        magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
@@ -102,10 +101,13 @@ static struct _balloon_info_ bl_info;
  * This function is called to deallocate the ballooned-out graphic memory, when
  * driver is unloaded or when ballooning fails.
  */
-void intel_vgt_deballoon(void)
+void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
 {
        int i;
 
+       if (!intel_vgpu_active(dev_priv))
+               return;
+
        DRM_DEBUG("VGT deballoon.\n");
 
        for (i = 0; i < 4; i++) {
@@ -151,36 +153,35 @@ static int vgt_balloon_space(struct drm_mm *mm,
  * of its graphic space being zero. Yet there are some portions ballooned out
  * (the shadow part, which are marked as reserved by the drm allocator). From the
  * host point of view, the graphic address space is partitioned by multiple
- * vGPUs in different VMs.
+ * vGPUs in different VMs. ::
  *
  *                        vGPU1 view         Host view
  *             0 ------> +-----------+     +-----------+
- *               ^       |///////////|     |   vGPU3   |
- *               |       |///////////|     +-----------+
- *               |       |///////////|     |   vGPU2   |
+ *               ^       |###########|     |   vGPU3   |
+ *               |       |###########|     +-----------+
+ *               |       |###########|     |   vGPU2   |
  *               |       +-----------+     +-----------+
  *        mappable GM    | available | ==> |   vGPU1   |
  *               |       +-----------+     +-----------+
- *               |       |///////////|     |           |
- *               v       |///////////|     |   Host    |
+ *               |       |###########|     |           |
+ *               v       |###########|     |   Host    |
  *               +=======+===========+     +===========+
- *               ^       |///////////|     |   vGPU3   |
- *               |       |///////////|     +-----------+
- *               |       |///////////|     |   vGPU2   |
+ *               ^       |###########|     |   vGPU3   |
+ *               |       |###########|     +-----------+
+ *               |       |###########|     |   vGPU2   |
  *               |       +-----------+     +-----------+
  *      unmappable GM    | available | ==> |   vGPU1   |
  *               |       +-----------+     +-----------+
- *               |       |///////////|     |           |
- *               |       |///////////|     |   Host    |
- *               v       |///////////|     |           |
+ *               |       |###########|     |           |
+ *               |       |###########|     |   Host    |
+ *               v       |###########|     |           |
  * total GM size ------> +-----------+     +-----------+
  *
  * Returns:
  * zero on success, non-zero if configuration invalid or ballooning failed
  */
-int intel_vgt_balloon(struct drm_device *dev)
+int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
 
@@ -188,6 +189,9 @@ int intel_vgt_balloon(struct drm_device *dev)
        unsigned long unmappable_base, unmappable_size, unmappable_end;
        int ret;
 
+       if (!intel_vgpu_active(dev_priv))
+               return 0;
+
        mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
        mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
        unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
@@ -259,6 +263,6 @@ int intel_vgt_balloon(struct drm_device *dev)
 
 err:
        DRM_ERROR("VGT balloon fail\n");
-       intel_vgt_deballoon();
+       intel_vgt_deballoon(dev_priv);
        return ret;
 }
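
For context, a minimal sketch of the vgt_balloon_space() helper named in
the hunk header above; this is an illustration assuming the drm_mm
reservation API used elsewhere in the GGTT code, not a quote from the
patch. Each ballooned-out range is pre-reserved in the guest's GGTT so the
allocator never hands it out:

    static int vgt_balloon_space(struct drm_mm *mm,
                                 struct drm_mm_node *node,
                                 unsigned long start, unsigned long end)
    {
        unsigned long size = end - start;

        if (start >= end)
            return -EINVAL;

        /* Claim [start, end) so the guest allocator sees a hole here */
        node->start = start;
        node->size = size;

        return drm_mm_reserve_node(mm, node);
    }
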
index 3c83b47b5f699756a4d3199a94a6e6500e70577f..3c3b2d24e830447ff0023b47bf7a38d21f5048cc 100644 (file)
 #ifndef _I915_VGPU_H_
 #define _I915_VGPU_H_
 
-/* The MMIO offset of the shared info between guest and host emulator */
-#define VGT_PVINFO_PAGE        0x78000
-#define VGT_PVINFO_SIZE        0x1000
+#include "i915_pvinfo.h"
 
-/*
- * The following structure pages are defined in GEN MMIO space
- * for virtualization. (One page for now)
- */
-#define VGT_MAGIC         0x4776544776544776ULL        /* 'vGTvGTvG' */
-#define VGT_VERSION_MAJOR 1
-#define VGT_VERSION_MINOR 0
-
-#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
-#define INTEL_VGT_IF_VERSION \
-       INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
-
-/*
- * notifications from guest to vgpu device model
- */
-enum vgt_g2v_type {
-       VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
-       VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
-       VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
-       VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
-       VGT_G2V_EXECLIST_CONTEXT_CREATE,
-       VGT_G2V_EXECLIST_CONTEXT_DESTROY,
-       VGT_G2V_MAX,
-};
-
-struct vgt_if {
-       uint64_t magic;         /* VGT_MAGIC */
-       uint16_t version_major;
-       uint16_t version_minor;
-       uint32_t vgt_id;        /* ID of vGT instance */
-       uint32_t rsv1[12];      /* pad to offset 0x40 */
-       /*
-        *  Data structure to describe the balooning info of resources.
-        *  Each VM can only have one portion of continuous area for now.
-        *  (May support scattered resource in future)
-        *  (starting from offset 0x40)
-        */
-       struct {
-               /* Aperture register balooning */
-               struct {
-                       uint32_t base;
-                       uint32_t size;
-               } mappable_gmadr;       /* aperture */
-               /* GMADR register balooning */
-               struct {
-                       uint32_t base;
-                       uint32_t size;
-               } nonmappable_gmadr;    /* non aperture */
-               /* allowed fence registers */
-               uint32_t fence_num;
-               uint32_t rsv2[3];
-       } avail_rs;             /* available/assigned resource */
-       uint32_t rsv3[0x200 - 24];      /* pad to half page */
-       /*
-        * The bottom half page is for response from Gfx driver to hypervisor.
-        */
-       uint32_t rsv4;
-       uint32_t display_ready; /* ready for display owner switch */
-
-       uint32_t rsv5[4];
-
-       uint32_t g2v_notify;
-       uint32_t rsv6[7];
-
-       struct {
-               uint32_t lo;
-               uint32_t hi;
-       } pdp[4];
-
-       uint32_t execlist_context_descriptor_lo;
-       uint32_t execlist_context_descriptor_hi;
-
-       uint32_t  rsv7[0x200 - 24];    /* pad to one page */
-} __packed;
-
-#define vgtif_reg(x) \
-       _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
-
-/* vGPU display status to be used by the host side */
-#define VGT_DRV_DISPLAY_NOT_READY 0
-#define VGT_DRV_DISPLAY_READY     1  /* ready for display switch */
-
-extern void i915_check_vgpu(struct drm_device *dev);
-extern int intel_vgt_balloon(struct drm_device *dev);
-extern void intel_vgt_deballoon(void);
+void i915_check_vgpu(struct drm_i915_private *dev_priv);
+int intel_vgt_balloon(struct drm_i915_private *dev_priv);
+void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
 
 #endif /* _I915_VGPU_H_ */
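
The definitions removed above now live in i915_pvinfo.h. As a quick
reference, reconstructed from the removed lines (with offsetof()
substituted for the hand-rolled NULL-pointer idiom), the interface version
packs major/minor into one word, and vgtif_reg() maps a struct vgt_if
member to its MMIO offset inside the shared PVINFO page:

    #define VGT_PVINFO_PAGE 0x78000

    /* e.g. version 1.0 encodes to 0x00010000 */
    #define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))

    #define vgtif_reg(x) \
        _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
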
index 50ff90aea72187cd9da6fb5437a6af08b11f63d6..c5a166752edaeed6f7d619bf23d2958894019d83 100644 (file)
@@ -191,7 +191,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
 
                        /* plane scaler case: assign as a plane scaler */
                        /* find the plane that set the bit as scaler_user */
-                       plane = drm_state->planes[i];
+                       plane = drm_state->planes[i].ptr;
 
                        /*
                         * to enable/disable hq mode, add planes that are using scaler
@@ -223,7 +223,8 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
                                continue;
                        }
 
-                       plane_state = to_intel_plane_state(drm_state->plane_states[i]);
+                       plane_state = intel_atomic_get_existing_plane_state(drm_state,
+                                                                           intel_plane);
                        scaler_id = &plane_state->scaler_id;
                }
 
index 02a7527ce7bb4e20d37699d7365b42e6d02b2d31..6700a7be7f78755675d6a8c1c17c69377d0117b4 100644 (file)
@@ -154,7 +154,7 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
 {
        if (((mode->clock == TMDS_297M) ||
                 (mode->clock == TMDS_296M)) &&
-               intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+               intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
                return true;
        else
                return false;
@@ -165,7 +165,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
                               i915_reg_t reg_elda, uint32_t bits_elda,
                               i915_reg_t reg_edid)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        uint8_t *eld = connector->eld;
        uint32_t tmp;
        int i;
@@ -189,7 +189,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
 
 static void g4x_audio_codec_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        uint32_t eldv, tmp;
 
        DRM_DEBUG_KMS("Disable audio codec\n");
@@ -210,7 +210,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        uint8_t *eld = connector->eld;
        uint32_t eldv;
        uint32_t tmp;
@@ -247,7 +247,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
 
 static void hsw_audio_codec_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum pipe pipe = intel_crtc->pipe;
        uint32_t tmp;
@@ -262,7 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
        tmp |= AUD_CONFIG_N_PROG_ENABLE;
        tmp &= ~AUD_CONFIG_UPPER_N_MASK;
        tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        I915_WRITE(HSW_AUD_CFG(pipe), tmp);
 
@@ -279,7 +279,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum pipe pipe = intel_crtc->pipe;
        struct i915_audio_component *acomp = dev_priv->audio_component;
@@ -328,7 +328,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
        tmp = I915_READ(HSW_AUD_CFG(pipe));
        tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
        tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        else
                tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -357,7 +357,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
 
 static void ilk_audio_codec_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_digital_port *intel_dig_port =
                enc_to_dig_port(&encoder->base);
@@ -389,7 +389,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
        tmp |= AUD_CONFIG_N_PROG_ENABLE;
        tmp &= ~AUD_CONFIG_UPPER_N_MASK;
        tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        I915_WRITE(aud_config, tmp);
 
@@ -405,7 +405,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_digital_port *intel_dig_port =
                enc_to_dig_port(&encoder->base);
@@ -475,7 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
        tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
        tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
        tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        else
                tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -496,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        struct drm_connector *connector;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_audio_component *acomp = dev_priv->audio_component;
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        enum port port = intel_dig_port->port;
@@ -513,7 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
 
        /* ELD Conn_Type */
        connector->eld[5] &= ~(3 << 2);
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+       if (intel_crtc_has_dp_encoder(crtc->config))
                connector->eld[5] |= (1 << 2);
 
        connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -543,7 +543,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_audio_component *acomp = dev_priv->audio_component;
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        enum port port = intel_dig_port->port;
@@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
 static int i915_audio_component_get_cdclk_freq(struct device *dev)
 {
        struct drm_i915_private *dev_priv = dev_to_i915(dev);
-       int ret;
 
        if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
                return -ENODEV;
 
-       intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-       ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
-
-       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
-       return ret;
+       return dev_priv->cdclk_freq;
 }
 
 static int i915_audio_component_sync_audio_rate(struct device *dev,
@@ -755,14 +749,14 @@ static int i915_audio_component_bind(struct device *i915_dev,
        if (WARN_ON(acomp->ops || acomp->dev))
                return -EEXIST;
 
-       drm_modeset_lock_all(dev_priv->dev);
+       drm_modeset_lock_all(&dev_priv->drm);
        acomp->ops = &i915_audio_component_ops;
        acomp->dev = i915_dev;
        BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
        for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
                acomp->aud_sample_rate[i] = 0;
        dev_priv->audio_component = acomp;
-       drm_modeset_unlock_all(dev_priv->dev);
+       drm_modeset_unlock_all(&dev_priv->drm);
 
        return 0;
 }
@@ -773,11 +767,11 @@ static void i915_audio_component_unbind(struct device *i915_dev,
        struct i915_audio_component *acomp = data;
        struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
 
-       drm_modeset_lock_all(dev_priv->dev);
+       drm_modeset_lock_all(&dev_priv->drm);
        acomp->ops = NULL;
        acomp->dev = NULL;
        dev_priv->audio_component = NULL;
-       drm_modeset_unlock_all(dev_priv->dev);
+       drm_modeset_unlock_all(&dev_priv->drm);
 }
 
 static const struct component_ops i915_audio_component_bind_ops = {
@@ -805,7 +799,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
 {
        int ret;
 
-       ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+       ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
        if (ret < 0) {
                DRM_ERROR("failed to add audio component (%d)\n", ret);
                /* continue with reduced functionality */
@@ -827,6 +821,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
        if (!dev_priv->audio_component_registered)
                return;
 
-       component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+       component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
        dev_priv->audio_component_registered = false;
 }
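
The dev->dev_private and dev_priv->dev conversions throughout this file
rely on struct drm_i915_private embedding its struct drm_device as the
drm member; a sketch of the upcast helper this enables, matching the
to_i915() calls used above:

    static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
    {
        /* drm_device is embedded in drm_i915_private: no pointer chase */
        return container_of(dev, struct drm_i915_private, drm);
    }
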
index b235b6e88eadb82fa3875b2325494540bf584acb..c6e69e4cfa8314a051277797bed0ec1fbcceb3ea 100644 (file)
@@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
        else
                panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
 
+       panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
+               dvo_timing->himage_lo;
+       panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
+               dvo_timing->vimage_lo;
+
        /* Some VBTs have bogus h/vtotal values */
        if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
                panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -213,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 
        dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
 
-       ret = intel_opregion_get_panel_type(dev_priv->dev);
+       ret = intel_opregion_get_panel_type(dev_priv);
        if (ret >= 0) {
                WARN_ON(ret > 0xf);
                panel_type = ret;
@@ -318,6 +323,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
                return;
        }
 
+       dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+       if (bdb->version >= 191 &&
+           get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
+               const struct bdb_lfp_backlight_control_method *method;
+
+               method = &backlight_data->backlight_control[panel_type];
+               dev_priv->vbt.backlight.type = method->type;
+       }
+
        dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
        dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
        dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
@@ -763,6 +777,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
                return;
        }
 
+       /*
+        * These fields were introduced in VBT version 197, so make sure
+        * they are zeroed out when parsing earlier versions.
+        */
+       if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
+               dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
+               dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
+       }
+
        /* We have mandatory mipi config blocks. Initialize as generic panel */
        dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
 }
@@ -1187,7 +1211,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
        }
        if (bdb->version < 106) {
                expected_size = 22;
-       } else if (bdb->version < 109) {
+       } else if (bdb->version < 111) {
                expected_size = 27;
        } else if (bdb->version < 195) {
                BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
@@ -1402,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
 int
 intel_bios_init(struct drm_i915_private *dev_priv)
 {
-       struct pci_dev *pdev = dev_priv->dev->pdev;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        const struct vbt_header *vbt = dev_priv->opregion.vbt;
        const struct bdb_header *bdb;
        u8 __iomem *bios = NULL;
@@ -1545,6 +1569,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
        return false;
 }
 
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv:  i915 device instance
+ * @port:      port to check
+ *
+ * Return true if the device on @port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+       static const struct {
+               u16 dp, hdmi;
+       } port_mapping[] = {
+               [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+               [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+               [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+               [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+       };
+       int i;
+
+       /* FIXME maybe deal with port A as well? */
+       if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+               return false;
+
+       if (!dev_priv->vbt.child_dev_num)
+               return false;
+
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               const union child_device_config *p_child =
+                       &dev_priv->vbt.child_dev[i];
+               if ((p_child->common.dvo_port == port_mapping[port].dp ||
+                    p_child->common.dvo_port == port_mapping[port].hdmi) &&
+                   (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+                                                   DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+                       return true;
+       }
+
+       return false;
+}
+
 /**
  * intel_bios_is_port_edp - is the device in given port eDP
  * @dev_priv:  i915 device instance
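
The width_mm/height_mm hunk earlier in this file combines split VBT bytes;
a tiny standalone illustration with hypothetical values (himage_hi and
himage_lo are the real field names, the numbers are made up):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t himage_hi = 0x01, himage_lo = 0x2c; /* hypothetical bytes */
        uint16_t width_mm = (himage_hi << 8) | himage_lo;

        assert(width_mm == 300);                    /* 0x012c == 300 mm */
        return 0;
    }
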
index ab0ea315eddb1665a96b78fbde53d72e004c1773..8405b5a367d7164a15df16ea5c453b472e965916 100644 (file)
 #ifndef _INTEL_BIOS_H_
 #define _INTEL_BIOS_H_
 
+enum intel_backlight_type {
+       INTEL_BACKLIGHT_PMIC,
+       INTEL_BACKLIGHT_LPSS,
+       INTEL_BACKLIGHT_DISPLAY_DDI,
+       INTEL_BACKLIGHT_DSI_DCS,
+       INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+};
+
 struct edp_power_seq {
        u16 t1_t3;
        u16 t8;
@@ -113,7 +121,13 @@ struct mipi_config {
        u16 dual_link:2;
        u16 lane_cnt:2;
        u16 pixel_overlap:3;
-       u16 rsvd3:9;
+       u16 rgb_flip:1;
+#define DL_DCS_PORT_A                  0x00
+#define DL_DCS_PORT_C                  0x01
+#define DL_DCS_PORT_A_AND_C            0x02
+       u16 dl_dcs_cabc_ports:2;
+       u16 dl_dcs_backlight_ports:2;
+       u16 rsvd3:4;
 
        u16 rsvd4;
 
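
A minimal sketch, illustrative and not part of this patch, of consuming
the new dual-link DCS bitfields above using the DL_DCS_PORT_* values the
header defines:

    static bool dl_dcs_cabc_uses_port_a(const struct mipi_config *config)
    {
        return config->dl_dcs_cabc_ports == DL_DCS_PORT_A ||
               config->dl_dcs_cabc_ports == DL_DCS_PORT_A_AND_C;
    }
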
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
new file mode 100644 (file)
index 0000000..b074f3d
--- /dev/null
@@ -0,0 +1,595 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+
+#include "i915_drv.h"
+
+static void intel_breadcrumbs_fake_irq(unsigned long data)
+{
+       struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+
+       /*
+        * The timer persists in case we cannot enable interrupts,
+        * or if we have previously seen seqno/interrupt incoherency
+        * ("missed interrupt" syndrome). Here the worker will wake up
+        * every jiffie in order to kick the oldest waiter to do the
+        * coherent seqno check.
+        */
+       rcu_read_lock();
+       if (intel_engine_wakeup(engine))
+               mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+       rcu_read_unlock();
+}
+
+static void irq_enable(struct intel_engine_cs *engine)
+{
+       /* Enabling the IRQ may miss the generation of the interrupt, but
+        * we still need to force the barrier before reading the seqno,
+        * just in case.
+        */
+       engine->breadcrumbs.irq_posted = true;
+
+       spin_lock_irq(&engine->i915->irq_lock);
+       engine->irq_enable(engine);
+       spin_unlock_irq(&engine->i915->irq_lock);
+}
+
+static void irq_disable(struct intel_engine_cs *engine)
+{
+       spin_lock_irq(&engine->i915->irq_lock);
+       engine->irq_disable(engine);
+       spin_unlock_irq(&engine->i915->irq_lock);
+
+       engine->breadcrumbs.irq_posted = false;
+}
+
+static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
+{
+       struct intel_engine_cs *engine =
+               container_of(b, struct intel_engine_cs, breadcrumbs);
+       struct drm_i915_private *i915 = engine->i915;
+
+       assert_spin_locked(&b->lock);
+       if (b->rpm_wakelock)
+               return;
+
+       /* Since we are waiting on a request, the GPU should be busy
+        * and should have its own rpm reference. For completeness,
+        * record an rpm reference for ourselves to cover the
+        * interrupt we unmask.
+        */
+       intel_runtime_pm_get_noresume(i915);
+       b->rpm_wakelock = true;
+
+       /* No interrupts? Kick the waiter every jiffie! */
+       if (intel_irqs_enabled(i915)) {
+               if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
+                       irq_enable(engine);
+               b->irq_enabled = true;
+       }
+
+       if (!b->irq_enabled ||
+           test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
+               mod_timer(&b->fake_irq, jiffies + 1);
+
+       /* Ensure that even if the GPU hangs, we get woken up.
+        *
+        * However, note that if no one is waiting, we never notice
+        * a gpu hang. Eventually, we will have to wait for a resource
+        * held by the GPU and so trigger a hangcheck. In the most
+        * pathological case, this will be upon memory starvation!
+        */
+       i915_queue_hangcheck(i915);
+}
+
+static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
+{
+       struct intel_engine_cs *engine =
+               container_of(b, struct intel_engine_cs, breadcrumbs);
+
+       assert_spin_locked(&b->lock);
+       if (!b->rpm_wakelock)
+               return;
+
+       if (b->irq_enabled) {
+               irq_disable(engine);
+               b->irq_enabled = false;
+       }
+
+       intel_runtime_pm_put(engine->i915);
+       b->rpm_wakelock = false;
+}
+
+static inline struct intel_wait *to_wait(struct rb_node *node)
+{
+       return container_of(node, struct intel_wait, node);
+}
+
+static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
+                                             struct intel_wait *wait)
+{
+       assert_spin_locked(&b->lock);
+
+       /* This request is completed, so remove it from the tree, mark it as
+        * complete, and *then* wake up the associated task.
+        */
+       rb_erase(&wait->node, &b->waiters);
+       RB_CLEAR_NODE(&wait->node);
+
+       wake_up_process(wait->tsk); /* implicit smp_wmb() */
+}
+
+static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
+                                   struct intel_wait *wait)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct rb_node **p, *parent, *completed;
+       bool first;
+       u32 seqno;
+
+       /* Insert the request into the retirement ordered list
+        * of waiters by walking the rbtree. If we are the oldest
+        * seqno in the tree (the first to be retired), then
+        * set ourselves as the bottom-half.
+        *
+        * As we descend the tree, prune completed branches: since we hold
+        * the spinlock, we know that the first_waiter must be delayed, and we
+        * can reduce some of the sequential wake-up latency if we take action
+        * ourselves and wake up the completed tasks in parallel. Also, by
+        * removing stale elements in the tree, we may be able to reduce the
+        * ping-pong between the old bottom-half and ourselves as first-waiter.
+        */
+       first = true;
+       parent = NULL;
+       completed = NULL;
+       seqno = intel_engine_get_seqno(engine);
+
+       /* If the request completed before we managed to grab the spinlock,
+        * return now before adding ourselves to the rbtree. We let the
+        * current bottom-half handle any pending wakeups and instead
+        * try and get out of the way quickly.
+        */
+       if (i915_seqno_passed(seqno, wait->seqno)) {
+               RB_CLEAR_NODE(&wait->node);
+               return first;
+       }
+
+       p = &b->waiters.rb_node;
+       while (*p) {
+               parent = *p;
+               if (wait->seqno == to_wait(parent)->seqno) {
+                       /* We have multiple waiters on the same seqno, select
+                        * the highest priority task (that with the smallest
+                        * task->prio) to serve as the bottom-half for this
+                        * group.
+                        */
+                       if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
+                               p = &parent->rb_right;
+                               first = false;
+                       } else {
+                               p = &parent->rb_left;
+                       }
+               } else if (i915_seqno_passed(wait->seqno,
+                                            to_wait(parent)->seqno)) {
+                       p = &parent->rb_right;
+                       if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
+                               completed = parent;
+                       else
+                               first = false;
+               } else {
+                       p = &parent->rb_left;
+               }
+       }
+       rb_link_node(&wait->node, parent, p);
+       rb_insert_color(&wait->node, &b->waiters);
+       GEM_BUG_ON(!first && !b->irq_seqno_bh);
+
+       if (completed) {
+               struct rb_node *next = rb_next(completed);
+
+               GEM_BUG_ON(!next && !first);
+               if (next && next != &wait->node) {
+                       GEM_BUG_ON(first);
+                       b->first_wait = to_wait(next);
+                       smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+                       /* As there is a delay between reading the current
+                        * seqno, processing the completed tasks and selecting
+                        * the next waiter, we may have missed the interrupt
+                        * and so need the next bottom-half to wake up.
+                        *
+                        * Also as we enable the IRQ, we may miss the
+                        * interrupt for that seqno, so we have to wake up
+                        * the next bottom-half in order to do a coherent check
+                        * in case the seqno passed.
+                        */
+                       __intel_breadcrumbs_enable_irq(b);
+                       if (READ_ONCE(b->irq_posted))
+                               wake_up_process(to_wait(next)->tsk);
+               }
+
+               do {
+                       struct intel_wait *crumb = to_wait(completed);
+                       completed = rb_prev(completed);
+                       __intel_breadcrumbs_finish(b, crumb);
+               } while (completed);
+       }
+
+       if (first) {
+               GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
+               b->first_wait = wait;
+               smp_store_mb(b->irq_seqno_bh, wait->tsk);
+               /* After assigning ourselves as the new bottom-half, we must
+                * perform a cursory check to prevent a missed interrupt.
+                * Either we miss the interrupt whilst programming the hardware,
+                * or if there was a previous waiter (for a later seqno) they
+                * may be woken instead of us (due to the inherent race
+                * in the unlocked read of b->irq_seqno_bh in the irq handler)
+                * and so we miss the wake up.
+                */
+               __intel_breadcrumbs_enable_irq(b);
+       }
+       GEM_BUG_ON(!b->irq_seqno_bh);
+       GEM_BUG_ON(!b->first_wait);
+       GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
+
+       return first;
+}
+
+bool intel_engine_add_wait(struct intel_engine_cs *engine,
+                          struct intel_wait *wait)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       bool first;
+
+       spin_lock(&b->lock);
+       first = __intel_engine_add_wait(engine, wait);
+       spin_unlock(&b->lock);
+
+       return first;
+}
+
+void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
+{
+       mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+}
+
+static inline bool chain_wakeup(struct rb_node *rb, int priority)
+{
+       return rb && to_wait(rb)->tsk->prio <= priority;
+}
+
+static inline int wakeup_priority(struct intel_breadcrumbs *b,
+                                 struct task_struct *tsk)
+{
+       if (tsk == b->signaler)
+               return INT_MIN;
+       else
+               return tsk->prio;
+}
+
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+                             struct intel_wait *wait)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+       /* Quick check to see if this waiter was already decoupled from
+        * the tree by the bottom-half to avoid contention on the spinlock
+        * by the herd.
+        */
+       if (RB_EMPTY_NODE(&wait->node))
+               return;
+
+       spin_lock(&b->lock);
+
+       if (RB_EMPTY_NODE(&wait->node))
+               goto out_unlock;
+
+       if (b->first_wait == wait) {
+               const int priority = wakeup_priority(b, wait->tsk);
+               struct rb_node *next;
+
+               GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
+
+               /* We are the current bottom-half. Find the next candidate,
+                * the first waiter in the queue on the remaining oldest
+                * request. As multiple seqnos may complete in the time it
+                * takes us to wake up and find the next waiter, we have to
+                * wake up that waiter for it to perform its own coherent
+                * completion check.
+                */
+               next = rb_next(&wait->node);
+               if (chain_wakeup(next, priority)) {
+                       /* If the next waiter is already complete,
+                        * wake it up and continue on to the next waiter. So
+                        * if we have a small herd, they will wake up in parallel
+                        * rather than sequentially, which should reduce
+                        * the overall latency in waking all the completed
+                        * clients.
+                        *
+                        * However, waking up a chain adds extra latency to
+                        * the first_waiter. This is undesirable if that
+                        * waiter is a high priority task.
+                        */
+                       u32 seqno = intel_engine_get_seqno(engine);
+
+                       while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
+                               struct rb_node *n = rb_next(next);
+
+                               __intel_breadcrumbs_finish(b, to_wait(next));
+                               next = n;
+                               if (!chain_wakeup(next, priority))
+                                       break;
+                       }
+               }
+
+               if (next) {
+                       /* In our haste, we may have completed the first waiter
+                        * before we enabled the interrupt. Do so now as we
+                        * have a second waiter for a future seqno. Afterwards,
+                        * we have to wake up that waiter in case we missed
+                        * the interrupt, or if we have to handle an
+                        * exception rather than a seqno completion.
+                        */
+                       b->first_wait = to_wait(next);
+                       smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
+                       if (b->first_wait->seqno != wait->seqno)
+                               __intel_breadcrumbs_enable_irq(b);
+                       wake_up_process(b->irq_seqno_bh);
+               } else {
+                       b->first_wait = NULL;
+                       WRITE_ONCE(b->irq_seqno_bh, NULL);
+                       __intel_breadcrumbs_disable_irq(b);
+               }
+       } else {
+               GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
+       }
+
+       GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
+       rb_erase(&wait->node, &b->waiters);
+
+out_unlock:
+       GEM_BUG_ON(b->first_wait == wait);
+       GEM_BUG_ON(rb_first(&b->waiters) !=
+                  (b->first_wait ? &b->first_wait->node : NULL));
+       GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
+       spin_unlock(&b->lock);
+}
+
+static bool signal_complete(struct drm_i915_gem_request *request)
+{
+       if (!request)
+               return false;
+
+       /* If another process served as the bottom-half it may have already
+        * signalled that this wait is already completed.
+        */
+       if (intel_wait_complete(&request->signaling.wait))
+               return true;
+
+       /* Carefully check if the request is complete, giving time for the
+        * seqno to be visible or if the GPU hung.
+        */
+       if (__i915_request_irq_complete(request))
+               return true;
+
+       return false;
+}
+
+static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
+{
+       return container_of(rb, struct drm_i915_gem_request, signaling.node);
+}
+
+static void signaler_set_rtpriority(void)
+{
+       struct sched_param param = { .sched_priority = 1 };
+
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+}
+
+static int intel_breadcrumbs_signaler(void *arg)
+{
+       struct intel_engine_cs *engine = arg;
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct drm_i915_gem_request *request;
+
+       /* Install ourselves with high priority to reduce signalling latency */
+       signaler_set_rtpriority();
+
+       do {
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               /* We are either woken up by the interrupt bottom-half,
+                * or by a client adding a new signaller. In both cases,
+                * the GPU seqno may have advanced beyond our oldest signal.
+                * If it has, propagate the signal, remove the waiter and
+                * check again with the next oldest signal. Otherwise we
+                * need to wait for a new interrupt from the GPU or for
+                * a new client.
+                */
+               request = READ_ONCE(b->first_signal);
+               if (signal_complete(request)) {
+                       /* Wake up all other completed waiters and select the
+                        * next bottom-half for the next user interrupt.
+                        */
+                       intel_engine_remove_wait(engine,
+                                                &request->signaling.wait);
+
+                       /* Find the next oldest signal. Note that as we have
+                        * not been holding the lock, another client may
+                        * have installed an even older signal than the one
+                        * we just completed - so double check we are still
+                        * the oldest before picking the next one.
+                        */
+                       spin_lock(&b->lock);
+                       if (request == b->first_signal) {
+                               struct rb_node *rb =
+                                       rb_next(&request->signaling.node);
+                               b->first_signal = rb ? to_signaler(rb) : NULL;
+                       }
+                       rb_erase(&request->signaling.node, &b->signals);
+                       spin_unlock(&b->lock);
+
+                       i915_gem_request_unreference(request);
+               } else {
+                       if (kthread_should_stop())
+                               break;
+
+                       schedule();
+               }
+       } while (1);
+       __set_current_state(TASK_RUNNING);
+
+       return 0;
+}
+
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
+{
+       struct intel_engine_cs *engine = request->engine;
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct rb_node *parent, **p;
+       bool first, wakeup;
+
+       if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
+               return;
+
+       spin_lock(&b->lock);
+       if (unlikely(request->signaling.wait.tsk)) {
+               wakeup = false;
+               goto unlock;
+       }
+
+       request->signaling.wait.tsk = b->signaler;
+       request->signaling.wait.seqno = request->seqno;
+       i915_gem_request_reference(request);
+
+       /* First add ourselves into the list of waiters, but register our
+        * bottom-half as the signaller thread. As per usual, only the oldest
+        * waiter (not just signaller) is tasked as the bottom-half waking
+        * up all completed waiters after the user interrupt.
+        *
+        * If we are the oldest waiter, enable the irq (after which we
+        * must double check that the seqno did not complete).
+        */
+       wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);
+
+       /* Now insert ourselves into the retirement ordered list of signals
+        * on this engine. We track the oldest seqno as that will be the
+        * first signal to complete.
+        */
+       parent = NULL;
+       first = true;
+       p = &b->signals.rb_node;
+       while (*p) {
+               parent = *p;
+               if (i915_seqno_passed(request->seqno,
+                                     to_signaler(parent)->seqno)) {
+                       p = &parent->rb_right;
+                       first = false;
+               } else {
+                       p = &parent->rb_left;
+               }
+       }
+       rb_link_node(&request->signaling.node, parent, p);
+       rb_insert_color(&request->signaling.node, &b->signals);
+       if (first)
+               smp_store_mb(b->first_signal, request);
+
+unlock:
+       spin_unlock(&b->lock);
+
+       if (wakeup)
+               wake_up_process(b->signaler);
+}
+
+int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct task_struct *tsk;
+
+       spin_lock_init(&b->lock);
+       setup_timer(&b->fake_irq,
+                   intel_breadcrumbs_fake_irq,
+                   (unsigned long)engine);
+
+       /* Spawn a thread to provide a common bottom-half for all signals.
+        * As this is an asynchronous interface we cannot steal the current
+        * task for handling the bottom-half to the user interrupt, therefore
+        * we create a thread to do the coherent seqno dance after the
+        * interrupt and then signal the waitqueue (via the dma-buf/fence).
+        */
+       tsk = kthread_run(intel_breadcrumbs_signaler, engine,
+                         "i915/signal:%d", engine->id);
+       if (IS_ERR(tsk))
+               return PTR_ERR(tsk);
+
+       b->signaler = tsk;
+
+       return 0;
+}
+
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
+{
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+       if (!IS_ERR_OR_NULL(b->signaler))
+               kthread_stop(b->signaler);
+
+       del_timer_sync(&b->fake_irq);
+}
+
+unsigned int intel_kick_waiters(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       unsigned int mask = 0;
+
+       /* To avoid the task_struct disappearing beneath us as we wake up
+        * the process, we must first inspect the task_struct->state under the
+        * RCU lock, i.e. as we call wake_up_process() we must be holding the
+        * rcu_read_lock().
+        */
+       rcu_read_lock();
+       for_each_engine(engine, i915)
+               if (unlikely(intel_engine_wakeup(engine)))
+                       mask |= intel_engine_flag(engine);
+       rcu_read_unlock();
+
+       return mask;
+}
+
+unsigned int intel_kick_signalers(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       unsigned int mask = 0;
+
+       for_each_engine(engine, i915) {
+               if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
+                       wake_up_process(engine->breadcrumbs.signaler);
+                       mask |= intel_engine_flag(engine);
+               }
+       }
+
+       return mask;
+}
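
A caller-side sketch of the waiter API introduced above (assumptions:
intel_wait_init() fills in tsk/seqno for the current task, and error
handling is omitted). The waiter inserts itself into the rbtree, sleeps
until the engine's seqno passes, then decouples:

    static void example_wait_for_seqno(struct intel_engine_cs *engine, u32 seqno)
    {
        struct intel_wait wait;

        intel_wait_init(&wait, seqno);          /* assumed helper */
        intel_engine_add_wait(engine, &wait);

        for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
                break;
            schedule();     /* woken by the bottom-half on user interrupt */
        }
        __set_current_state(TASK_RUNNING);

        intel_engine_remove_wait(engine, &wait);
    }
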
index 1b3f97449395fe0a4f2c2c53f34ec1b0d54161c2..bc0fef3d33356a1fb39f543ec04c31577694ecdb 100644 (file)
@@ -96,7 +96,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
 {
        struct drm_crtc *crtc = crtc_state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int i, pipe = intel_crtc->pipe;
        uint16_t coeffs[9] = { 0, };
@@ -207,7 +207,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
 {
        struct drm_crtc *crtc = state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = to_intel_crtc(crtc)->pipe;
        uint32_t mode;
 
@@ -255,7 +255,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
 void intel_color_set_csc(struct drm_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc_state->crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (dev_priv->display.load_csc_matrix)
                dev_priv->display.load_csc_matrix(crtc_state);
@@ -266,13 +266,13 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
                                    struct drm_property_blob *blob)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        int i;
 
        if (HAS_GMCH_DISPLAY(dev)) {
-               if (intel_crtc->config->has_dsi_encoder)
+               if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
@@ -313,7 +313,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
 {
        struct drm_crtc *crtc = crtc_state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *intel_crtc_state =
                to_intel_crtc_state(crtc_state);
@@ -343,7 +343,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
 {
        struct drm_crtc *crtc = state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
@@ -426,7 +426,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
 {
        struct drm_crtc *crtc = state->crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        struct drm_color_lut *lut;
        uint32_t i, lut_size;
@@ -485,7 +485,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
 void intel_color_load_luts(struct drm_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc_state->crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->display.load_luts(crtc_state);
 }
@@ -526,7 +526,7 @@ int intel_color_check(struct drm_crtc *crtc,
 void intel_color_init(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        drm_mode_crtc_set_gamma_size(crtc, 256);
 
@@ -547,7 +547,8 @@ void intel_color_init(struct drm_crtc *crtc)
        /* Enable color management support when we have degamma & gamma LUTs. */
        if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
            INTEL_INFO(dev)->color.gamma_lut_size != 0)
-               drm_helper_crtc_enable_color_mgmt(crtc,
+               drm_crtc_enable_color_mgmt(crtc,
                                        INTEL_INFO(dev)->color.degamma_lut_size,
+                                       true,
                                        INTEL_INFO(dev)->color.gamma_lut_size);
 }
index 3fbb6fc664516cdced5ab80ad1d0a8fe4cc70219..827b6ef4e9aedfa669c91942d71f84e800841c38 100644 (file)
@@ -67,7 +67,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
        enum intel_display_power_domain power_domain;
        u32 tmp;
@@ -98,7 +98,7 @@ out:
 
 static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
        u32 tmp, flags = 0;
 
@@ -146,7 +146,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -281,7 +281,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct intel_crt *crt = intel_attached_crt(connector);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 adpa;
        bool ret;
 
@@ -301,8 +301,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 
                I915_WRITE(crt->adpa_reg, adpa);
 
-               if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
-                            1000))
+               if (intel_wait_for_register(dev_priv,
+                                           crt->adpa_reg,
+                                           ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+                                           1000))
                        DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
 
                if (turn_off_dac) {
@@ -326,11 +328,26 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct intel_crt *crt = intel_attached_crt(connector);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       bool reenable_hpd;
        u32 adpa;
        bool ret;
        u32 save_adpa;
 
+       /*
+        * Doing a force trigger causes a hpd interrupt to get sent, which can
+        * get us stuck in a loop if we're polling:
+        *  - We enable power wells and reset the ADPA
+        *  - output_poll_exec does force probe on VGA, triggering a hpd
+        *  - HPD handler waits for poll to unlock dev->mode_config.mutex
+        *  - output_poll_exec shuts off the ADPA, unlocks
+        *    dev->mode_config.mutex
+        *  - HPD handler runs, resets ADPA and brings us back to the start
+        *
+        * Just disable HPD interrupts here to prevent this
+        */
+       reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+
        save_adpa = adpa = I915_READ(crt->adpa_reg);
        DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
@@ -338,8 +355,10 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 
        I915_WRITE(crt->adpa_reg, adpa);
 
-       if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
-                    1000)) {
+       if (intel_wait_for_register(dev_priv,
+                                   crt->adpa_reg,
+                                   ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
+                                   1000)) {
                DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
                I915_WRITE(crt->adpa_reg, save_adpa);
        }
@@ -353,6 +372,9 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 
        DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
+       if (reenable_hpd)
+               intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+
        return ret;
 }
 
@@ -367,7 +389,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 stat;
        bool ret = false;
        int i, tries = 0;
@@ -394,9 +416,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
                                              CRT_HOTPLUG_FORCE_DETECT,
                                              CRT_HOTPLUG_FORCE_DETECT);
                /* wait for FORCE_DETECT to go off */
-               if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
-                             CRT_HOTPLUG_FORCE_DETECT) == 0,
-                            1000))
+               if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
+                                           CRT_HOTPLUG_FORCE_DETECT, 0,
+                                           1000))
                        DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
        }
 
@@ -449,7 +471,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
 static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
        struct intel_crt *crt = intel_attached_crt(connector);
-       struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
        struct edid *edid;
        struct i2c_adapter *i2c;
 
@@ -485,7 +507,7 @@ static enum drm_connector_status
 intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
 {
        struct drm_device *dev = crt->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t save_bclrpat;
        uint32_t save_vtotal;
        uint32_t vtotal, vactive;
@@ -600,7 +622,7 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_attached_crt(connector);
        struct intel_encoder *intel_encoder = &crt->base;
        enum intel_display_power_domain power_domain;
@@ -681,7 +703,7 @@ static void intel_crt_destroy(struct drm_connector *connector)
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crt *crt = intel_attached_crt(connector);
        struct intel_encoder *intel_encoder = &crt->base;
        enum intel_display_power_domain power_domain;
@@ -713,11 +735,11 @@ static int intel_crt_set_property(struct drm_connector *connector,
        return 0;
 }
 
-static void intel_crt_reset(struct drm_connector *connector)
+void intel_crt_reset(struct drm_encoder *encoder)
 {
-       struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crt *crt = intel_attached_crt(connector);
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
 
        if (INTEL_INFO(dev)->gen >= 5) {
                u32 adpa;
@@ -739,10 +761,11 @@ static void intel_crt_reset(struct drm_connector *connector)
  */
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
-       .reset = intel_crt_reset,
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_crt_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
+       .late_register = intel_connector_register,
+       .early_unregister = intel_connector_unregister,
        .destroy = intel_crt_destroy,
        .set_property = intel_crt_set_property,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -753,10 +776,10 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
        .mode_valid = intel_crt_mode_valid,
        .get_modes = intel_crt_get_modes,
-       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+       .reset = intel_crt_reset,
        .destroy = intel_encoder_destroy,
 };
 
@@ -791,7 +814,7 @@ void intel_crt_init(struct drm_device *dev)
        struct drm_connector *connector;
        struct intel_crt *crt;
        struct intel_connector *intel_connector;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t adpa_reg;
        u32 adpa;
 
@@ -839,7 +862,7 @@ void intel_crt_init(struct drm_device *dev)
                           &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
        drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
-                        DRM_MODE_ENCODER_DAC, NULL);
+                        DRM_MODE_ENCODER_DAC, "CRT");
 
        intel_connector_attach_encoder(intel_connector, &crt->base);
 
@@ -876,12 +899,9 @@ void intel_crt_init(struct drm_device *dev)
                crt->base.get_hw_state = intel_crt_get_hw_state;
        }
        intel_connector->get_hw_state = intel_connector_get_hw_state;
-       intel_connector->unregister = intel_connector_unregister;
 
        drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
-       drm_connector_register(connector);
-
        if (!I915_HAS_HOTPLUG(dev))
                intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
@@ -902,5 +922,5 @@ void intel_crt_init(struct drm_device *dev)
                dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
        }
 
-       intel_crt_reset(connector);
+       intel_crt_reset(&crt->base.base);
 }
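
The intel_crt.c changes above fold together three mechanical conversions:
dev->dev_private lookups become to_i915(dev), the .reset hook moves from the
connector funcs to the encoder funcs (with connector registration handled by
the new .late_register/.early_unregister hooks), and the open-coded
wait_for() poll of PORT_HOTPLUG_EN becomes intel_wait_for_register(dev_priv,
reg, mask, value, timeout_ms). A minimal runnable sketch of that polling
contract follows (the real helper takes a millisecond timeout and handles
forcewake; this stand-in uses a simple poll budget, and the bit position is
invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define CRT_HOTPLUG_FORCE_DETECT (1u << 3) /* bit position invented */

static uint32_t fake_port_hotplug_en = CRT_HOTPLUG_FORCE_DETECT;
static int reads;

/* Stand-in for an MMIO read: the force-detect bit "goes off" after a
 * few polls, as the hardware clears it when detection completes. */
static uint32_t mmio_read(void)
{
        if (++reads >= 3)
                fake_port_hotplug_en &= ~CRT_HOTPLUG_FORCE_DETECT;
        return fake_port_hotplug_en;
}

/* Mirrors the contract used in the hunk: succeed once
 * (read & mask) == value, fail when the poll budget runs out. */
static int wait_for_register_sketch(uint32_t mask, uint32_t value,
                                    int max_polls)
{
        while (max_polls--)
                if ((mmio_read() & mask) == value)
                        return 0;
        return -1;
}

int main(void)
{
        if (wait_for_register_sketch(CRT_HOTPLUG_FORCE_DETECT, 0, 1000) == 0)
                printf("FORCE_DETECT went off\n");
        return 0;
}

With mask == CRT_HOTPLUG_FORCE_DETECT and value == 0 this expresses exactly
the "wait for FORCE_DETECT to go off" condition of the deleted wait_for().
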
index a34c23eceba0448bb93b5afdb338a82db90e179f..3edb9580928e5b30a35d855a87f6bcd66158b454 100644 (file)
  * be moved to FW_FAILED.
  */
 
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 1)
+
 #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 23)
+
 #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
 
 #define FIRMWARE_URL  "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
 
-MODULE_FIRMWARE(I915_CSR_SKL);
-MODULE_FIRMWARE(I915_CSR_BXT);
 
-#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 23)
-#define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
+
 
 #define CSR_MAX_FW_SIZE                        0x2FFF
 #define CSR_DEFAULT_FW_OFFSET          0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
        char substepping;
 };
 
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
 static const struct stepping_info kbl_stepping_info[] = {
-       {'H', '0'}, {'I', '0'}
+       {'A', '0'}, {'B', '0'}, {'C', '0'},
+       {'D', '0'}, {'E', '0'}, {'F', '0'},
+       {'G', '0'}, {'H', '0'}, {'I', '0'},
 };
 
 static const struct stepping_info skl_stepping_info[] = {
@@ -282,7 +286,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
        uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
        uint32_t i;
        uint32_t *dmc_payload;
-       uint32_t required_min_version;
+       uint32_t required_version;
 
        if (!fw)
                return NULL;
@@ -298,23 +302,24 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
        csr->version = css_header->version;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-               required_min_version = SKL_CSR_VERSION_REQUIRED;
+       if (IS_KABYLAKE(dev_priv)) {
+               required_version = KBL_CSR_VERSION_REQUIRED;
+       } else if (IS_SKYLAKE(dev_priv)) {
+               required_version = SKL_CSR_VERSION_REQUIRED;
        } else if (IS_BROXTON(dev_priv)) {
-               required_min_version = BXT_CSR_VERSION_REQUIRED;
+               required_version = BXT_CSR_VERSION_REQUIRED;
        } else {
                MISSING_CASE(INTEL_REVID(dev_priv));
-               required_min_version = 0;
+               required_version = 0;
        }
 
-       if (csr->version < required_min_version) {
-               DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
-                        " please upgrade to v%u.%u or later"
-                          " [" FIRMWARE_URL "].\n",
+       if (csr->version != required_version) {
+               DRM_INFO("Refusing to load DMC firmware v%u.%u,"
+                        " please use v%u.%u [" FIRMWARE_URL "].\n",
                         CSR_VERSION_MAJOR(csr->version),
                         CSR_VERSION_MINOR(csr->version),
-                        CSR_VERSION_MAJOR(required_min_version),
-                        CSR_VERSION_MINOR(required_min_version));
+                        CSR_VERSION_MAJOR(required_version),
+                        CSR_VERSION_MINOR(required_version));
                return NULL;
        }
 
@@ -407,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work)
        csr = &dev_priv->csr;
 
        ret = request_firmware(&fw, dev_priv->csr.fw_path,
-                              &dev_priv->dev->pdev->dev);
+                              &dev_priv->drm.pdev->dev);
        if (fw)
                dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
 
@@ -421,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work)
                         CSR_VERSION_MAJOR(csr->version),
                         CSR_VERSION_MINOR(csr->version));
        } else {
-               dev_notice(dev_priv->dev->dev,
+               dev_notice(dev_priv->drm.dev,
                           "Failed to load DMC firmware"
                           " [" FIRMWARE_URL "],"
                           " disabling runtime power management.\n");
@@ -446,7 +451,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
        if (!HAS_CSR(dev_priv))
                return;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_KABYLAKE(dev_priv))
+               csr->fw_path = I915_CSR_KBL;
+       else if (IS_SKYLAKE(dev_priv))
                csr->fw_path = I915_CSR_SKL;
        else if (IS_BROXTON(dev_priv))
                csr->fw_path = I915_CSR_BXT;
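
The intel_csr.c hunks tighten DMC firmware handling in two ways: Kabylake
now loads its own blob (i915/kbl_dmc_ver1.bin) instead of reusing Skylake's,
with the KBL stepping table filled in accordingly, and the old "refuse
anything older than the minimum" check becomes an exact-match check,
csr->version != required_version. A standalone sketch of that comparison,
assuming the usual i915 packing of CSR_VERSION(major, minor) as
(major << 16 | minor); the loaded blob version below is invented:

#include <stdint.h>
#include <stdio.h>

#define CSR_VERSION(major, minor)  (((major) << 16) | (minor))
#define CSR_VERSION_MAJOR(v)       ((v) >> 16)
#define CSR_VERSION_MINOR(v)       ((v) & 0xffff)

int main(void)
{
        uint32_t required = CSR_VERSION(1, 23); /* SKL, per the hunk */
        uint32_t loaded   = CSR_VERSION(1, 26); /* hypothetical firmware */

        /* The patch rejects anything but an exact match, newer included. */
        if (loaded != required)
                printf("refusing v%u.%u, please use v%u.%u\n",
                       CSR_VERSION_MAJOR(loaded), CSR_VERSION_MINOR(loaded),
                       CSR_VERSION_MAJOR(required),
                       CSR_VERSION_MINOR(required));
        return 0;
}
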
index 01e523df363b91a8ae1d58215c192bb0501cceb8..dd1d6fe122976edb5f6dd78428f34bb2b4313adb 100644 (file)
@@ -318,7 +318,7 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
        default:
                WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
                /* fallthrough and treat as unknown */
-       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_EDP:
        case INTEL_OUTPUT_HDMI:
        case INTEL_OUTPUT_UNKNOWN:
@@ -482,7 +482,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
                ddi_translations = ddi_translations_edp;
                size = n_edp_entries;
                break;
-       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_HDMI:
                ddi_translations = ddi_translations_dp;
                size = n_dp_entries;
@@ -543,7 +543,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 void hsw_fdi_link_train(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        u32 temp, i, rx_ctl_val;
@@ -834,7 +834,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
        if (pipe_config->has_pch_encoder)
                dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                                    &pipe_config->fdi_m_n);
-       else if (pipe_config->has_dp_encoder)
+       else if (intel_crtc_has_dp_encoder(pipe_config))
                dotclock = intel_dotclock_calculate(pipe_config->port_clock,
                                                    &pipe_config->dp_m_n);
        else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
@@ -851,7 +851,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 static void skl_ddi_clock_get(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int link_clock = 0;
        uint32_t dpll_ctl1, dpll;
 
@@ -899,7 +899,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
 static void hsw_ddi_clock_get(struct intel_encoder *encoder,
                              struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int link_clock = 0;
        u32 val, pll;
 
@@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
 {
        struct intel_shared_dpll *pll;
        struct intel_dpll_hw_state *state;
-       intel_clock_t clock;
+       struct dpll clock;
 
        /* For DDI ports we always use a shared PLL. */
        if (WARN_ON(dpll == DPLL_ID_PRIVATE))
@@ -971,7 +971,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
 static void bxt_ddi_clock_get(struct intel_encoder *encoder,
                                struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = intel_ddi_get_encoder_port(encoder);
        uint32_t dpll = port;
 
@@ -1061,14 +1061,14 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
 
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        int type = intel_encoder->type;
        uint32_t temp;
 
-       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
+       if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
                WARN_ON(transcoder_is_dsi(cpu_transcoder));
 
                temp = TRANS_MSA_SYNC_CLK;
@@ -1096,7 +1096,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        uint32_t temp;
        temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -1113,7 +1113,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_crtc->pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1182,7 +1182,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
                temp |= TRANS_DDI_MODE_SELECT_FDI;
                temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
 
-       } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
+       } else if (type == INTEL_OUTPUT_DP ||
                   type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
@@ -1223,7 +1223,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 {
        struct drm_device *dev = intel_connector->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *intel_encoder = intel_connector->encoder;
        int type = intel_connector->base.connector_type;
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1285,7 +1285,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
                            enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_ddi_get_encoder_port(encoder);
        enum intel_display_power_domain power_domain;
        u32 tmp;
@@ -1342,6 +1342,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
        DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
 
 out:
+       if (ret && IS_BROXTON(dev_priv)) {
+               tmp = I915_READ(BXT_PHY_CTL(port));
+               if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
+                           BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
+                       DRM_ERROR("Port %c enabled but PHY powered down? "
+                                 "(PHY_CTL %08x)\n", port_name(port), tmp);
+       }
+
        intel_display_power_put(dev_priv, power_domain);
 
        return ret;
@@ -1351,7 +1359,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
 {
        struct drm_crtc *crtc = &intel_crtc->base;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -1363,7 +1371,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
 
 void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
 {
-       struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
        if (cpu_transcoder != TRANSCODER_EDP)
@@ -1384,7 +1392,7 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
        dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
        hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
 
-       if (type == INTEL_OUTPUT_DISPLAYPORT) {
+       if (type == INTEL_OUTPUT_DP) {
                if (dp_iboost) {
                        iboost = dp_iboost;
                } else {
@@ -1442,7 +1450,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
        if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
                n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
                ddi_translations = bxt_ddi_translations_edp;
-       } else if (type == INTEL_OUTPUT_DISPLAYPORT
+       } else if (type == INTEL_OUTPUT_DP
                        || type == INTEL_OUTPUT_EDP) {
                n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
                ddi_translations = bxt_ddi_translations_dp;
@@ -1616,7 +1624,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 
        intel_ddi_clk_select(intel_encoder, crtc->config);
 
-       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+       if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
                intel_dp_set_link_params(intel_dp, crtc->config);
@@ -1640,7 +1648,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
        int type = intel_encoder->type;
        uint32_t val;
@@ -1661,7 +1669,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
        if (wait)
                intel_wait_ddi_buf_idle(dev_priv, port);
 
-       if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
+       if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
                intel_edp_panel_vdd_on(intel_dp);
@@ -1687,7 +1695,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
        struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
        int type = intel_encoder->type;
 
@@ -1726,7 +1734,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int type = intel_encoder->type;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (intel_crtc->config->has_audio) {
                intel_audio_codec_disable(intel_encoder);
@@ -1742,9 +1750,11 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        }
 }
 
-static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
-                                  enum dpio_phy phy)
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+                           enum dpio_phy phy)
 {
+       enum port port;
+
        if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
                return false;
 
@@ -1770,38 +1780,51 @@ static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
                return false;
        }
 
+       for_each_port_masked(port,
+                            phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
+                                               BIT(PORT_A)) {
+               u32 tmp = I915_READ(BXT_PHY_CTL(port));
+
+               if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
+                       DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
+                                        "for port %c powered down "
+                                        "(PHY_CTL %08x)\n",
+                                        phy, port_name(port), tmp);
+
+                       return false;
+               }
+       }
+
        return true;
 }
 
-static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 {
        u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
 
        return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
 }
 
-static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv,
-                                     enum dpio_phy phy)
+static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+                                 enum dpio_phy phy)
 {
-       if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
+       if (intel_wait_for_register(dev_priv,
+                                   BXT_PORT_REF_DW3(phy),
+                                   GRC_DONE, GRC_DONE,
+                                   10))
                DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
 }
 
-static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
-                                    enum dpio_phy phy);
-
-static void broxton_phy_init(struct drm_i915_private *dev_priv,
-                            enum dpio_phy phy)
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 {
-       enum port port;
-       u32 ports, val;
+       u32 val;
 
-       if (broxton_phy_is_enabled(dev_priv, phy)) {
+       if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
                /* Still read out the GRC value for state verification */
                if (phy == DPIO_PHY0)
-                       dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy);
+                       dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
 
-               if (broxton_phy_verify_state(dev_priv, phy)) {
+               if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
                        DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
                                         "won't reprogram it\n", phy);
 
@@ -1810,8 +1833,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
 
                DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
                                 "force reprogramming it\n", phy);
-       } else {
-               DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
        }
 
        val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
@@ -1831,28 +1852,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
                DRM_ERROR("timeout during PHY%d power on\n", phy);
        }
 
-       if (phy == DPIO_PHY0)
-               ports = BIT(PORT_B) | BIT(PORT_C);
-       else
-               ports = BIT(PORT_A);
-
-       for_each_port_masked(port, ports) {
-               int lane;
-
-               for (lane = 0; lane < 4; lane++) {
-                       val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
-                       /*
-                        * Note that on CHV this flag is called UPAR, but has
-                        * the same function.
-                        */
-                       val &= ~LATENCY_OPTIM;
-                       if (lane != 1)
-                               val |= LATENCY_OPTIM;
-
-                       I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
-               }
-       }
-
        /* Program PLL Rcomp code offset */
        val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
        val &= ~IREF0RC_OFFSET_MASK;
@@ -1899,10 +1898,7 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
                 * the corresponding calibrated value from PHY1, and disable
                 * the automatic calibration on PHY0.
                 */
-               broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
-
-               val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
-                                                             DPIO_PHY1);
+               val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
                grc_code = val << GRC_CODE_FAST_SHIFT |
                           val << GRC_CODE_SLOW_SHIFT |
                           val;
@@ -1912,31 +1908,16 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
                val |= GRC_DIS | GRC_RDY_OVRD;
                I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
        }
-       /*
-        * During PHY1 init delay waiting for GRC calibration to finish, since
-        * it can happen in parallel with the subsequent PHY0 init.
-        */
 
        val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
        val |= COMMON_RESET_DIS;
        I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-}
-
-void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
-{
-       /* Enable PHY1 first since it provides Rcomp for PHY0 */
-       broxton_phy_init(dev_priv, DPIO_PHY1);
-       broxton_phy_init(dev_priv, DPIO_PHY0);
 
-       /*
-        * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
-        * PHY1 GRC calibration to finish, so wait for it here.
-        */
-       broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
+       if (phy == DPIO_PHY1)
+               bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
 }
 
-static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
-                              enum dpio_phy phy)
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 {
        uint32_t val;
 
@@ -1949,12 +1930,6 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
        I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
 }
 
-void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
-{
-       broxton_phy_uninit(dev_priv, DPIO_PHY1);
-       broxton_phy_uninit(dev_priv, DPIO_PHY0);
-}
-
 static bool __printf(6, 7)
 __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
                       i915_reg_t reg, u32 mask, u32 expected,
@@ -1982,11 +1957,9 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
        return false;
 }
 
-static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
-                                    enum dpio_phy phy)
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+                             enum dpio_phy phy)
 {
-       enum port port;
-       u32 ports;
        uint32_t mask;
        bool ok;
 
@@ -1994,27 +1967,11 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
        __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt,      \
                               ## __VA_ARGS__)
 
-       /* We expect the PHY to be always enabled */
-       if (!broxton_phy_is_enabled(dev_priv, phy))
+       if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
                return false;
 
        ok = true;
 
-       if (phy == DPIO_PHY0)
-               ports = BIT(PORT_B) | BIT(PORT_C);
-       else
-               ports = BIT(PORT_A);
-
-       for_each_port_masked(port, ports) {
-               int lane;
-
-               for (lane = 0; lane < 4; lane++)
-                       ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
-                                   LATENCY_OPTIM,
-                                   lane != 1 ? LATENCY_OPTIM : 0,
-                                   "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
-       }
-
        /* PLL Rcomp code offset */
        ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
                    IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
@@ -2058,11 +2015,65 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
 #undef _CHK
 }
 
-void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv)
+static uint8_t
+bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
+                                    struct intel_crtc_state *pipe_config)
 {
-       if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) ||
-           !broxton_phy_verify_state(dev_priv, DPIO_PHY1))
-               i915_report_error(dev_priv, "DDI PHY state mismatch\n");
+       switch (pipe_config->lane_count) {
+       case 1:
+               return 0;
+       case 2:
+               return BIT(2) | BIT(0);
+       case 4:
+               return BIT(3) | BIT(2) | BIT(0);
+       default:
+               MISSING_CASE(pipe_config->lane_count);
+
+               return 0;
+       }
+}
+
+static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+       enum port port = dport->port;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       int lane;
+
+       for (lane = 0; lane < 4; lane++) {
+               u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
+
+               /*
+                * Note that on CHV this flag is called UPAR, but has
+                * the same function.
+                */
+               val &= ~LATENCY_OPTIM;
+               if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
+                       val |= LATENCY_OPTIM;
+
+               I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
+       }
+}
+
+static uint8_t
+bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
+       enum port port = dport->port;
+       int lane;
+       uint8_t mask;
+
+       mask = 0;
+       for (lane = 0; lane < 4; lane++) {
+               u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
+
+               if (val & LATENCY_OPTIM)
+                       mask |= BIT(lane);
+       }
+
+       return mask;
 }
 
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2113,7 +2124,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
 
 void intel_ddi_fdi_disable(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
        uint32_t val;
 
@@ -2146,7 +2157,7 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct intel_hdmi *intel_hdmi;
@@ -2200,7 +2211,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                break;
        case TRANS_DDI_MODE_SELECT_DP_SST:
        case TRANS_DDI_MODE_SELECT_DP_MST:
-               pipe_config->has_dp_encoder = true;
                pipe_config->lane_count =
                        ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
                intel_dp_get_m_n(intel_crtc, pipe_config);
@@ -2236,13 +2246,19 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
        }
 
        intel_ddi_clock_get(encoder, pipe_config);
+
+       if (IS_BROXTON(dev_priv))
+               pipe_config->lane_lat_optim_mask =
+                       bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
 }
 
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        int type = encoder->type;
        int port = intel_ddi_get_encoder_port(encoder);
+       int ret;
 
        WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
 
@@ -2250,9 +2266,17 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
                pipe_config->cpu_transcoder = TRANSCODER_EDP;
 
        if (type == INTEL_OUTPUT_HDMI)
-               return intel_hdmi_compute_config(encoder, pipe_config);
+               ret = intel_hdmi_compute_config(encoder, pipe_config);
        else
-               return intel_dp_compute_config(encoder, pipe_config);
+               ret = intel_dp_compute_config(encoder, pipe_config);
+
+       if (IS_BROXTON(dev_priv) && ret)
+               pipe_config->lane_lat_optim_mask =
+                       bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
+                                                            pipe_config);
+
+       return ret;
+
 }
 
 static const struct drm_encoder_funcs intel_ddi_funcs = {
@@ -2297,7 +2321,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
 
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
@@ -2347,10 +2371,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        encoder = &intel_encoder->base;
 
        drm_encoder_init(dev, encoder, &intel_ddi_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+                        DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
 
        intel_encoder->compute_config = intel_ddi_compute_config;
        intel_encoder->enable = intel_enable_ddi;
+       if (IS_BROXTON(dev_priv))
+               intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
        intel_encoder->pre_enable = intel_ddi_pre_enable;
        intel_encoder->disable = intel_disable_ddi;
        intel_encoder->post_disable = intel_ddi_post_disable;
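
Two threads run through the intel_ddi.c diff: INTEL_OUTPUT_DISPLAYPORT is
renamed to INTEL_OUTPUT_DP throughout, and the Broxton PHY lane latency
optimization stops being hardcoded during PHY init ("every lane except
lane 1") and instead becomes a per-configuration mask, computed by
bxt_ddi_phy_calc_lane_lat_optim_mask() at compute_config time and written
to BXT_PORT_TX_DW14_LN from the new bxt_ddi_pre_pll_enable() hook. A
self-contained sketch mirroring the switch in the hunk (plain C, not
driver code):

#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int lane_lat_optim_mask(int lane_count)
{
        switch (lane_count) {
        case 1:
                return 0;
        case 2:
                return BIT(2) | BIT(0);
        case 4:
                return BIT(3) | BIT(2) | BIT(0);
        default:
                return 0;
        }
}

int main(void)
{
        static const int lanes[] = { 1, 2, 4 };
        int i;

        for (i = 0; i < 3; i++)
                printf("%d lane(s) -> LATENCY_OPTIM mask 0x%x\n",
                       lanes[i], lane_lat_optim_mask(lanes[i]));
        return 0;
}

For four lanes this prints 0xd, i.e. every lane but lane 1, matching the
pattern the deleted loop in bxt_ddi_phy_init() used to program
unconditionally.
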
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
new file mode 100644 (file)
index 0000000..cba137f
--- /dev/null
@@ -0,0 +1,388 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+void intel_device_info_dump(struct drm_i915_private *dev_priv)
+{
+       const struct intel_device_info *info = &dev_priv->info;
+
+#define PRINT_S(name) "%s"
+#define SEP_EMPTY
+#define PRINT_FLAG(name) info->name ? #name "," : ""
+#define SEP_COMMA ,
+       DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
+                        DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
+                        info->gen,
+                        dev_priv->drm.pdev->device,
+                        dev_priv->drm.pdev->revision,
+                        DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
+#undef PRINT_S
+#undef SEP_EMPTY
+#undef PRINT_FLAG
+#undef SEP_COMMA
+}
+
+static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       u32 fuse, eu_dis;
+
+       fuse = I915_READ(CHV_FUSE_GT);
+
+       info->slice_total = 1;
+
+       if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+               info->subslice_per_slice++;
+               eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+                                CHV_FGT_EU_DIS_SS0_R1_MASK);
+               info->eu_total += 8 - hweight32(eu_dis);
+       }
+
+       if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+               info->subslice_per_slice++;
+               eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+                                CHV_FGT_EU_DIS_SS1_R1_MASK);
+               info->eu_total += 8 - hweight32(eu_dis);
+       }
+
+       info->subslice_total = info->subslice_per_slice;
+       /*
+        * CHV is expected to always have a uniform distribution of EU
+        * across subslices.
+        */
+       info->eu_per_subslice = info->subslice_total ?
+                               info->eu_total / info->subslice_total :
+                               0;
+       /*
+        * CHV supports subslice power gating on devices with more than
+        * one subslice, and supports EU power gating on devices with
+        * more than one EU pair per subslice.
+        */
+       info->has_slice_pg = 0;
+       info->has_subslice_pg = (info->subslice_total > 1);
+       info->has_eu_pg = (info->eu_per_subslice > 2);
+}
+
+static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       int s_max = 3, ss_max = 4, eu_max = 8;
+       int s, ss;
+       u32 fuse2, s_enable, ss_disable, eu_disable;
+       u8 eu_mask = 0xff;
+
+       fuse2 = I915_READ(GEN8_FUSE2);
+       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+       ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
+
+       info->slice_total = hweight32(s_enable);
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       info->subslice_per_slice = ss_max - hweight32(ss_disable);
+       info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+       /*
+        * Iterate through enabled slices and subslices to
+        * count the total enabled EU.
+        */
+       for (s = 0; s < s_max; s++) {
+               if (!(s_enable & BIT(s)))
+                       /* skip disabled slice */
+                       continue;
+
+               eu_disable = I915_READ(GEN9_EU_DISABLE(s));
+               for (ss = 0; ss < ss_max; ss++) {
+                       int eu_per_ss;
+
+                       if (ss_disable & BIT(ss))
+                               /* skip disabled subslice */
+                               continue;
+
+                       eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
+                                                     eu_mask);
+
+                       /*
+                        * Record which subslice(s) have 7 EUs. We
+                        * can tune the hash used to spread work among
+                        * subslices if they are unbalanced.
+                        */
+                       if (eu_per_ss == 7)
+                               info->subslice_7eu[s] |= BIT(ss);
+
+                       info->eu_total += eu_per_ss;
+               }
+       }
+
+       /*
+        * SKL is expected to always have a uniform distribution
+        * of EU across subslices with the exception that any one
+        * EU in any one subslice may be fused off for die
+        * recovery. BXT is expected to be perfectly uniform in EU
+        * distribution.
+        */
+       info->eu_per_subslice = info->subslice_total ?
+                               DIV_ROUND_UP(info->eu_total,
+                                            info->subslice_total) : 0;
+       /*
+        * SKL supports slice power gating on devices with more than
+        * one slice, and supports EU power gating on devices with
+        * more than one EU pair per subslice. BXT supports subslice
+        * power gating on devices with more than one subslice, and
+        * supports EU power gating on devices with more than one EU
+        * pair per subslice.
+        */
+       info->has_slice_pg =
+               (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+               info->slice_total > 1;
+       info->has_subslice_pg =
+               IS_BROXTON(dev_priv) && info->subslice_total > 1;
+       info->has_eu_pg = info->eu_per_subslice > 2;
+
+       if (IS_BROXTON(dev_priv)) {
+#define IS_SS_DISABLED(_ss_disable, ss)    (_ss_disable & BIT(ss))
+               /*
+                * There is a HW issue in 2x6 fused down parts that requires
+                * Pooled EU to be enabled as a WA. The pool configuration
+                * changes depending upon which subslice is fused down. This
+                * doesn't affect if the device has all 3 subslices enabled.
+                */
+               /* WaEnablePooledEuFor2x6:bxt */
+               info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
+                                      (info->subslice_per_slice == 2 &&
+                                       INTEL_REVID(dev_priv) < BXT_REVID_C0));
+
+               info->min_eu_in_pool = 0;
+               if (info->has_pooled_eu) {
+                       if (IS_SS_DISABLED(ss_disable, 0) ||
+                           IS_SS_DISABLED(ss_disable, 2))
+                               info->min_eu_in_pool = 3;
+                       else if (IS_SS_DISABLED(ss_disable, 1))
+                               info->min_eu_in_pool = 6;
+                       else
+                               info->min_eu_in_pool = 9;
+               }
+#undef IS_SS_DISABLED
+       }
+}
+
+static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       const int s_max = 3, ss_max = 3, eu_max = 8;
+       int s, ss;
+       u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+       fuse2 = I915_READ(GEN8_FUSE2);
+       s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
+       ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
+
+       eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
+       eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
+                       ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
+                        (32 - GEN8_EU_DIS0_S1_SHIFT));
+       eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
+                       ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
+                        (32 - GEN8_EU_DIS1_S2_SHIFT));
+
+       info->slice_total = hweight32(s_enable);
+
+       /*
+        * The subslice disable field is global, i.e. it applies
+        * to each of the enabled slices.
+        */
+       info->subslice_per_slice = ss_max - hweight32(ss_disable);
+       info->subslice_total = info->slice_total * info->subslice_per_slice;
+
+       /*
+        * Iterate through enabled slices and subslices to
+        * count the total enabled EU.
+        */
+       for (s = 0; s < s_max; s++) {
+               if (!(s_enable & (0x1 << s)))
+                       /* skip disabled slice */
+                       continue;
+
+               for (ss = 0; ss < ss_max; ss++) {
+                       u32 n_disabled;
+
+                       if (ss_disable & (0x1 << ss))
+                               /* skip disabled subslice */
+                               continue;
+
+                       n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
+
+                       /*
+                        * Record which subslices have 7 EUs.
+                        */
+                       if (eu_max - n_disabled == 7)
+                               info->subslice_7eu[s] |= 1 << ss;
+
+                       info->eu_total += eu_max - n_disabled;
+               }
+       }
+
+       /*
+        * BDW is expected to always have a uniform distribution of EU across
+        * subslices with the exception that any one EU in any one subslice may
+        * be fused off for die recovery.
+        */
+       info->eu_per_subslice = info->subslice_total ?
+               DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
+
+       /*
+        * BDW supports slice power gating on devices with more than
+        * one slice.
+        */
+       info->has_slice_pg = (info->slice_total > 1);
+       info->has_subslice_pg = 0;
+       info->has_eu_pg = 0;
+}
+
+/*
+ * Determine various intel_device_info fields at runtime.
+ *
+ * Use it when either:
+ *   - it's judged too laborious to fill n static structures with the limit
+ *     when a simple if statement does the job,
+ *   - run-time checks (e.g. reading fuse/strap registers) are needed.
+ *
+ * This function needs to be called:
+ *   - after the MMIO has been setup as we are reading registers,
+ *   - after the PCH has been detected,
+ *   - before the first usage of the fields it can tweak.
+ */
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_device_info *info = mkwrite_device_info(dev_priv);
+       enum pipe pipe;
+
+       /*
+        * Skylake and Broxton currently don't expose the topmost plane as its
+        * use is exclusive with the legacy cursor and we only want to expose
+        * one of those, not both. Until we can safely expose the topmost plane
+        * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+        * we don't expose the topmost plane at all to prevent ABI breakage
+        * down the line.
+        */
+       if (IS_BROXTON(dev_priv)) {
+               info->num_sprites[PIPE_A] = 2;
+               info->num_sprites[PIPE_B] = 2;
+               info->num_sprites[PIPE_C] = 1;
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               for_each_pipe(dev_priv, pipe)
+                       info->num_sprites[pipe] = 2;
+       else
+               for_each_pipe(dev_priv, pipe)
+                       info->num_sprites[pipe] = 1;
+
+       if (i915.disable_display) {
+               DRM_INFO("Display disabled (module parameter)\n");
+               info->num_pipes = 0;
+       } else if (info->num_pipes > 0 &&
+                  (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
+                  HAS_PCH_SPLIT(dev_priv)) {
+               u32 fuse_strap = I915_READ(FUSE_STRAP);
+               u32 sfuse_strap = I915_READ(SFUSE_STRAP);
+
+               /*
+                * SFUSE_STRAP is supposed to have a bit signalling the display
+                * is fused off. Unfortunately it seems that, at least in
+                * certain cases, fused off display means that PCH display
+                * reads don't land anywhere. In that case, we read 0s.
+                *
+                * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
+                * should be set when taking over after the firmware.
+                */
+               if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
+                   sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
+                   (dev_priv->pch_type == PCH_CPT &&
+                    !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
+                       DRM_INFO("Display fused off, disabling\n");
+                       info->num_pipes = 0;
+               } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+                       DRM_INFO("PipeC fused off\n");
+                       info->num_pipes -= 1;
+               }
+       } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+               u32 dfsm = I915_READ(SKL_DFSM);
+               u8 disabled_mask = 0;
+               bool invalid;
+               int num_bits;
+
+               if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+                       disabled_mask |= BIT(PIPE_A);
+               if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+                       disabled_mask |= BIT(PIPE_B);
+               if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+                       disabled_mask |= BIT(PIPE_C);
+
+               num_bits = hweight8(disabled_mask);
+
+               switch (disabled_mask) {
+               case BIT(PIPE_A):
+               case BIT(PIPE_B):
+               case BIT(PIPE_A) | BIT(PIPE_B):
+               case BIT(PIPE_A) | BIT(PIPE_C):
+                       invalid = true;
+                       break;
+               default:
+                       invalid = false;
+               }
+
+               if (num_bits > info->num_pipes || invalid)
+                       DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+                                 disabled_mask);
+               else
+                       info->num_pipes -= num_bits;
+       }
+
+       /* Initialize slice/subslice/EU info */
+       if (IS_CHERRYVIEW(dev_priv))
+               cherryview_sseu_info_init(dev_priv);
+       else if (IS_BROADWELL(dev_priv))
+               broadwell_sseu_info_init(dev_priv);
+       else if (INTEL_INFO(dev_priv)->gen >= 9)
+               gen9_sseu_info_init(dev_priv);
+
+       info->has_snoop = !info->has_llc;
+
+       /* Snooping is broken on BXT A stepping. */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+               info->has_snoop = false;
+
+       DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+       DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+       DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+       DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+       DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+       DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+                        info->has_slice_pg ? "y" : "n");
+       DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+                        info->has_subslice_pg ? "y" : "n");
+       DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+                        info->has_eu_pg ? "y" : "n");
+}
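
The new intel_device_info.c collects the driver's runtime device-info
probing (sprite counts, fused-off pipe detection, slice/subslice/EU
topology) into one place. The sseu init functions all follow the same
pattern: read a fuse register, count the surviving units with hweight32(),
and derive the power-gating capabilities from the totals. A standalone
sketch of the CHV-style arithmetic, with an invented fuse value and the
GCC/Clang __builtin_popcount() standing in for the kernel's hweight32():

#include <stdio.h>

int main(void)
{
        /* Hypothetical fuse: subslice 1 disabled, two EUs fused off in
         * subslice 0 (eight EUs per subslice before fusing). */
        unsigned int eu_dis_ss0 = 0x3;
        int subslice_total = 1;
        int eu_total = 8 - __builtin_popcount(eu_dis_ss0);
        int eu_per_subslice =
                subslice_total ? eu_total / subslice_total : 0;

        /* Per the CHV comment above: EU power gating needs more than
         * one EU pair per subslice. */
        int has_eu_pg = eu_per_subslice > 2;

        printf("subslices=%d eu_total=%d eu/ss=%d eu_pg=%d\n",
               subslice_total, eu_total, eu_per_subslice, has_eu_pg);
        return 0;
}
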
index 2113f401f0bacf303e2dbcc222d922c3006ed046..c457eed76f1f7a1fca441d4714c2aebf95534d05 100644 (file)
@@ -36,6 +36,7 @@
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "intel_dsi.h"
 #include "i915_trace.h"
 #include <drm/drm_atomic.h>
 #include <drm/drm_rect.h>
 #include <linux/dma_remapping.h>
 #include <linux/reservation.h>
-#include <linux/dma-buf.h>
+
+static bool is_mmio_work(struct intel_flip_work *work)
+{
+       return work->mmio_work.func;
+}
 
 /* Primary plane formats for gen <= 3 */
 static const uint32_t i8xx_primary_formats[] = {
@@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 static void ironlake_pfit_enable(struct intel_crtc *crtc);
 static void intel_modeset_setup_hw_state(struct drm_device *dev);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static int ilk_max_pixel_rate(struct drm_atomic_state *state);
+static int bxt_calc_cdclk(int max_pixclk);
 
-typedef struct {
-       int     min, max;
-} intel_range_t;
-
-typedef struct {
-       int     dot_limit;
-       int     p2_slow, p2_fast;
-} intel_p2_t;
-
-typedef struct intel_limit intel_limit_t;
 struct intel_limit {
-       intel_range_t   dot, vco, n, m, m1, m2, p, p1;
-       intel_p2_t          p2;
+       struct {
+               int min, max;
+       } dot, vco, n, m, m1, m2, p, p1;
+
+       struct {
+               int dot_limit;
+               int p2_slow, p2_fast;
+       } p2;
 };
 
 /* returns HPLL frequency in kHz */
@@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv)
 static int
 intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
 {
+       /* RAWCLK_FREQ_VLV register updated from power well code */
        return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
                                      CCK_DISPLAY_REF_CLOCK_CONTROL);
 }
@@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
        }
 }
 
-static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+void intel_update_rawclk(struct drm_i915_private *dev_priv)
 {
        if (HAS_PCH_SPLIT(dev_priv))
                dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
@@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
                return 270000;
 }
 
-static const intel_limit_t intel_limits_i8xx_dac = {
+static const struct intel_limit intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
@@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = {
                .p2_slow = 4, .p2_fast = 2 },
 };
 
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const struct intel_limit intel_limits_i8xx_dvo = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
@@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
                .p2_slow = 4, .p2_fast = 4 },
 };
 
-static const intel_limit_t intel_limits_i8xx_lvds = {
+static const struct intel_limit intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 908000, .max = 1512000 },
        .n = { .min = 2, .max = 16 },
@@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
                .p2_slow = 14, .p2_fast = 7 },
 };
 
-static const intel_limit_t intel_limits_i9xx_sdvo = {
+static const struct intel_limit intel_limits_i9xx_sdvo = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
@@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
                .p2_slow = 10, .p2_fast = 5 },
 };
 
-static const intel_limit_t intel_limits_i9xx_lvds = {
+static const struct intel_limit intel_limits_i9xx_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1400000, .max = 2800000 },
        .n = { .min = 1, .max = 6 },
@@ -321,7 +325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
 };
 
 
-static const intel_limit_t intel_limits_g4x_sdvo = {
+static const struct intel_limit intel_limits_g4x_sdvo = {
        .dot = { .min = 25000, .max = 270000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
@@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
        },
 };
 
-static const intel_limit_t intel_limits_g4x_hdmi = {
+static const struct intel_limit intel_limits_g4x_hdmi = {
        .dot = { .min = 22000, .max = 400000 },
        .vco = { .min = 1750000, .max = 3500000},
        .n = { .min = 1, .max = 4 },
@@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
                .p2_slow = 10, .p2_fast = 5 },
 };
 
-static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
        .dot = { .min = 20000, .max = 115000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
@@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
        },
 };
 
-static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
        .dot = { .min = 80000, .max = 224000 },
        .vco = { .min = 1750000, .max = 3500000 },
        .n = { .min = 1, .max = 3 },
@@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
        },
 };
 
-static const intel_limit_t intel_limits_pineview_sdvo = {
+static const struct intel_limit intel_limits_pineview_sdvo = {
        .dot = { .min = 20000, .max = 400000},
        .vco = { .min = 1700000, .max = 3500000 },
        /* Pineview's Ncounter is a ring counter */
@@ -392,7 +396,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
                .p2_slow = 10, .p2_fast = 5 },
 };
 
-static const intel_limit_t intel_limits_pineview_lvds = {
+static const struct intel_limit intel_limits_pineview_lvds = {
        .dot = { .min = 20000, .max = 400000 },
        .vco = { .min = 1700000, .max = 3500000 },
        .n = { .min = 3, .max = 6 },
@@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = {
  * We calculate clock using (register_value + 2) for N/M1/M2, so here
  * the range value for them is (actual_value - 2).
  */
-static const intel_limit_t intel_limits_ironlake_dac = {
+static const struct intel_limit intel_limits_ironlake_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 5 },
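
A worked reading of the comment above (illustration only): these tables store register values, so .n = { .min = 1, .max = 5 } corresponds to effective divisors of (1 + 2)..(5 + 2) = 3..7, and i9xx_dpll_compute_m() further down applies the same +2 offset to m1 and m2.
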
@@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = {
                .p2_slow = 10, .p2_fast = 5 },
 };
 
-static const intel_limit_t intel_limits_ironlake_single_lvds = {
+static const struct intel_limit intel_limits_ironlake_single_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
@@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = {
                .p2_slow = 14, .p2_fast = 14 },
 };
 
-static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
@@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = {
 };
 
 /* LVDS 100 MHz refclk limits. */
-static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 2 },
@@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
                .p2_slow = 14, .p2_fast = 14 },
 };
 
-static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 1760000, .max = 3510000 },
        .n = { .min = 1, .max = 3 },
@@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
                .p2_slow = 7, .p2_fast = 7 },
 };
 
-static const intel_limit_t intel_limits_vlv = {
+static const struct intel_limit intel_limits_vlv = {
         /*
          * These are the data rate limits (measured in fast clocks)
          * since those are the strictest limits we have. The fast
@@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = {
        .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
 };
 
-static const intel_limit_t intel_limits_chv = {
+static const struct intel_limit intel_limits_chv = {
        /*
         * These are the data rate limits (measured in fast clocks)
         * since those are the strictest limits we have.  The fast
@@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = {
        .p2 = { .p2_slow = 1, .p2_fast = 14 },
 };
 
-static const intel_limit_t intel_limits_bxt = {
+static const struct intel_limit intel_limits_bxt = {
        /* FIXME: find real dot limits */
        .dot = { .min = 0, .max = INT_MAX },
        .vco = { .min = 4800000, .max = 6700000 },
@@ -526,52 +530,6 @@ needs_modeset(struct drm_crtc_state *state)
        return drm_atomic_crtc_needs_modeset(state);
 }
 
-/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
-{
-       struct drm_device *dev = crtc->base.dev;
-       struct intel_encoder *encoder;
-
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
-               if (encoder->type == type)
-                       return true;
-
-       return false;
-}
-
-/**
- * Returns whether any output on the specified pipe will have the specified
- * type after a staged modeset is complete, i.e., the same as
- * intel_pipe_has_type() but looking at encoder->new_crtc instead of
- * encoder->crtc.
- */
-static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
-                                     int type)
-{
-       struct drm_atomic_state *state = crtc_state->base.state;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       struct intel_encoder *encoder;
-       int i, num_connectors = 0;
-
-       for_each_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != crtc_state->base.crtc)
-                       continue;
-
-               num_connectors++;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-               if (encoder->type == type)
-                       return true;
-       }
-
-       WARN_ON(num_connectors == 0);
-
-       return false;
-}
-
 /*
  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -581,7 +539,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
  * divided-down version of it.
  */
 /* m1 is reserved as 0 in Pineview, n is a ring counter */
-static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
 {
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
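
To make the helper concrete, here is a worked example with hypothetical register values; the vco/dot tail of pnv_calc_dpll_params() lies outside this hunk, so the usual derivation vco = refclk * m / n and dot = vco / p is assumed:

/*
 * Hypothetical Pineview register values (illustration only):
 *   refclk = 96000 kHz, m2 = 100, n = 3, p1 = 2, p2 = 10
 *   m   = m2 + 2          = 102
 *   p   = p1 * p2         = 20
 *   vco = refclk * m / n  = 96000 * 102 / 3 = 3264000 kHz
 *   dot = vco / p         = 3264000 / 20    = 163200 kHz
 * Both vco and dot land inside the Pineview .vco and .dot ranges above.
 */
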
@@ -598,7 +556,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
        return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
 }
 
-static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
 {
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
@@ -610,7 +568,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
        return clock->dot;
 }
 
-static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
+static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
 {
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
@@ -622,7 +580,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
        return clock->dot / 5;
 }
 
-int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
+int chv_calc_dpll_params(int refclk, struct dpll *clock)
 {
        clock->m = clock->m1 * clock->m2;
        clock->p = clock->p1 * clock->p2;
@@ -642,8 +600,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
  */
 
 static bool intel_PLL_is_valid(struct drm_device *dev,
-                              const intel_limit_t *limit,
-                              const intel_clock_t *clock)
+                              const struct intel_limit *limit,
+                              const struct dpll *clock)
 {
        if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
                INTELPllInvalid("n out of range\n");
@@ -678,13 +636,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
 }
 
 static int
-i9xx_select_p2_div(const intel_limit_t *limit,
+i9xx_select_p2_div(const struct intel_limit *limit,
                   const struct intel_crtc_state *crtc_state,
                   int target)
 {
        struct drm_device *dev = crtc_state->base.crtc->dev;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                /*
                 * For LVDS just rely on its current settings for dual-channel.
                 * We haven't figured out how to reliably set up different
@@ -713,13 +671,13 @@ i9xx_select_p2_div(const intel_limit_t *limit,
  * divider from @match_clock used for LVDS downclocking.
  */
 static bool
-i9xx_find_best_dpll(const intel_limit_t *limit,
+i9xx_find_best_dpll(const struct intel_limit *limit,
                    struct intel_crtc_state *crtc_state,
-                   int target, int refclk, intel_clock_t *match_clock,
-                   intel_clock_t *best_clock)
+                   int target, int refclk, struct dpll *match_clock,
+                   struct dpll *best_clock)
 {
        struct drm_device *dev = crtc_state->base.crtc->dev;
-       intel_clock_t clock;
+       struct dpll clock;
        int err = target;
 
        memset(best_clock, 0, sizeof(*best_clock));
@@ -770,13 +728,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
  * divider from @match_clock used for LVDS downclocking.
  */
 static bool
-pnv_find_best_dpll(const intel_limit_t *limit,
+pnv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
-                  int target, int refclk, intel_clock_t *match_clock,
-                  intel_clock_t *best_clock)
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
 {
        struct drm_device *dev = crtc_state->base.crtc->dev;
-       intel_clock_t clock;
+       struct dpll clock;
        int err = target;
 
        memset(best_clock, 0, sizeof(*best_clock));
@@ -825,13 +783,13 @@ pnv_find_best_dpll(const intel_limit_t *limit,
  * divider from @match_clock used for LVDS downclocking.
  */
 static bool
-g4x_find_best_dpll(const intel_limit_t *limit,
+g4x_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
-                  int target, int refclk, intel_clock_t *match_clock,
-                  intel_clock_t *best_clock)
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
 {
        struct drm_device *dev = crtc_state->base.crtc->dev;
-       intel_clock_t clock;
+       struct dpll clock;
        int max_n;
        bool found = false;
        /* approximately equals target * 0.00585 */
@@ -877,8 +835,8 @@ g4x_find_best_dpll(const intel_limit_t *limit,
  * best configuration and error found so far. Return the calculated error.
  */
 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
-                              const intel_clock_t *calculated_clock,
-                              const intel_clock_t *best_clock,
+                              const struct dpll *calculated_clock,
+                              const struct dpll *best_clock,
                               unsigned int best_error_ppm,
                               unsigned int *error_ppm)
 {
@@ -918,14 +876,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
  * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  */
 static bool
-vlv_find_best_dpll(const intel_limit_t *limit,
+vlv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
-                  int target, int refclk, intel_clock_t *match_clock,
-                  intel_clock_t *best_clock)
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
-       intel_clock_t clock;
+       struct dpll clock;
        unsigned int bestppm = 1000000;
        /* min update 19.2 MHz */
        int max_n = min(limit->n.max, refclk / 19200);
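
The clamp above implements the "min update 19.2 MHz" comment: refclk is in kHz, so refclk / 19200 is the largest n for which refclk / n stays at or above 19.2 MHz. A sketch assuming a 100 MHz reference clock (illustrative value, not taken from this hunk):

/*
 * refclk = 100000 kHz (assumed):
 *   max_n = 100000 / 19200 = 5  (integer division)
 *   refclk / 5 = 20000 kHz >= 19200 kHz, whereas
 *   refclk / 6 ~= 16667 kHz would fall below the 19.2 MHz minimum.
 */
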
@@ -977,15 +935,15 @@ vlv_find_best_dpll(const intel_limit_t *limit,
  * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
  */
 static bool
-chv_find_best_dpll(const intel_limit_t *limit,
+chv_find_best_dpll(const struct intel_limit *limit,
                   struct intel_crtc_state *crtc_state,
-                  int target, int refclk, intel_clock_t *match_clock,
-                  intel_clock_t *best_clock)
+                  int target, int refclk, struct dpll *match_clock,
+                  struct dpll *best_clock)
 {
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        unsigned int best_error_ppm;
-       intel_clock_t clock;
+       struct dpll clock;
        uint64_t m2;
        int found = false;
 
@@ -1035,10 +993,10 @@ chv_find_best_dpll(const intel_limit_t *limit,
 }
 
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
-                       intel_clock_t *best_clock)
+                       struct dpll *best_clock)
 {
        int refclk = 100000;
-       const intel_limit_t *limit = &intel_limits_bxt;
+       const struct intel_limit *limit = &intel_limits_bxt;
 
        return chv_find_best_dpll(limit, crtc_state,
                                  target_clock, refclk, NULL, best_clock);
@@ -1076,7 +1034,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
 
 static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t reg = PIPEDSL(pipe);
        u32 line1, line2;
        u32 line_mask;
@@ -1112,7 +1070,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
 
@@ -1120,8 +1078,9 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
                i915_reg_t reg = PIPECONF(cpu_transcoder);
 
                /* Wait for the Pipe State to go off */
-               if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
-                            100))
+               if (intel_wait_for_register(dev_priv,
+                                           reg, I965_PIPECONF_ACTIVE, 0,
+                                           100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
                /* Wait for the display line to settle */
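
This is the first of many wait_for() to intel_wait_for_register() conversions in this patch, and all of them share one shape. A minimal sketch of the semantics implied by the call sites (assumed here, not the driver's actual definition):

/*
 * intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms):
 * polls reg until (I915_READ(reg) & mask) == value, giving up after
 * timeout_ms milliseconds. It returns 0 on success and non-zero on
 * timeout, which is why every caller logs an error on a true result.
 */
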
@@ -1203,7 +1162,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
        u32 val;
 
        /* ILK FDI PLL is always enabled */
-       if (INTEL_INFO(dev_priv)->gen == 5)
+       if (IS_GEN5(dev_priv))
                return;
 
        /* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1230,7 +1189,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
                           enum pipe pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        i915_reg_t pp_reg;
        u32 val;
        enum pipe panel_pipe = PIPE_A;
@@ -1272,7 +1231,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
 static void assert_cursor(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        bool cur_state;
 
        if (IS_845G(dev) || IS_I865G(dev))
@@ -1334,7 +1293,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
                                   enum pipe pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        int i;
 
        /* Primary planes are fixed to pipes on gen4+ */
@@ -1360,7 +1319,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
                                    enum pipe pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        int sprite;
 
        if (INTEL_INFO(dev)->gen >= 9) {
@@ -1540,7 +1499,11 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
        POSTING_READ(DPLL(pipe));
        udelay(150);
 
-       if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+       if (intel_wait_for_register(dev_priv,
+                                   DPLL(pipe),
+                                   DPLL_LOCK_VLV,
+                                   DPLL_LOCK_VLV,
+                                   1))
                DRM_ERROR("DPLL %d failed to lock\n", pipe);
 }
 
@@ -1589,7 +1552,9 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
        I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
 
        /* Check PLL is locked */
-       if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+       if (intel_wait_for_register(dev_priv,
+                                   DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
+                                   1))
                DRM_ERROR("PLL %d failed to lock\n", pipe);
 }
 
@@ -1635,9 +1600,10 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
        struct intel_crtc *crtc;
        int count = 0;
 
-       for_each_intel_crtc(dev, crtc)
+       for_each_intel_crtc(dev, crtc) {
                count += crtc->base.state->active &&
-                       intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
+                       intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
+       }
 
        return count;
 }
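
A note on the counting idiom kept above: a C logical expression evaluates to exactly 0 or 1, so the accumulation is equivalent to the sketch below.

	if (crtc->base.state->active &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO))
		count++;
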
@@ -1645,7 +1611,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
 static void i9xx_enable_pll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t reg = DPLL(crtc->pipe);
        u32 dpll = crtc->config->dpll_hw_state.dpll;
 
@@ -1717,12 +1683,12 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
 static void i9xx_disable_pll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
 
        /* Disable DVO 2x clock on both PLLs if necessary */
        if (IS_I830(dev) &&
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
+           intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
            !intel_num_dvo_pipes(dev)) {
                I915_WRITE(DPLL(PIPE_B),
                           I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1809,7 +1775,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                BUG();
        }
 
-       if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
+       if (intel_wait_for_register(dev_priv,
+                                   dpll_reg, port_mask, expected_mask,
+                                   1000))
                WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
                     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
 }
@@ -1817,7 +1785,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                                           enum pipe pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        i915_reg_t reg;
@@ -1850,7 +1818,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                 * here for both 8bpc and 12bpc.
                 */
                val &= ~PIPECONF_BPC_MASK;
-               if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
+               if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
                        val |= PIPECONF_8BPC;
                else
                        val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1859,7 +1827,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
        val &= ~TRANS_INTERLACE_MASK;
        if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
                if (HAS_PCH_IBX(dev_priv) &&
-                   intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+                   intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
                        val |= TRANS_LEGACY_INTERLACED_ILK;
                else
                        val |= TRANS_INTERLACED;
@@ -1867,7 +1835,9 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                val |= TRANS_PROGRESSIVE;
 
        I915_WRITE(reg, val | TRANS_ENABLE);
-       if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+       if (intel_wait_for_register(dev_priv,
+                                   reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
+                                   100))
                DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
 }
 
@@ -1895,14 +1865,18 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
                val |= TRANS_PROGRESSIVE;
 
        I915_WRITE(LPT_TRANSCONF, val);
-       if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
+       if (intel_wait_for_register(dev_priv,
+                                   LPT_TRANSCONF,
+                                   TRANS_STATE_ENABLE,
+                                   TRANS_STATE_ENABLE,
+                                   100))
                DRM_ERROR("Failed to enable PCH transcoder\n");
 }
 
 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
                                            enum pipe pipe)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        i915_reg_t reg;
        uint32_t val;
 
@@ -1918,7 +1892,9 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
        val &= ~TRANS_ENABLE;
        I915_WRITE(reg, val);
        /* wait for PCH transcoder off, transcoder state */
-       if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+       if (intel_wait_for_register(dev_priv,
+                                   reg, TRANS_STATE_ENABLE, 0,
+                                   50))
                DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
 
        if (HAS_PCH_CPT(dev)) {
@@ -1938,7 +1914,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
        val &= ~TRANS_ENABLE;
        I915_WRITE(LPT_TRANSCONF, val);
        /* wait for PCH transcoder off, transcoder state */
-       if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
+       if (intel_wait_for_register(dev_priv,
+                                   LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
+                                   50))
                DRM_ERROR("Failed to disable PCH transcoder\n");
 
        /* Workaround: clear timing override bit. */
@@ -1957,7 +1935,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
 static void intel_enable_pipe(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        enum pipe pch_transcoder;
@@ -1981,7 +1959,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
         * need the check.
         */
        if (HAS_GMCH_DISPLAY(dev_priv))
-               if (crtc->config->has_dsi_encoder)
+               if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
                        assert_dsi_pll_enabled(dev_priv);
                else
                        assert_pll_enabled(dev_priv, pipe);
@@ -2030,7 +2008,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
  */
 static void intel_disable_pipe(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        enum pipe pipe = crtc->pipe;
        i915_reg_t reg;
@@ -2068,15 +2046,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
                intel_wait_for_pipe_off(crtc);
 }
 
-static bool need_vtd_wa(struct drm_device *dev)
-{
-#ifdef CONFIG_INTEL_IOMMU
-       if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
-               return true;
-#endif
-       return false;
-}
-
 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
 {
        return IS_GEN2(dev_priv) ? 2048 : 4096;
@@ -2241,7 +2210,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                           unsigned int rotation)
 {
        struct drm_device *dev = fb->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_ggtt_view view;
        u32 alignment;
@@ -2258,7 +2227,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
         * we should always have valid PTE following the scanout preventing
         * the VT-d warning.
         */
-       if (need_vtd_wa(dev) && alignment < 256 * 1024)
+       if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
                alignment = 256 * 1024;
 
        /*
@@ -2309,7 +2278,7 @@ err_pm:
        return ret;
 }
 
-static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
 {
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct i915_ggtt_view view;
@@ -2543,7 +2512,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct intel_crtc *i;
        struct drm_i915_gem_object *obj;
@@ -2639,7 +2608,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
                                      const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = primary->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2752,7 +2721,7 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
                                       struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane;
 
@@ -2769,7 +2738,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
                                          const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = primary->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2897,7 +2866,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
        I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
@@ -3007,7 +2976,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
                                         const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -3091,7 +3060,7 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
                                          struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = to_intel_crtc(crtc)->pipe;
 
        I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -3110,17 +3079,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        return -ENODEV;
 }
 
-static void intel_complete_page_flips(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
 {
-       struct drm_crtc *crtc;
-
-       for_each_crtc(dev, crtc) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-               enum plane plane = intel_crtc->plane;
+       struct intel_crtc *crtc;
 
-               intel_prepare_page_flip(dev, plane);
-               intel_finish_page_flip_plane(dev, plane);
-       }
+       for_each_intel_crtc(&dev_priv->drm, crtc)
+               intel_finish_page_flip_cs(dev_priv, crtc->pipe);
 }
 
 static void intel_update_primary_planes(struct drm_device *dev)
@@ -3143,41 +3107,39 @@ static void intel_update_primary_planes(struct drm_device *dev)
        }
 }
 
-void intel_prepare_reset(struct drm_device *dev)
+void intel_prepare_reset(struct drm_i915_private *dev_priv)
 {
        /* no reset support for gen2 */
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev_priv))
                return;
 
        /* reset doesn't touch the display */
-       if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
                return;
 
-       drm_modeset_lock_all(dev);
+       drm_modeset_lock_all(&dev_priv->drm);
        /*
         * Disabling the crtcs gracefully seems nicer. Also the
         * g33 docs say we should at least disable all the planes.
         */
-       intel_display_suspend(dev);
+       intel_display_suspend(&dev_priv->drm);
 }
 
-void intel_finish_reset(struct drm_device *dev)
+void intel_finish_reset(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-
        /*
         * Flips in the rings will be nuked by the reset,
         * so complete all pending flips so that user space
         * will get its events and not get stuck.
         */
-       intel_complete_page_flips(dev);
+       intel_complete_page_flips(dev_priv);
 
        /* no reset support for gen2 */
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev_priv))
                return;
 
        /* reset doesn't touch the display */
-       if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+       if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
                /*
                 * Flips in the rings have been nuked by the reset,
                 * so update the base address of all primary
@@ -3187,7 +3149,7 @@ void intel_finish_reset(struct drm_device *dev)
                 * FIXME: Atomic will make this obsolete since we won't schedule
                 * CS-based flips (which might get lost in gpu resets) any more.
                 */
-               intel_update_primary_planes(dev);
+               intel_update_primary_planes(&dev_priv->drm);
                return;
        }
 
@@ -3198,18 +3160,18 @@ void intel_finish_reset(struct drm_device *dev)
        intel_runtime_pm_disable_interrupts(dev_priv);
        intel_runtime_pm_enable_interrupts(dev_priv);
 
-       intel_modeset_init_hw(dev);
+       intel_modeset_init_hw(&dev_priv->drm);
 
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev);
+               dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       intel_display_resume(dev);
+       intel_display_resume(&dev_priv->drm);
 
        intel_hpd_init(dev_priv);
 
-       drm_modeset_unlock_all(dev);
+       drm_modeset_unlock_all(&dev_priv->drm);
 }
 
 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3224,7 +3186,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
                return false;
 
        spin_lock_irq(&dev->event_lock);
-       pending = to_intel_crtc(crtc)->unpin_work != NULL;
+       pending = to_intel_crtc(crtc)->flip_work != NULL;
        spin_unlock_irq(&dev->event_lock);
 
        return pending;
@@ -3234,7 +3196,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *old_crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->base.state);
 
@@ -3275,7 +3237,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
 static void intel_fdi_normal_train(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
@@ -3318,7 +3280,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
@@ -3419,7 +3381,7 @@ static const int snb_b_fdi_train_param[] = {
 static void gen6_fdi_link_train(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
@@ -3552,7 +3514,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
@@ -3671,7 +3633,7 @@ train_done:
 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;
@@ -3708,7 +3670,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
        u32 temp;
@@ -3738,7 +3700,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
 static void ironlake_fdi_disable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        i915_reg_t reg;
@@ -3803,7 +3765,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
                if (atomic_read(&crtc->unpin_work_count) == 0)
                        continue;
 
-               if (crtc->unpin_work)
+               if (crtc->flip_work)
                        intel_wait_for_vblank(dev, crtc->pipe);
 
                return true;
@@ -3815,11 +3777,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 static void page_flip_completed(struct intel_crtc *intel_crtc)
 {
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
-       struct intel_unpin_work *work = intel_crtc->unpin_work;
+       struct intel_flip_work *work = intel_crtc->flip_work;
 
-       /* ensure that the unpin work is consistent wrt ->pending. */
-       smp_rmb();
-       intel_crtc->unpin_work = NULL;
+       intel_crtc->flip_work = NULL;
 
        if (work->event)
                drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
@@ -3827,7 +3787,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
        drm_crtc_vblank_put(&intel_crtc->base);
 
        wake_up_all(&dev_priv->pending_flip_queue);
-       queue_work(dev_priv->wq, &work->work);
+       queue_work(dev_priv->wq, &work->unpin_work);
 
        trace_i915_flip_complete(intel_crtc->plane,
                                 work->pending_flip_obj);
@@ -3836,7 +3796,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        long ret;
 
        WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
@@ -3851,9 +3811,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 
        if (ret == 0) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+               struct intel_flip_work *work;
 
                spin_lock_irq(&dev->event_lock);
-               if (intel_crtc->unpin_work) {
+               work = intel_crtc->flip_work;
+               if (work && !is_mmio_work(work)) {
                        WARN_ONCE(1, "Removing stuck page flip\n");
                        page_flip_completed(intel_crtc);
                }
@@ -3997,7 +3959,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
                                                enum pipe pch_transcoder)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
 
        I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
@@ -4019,7 +3981,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
 
 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t temp;
 
        temp = I915_READ(SOUTH_CHICKEN1);
@@ -4069,7 +4031,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
 
        for_each_encoder_on_crtc(dev, crtc, encoder) {
-               if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+               if (encoder->type == INTEL_OUTPUT_DP ||
                    encoder->type == INTEL_OUTPUT_EDP)
                        return enc_to_dig_port(&encoder->base)->port;
        }
@@ -4088,7 +4050,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
 static void ironlake_pch_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 temp;
@@ -4138,7 +4100,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
        intel_fdi_normal_train(crtc);
 
        /* For PCH DP, enable TRANS_DP_CTL */
-       if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
+       if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
                const struct drm_display_mode *adjusted_mode =
                        &intel_crtc->config->base.adjusted_mode;
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4178,7 +4140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 static void lpt_pch_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 
@@ -4194,7 +4156,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
 
 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t dslreg = PIPEDSL(pipe);
        u32 temp;
 
@@ -4281,8 +4243,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
        struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
        const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
 
-       DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
-                     intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
+       DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
+                     intel_crtc->base.base.id, intel_crtc->base.name,
+                     intel_crtc->pipe, SKL_CRTC_INDEX);
 
        return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
                &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
@@ -4312,9 +4275,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 
        bool force_detach = !fb || !plane_state->visible;
 
-       DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
-                     intel_plane->base.base.id, intel_crtc->pipe,
-                     drm_plane_index(&intel_plane->base));
+       DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
+                     intel_plane->base.base.id, intel_plane->base.name,
+                     intel_crtc->pipe, drm_plane_index(&intel_plane->base));
 
        ret = skl_update_scaler(crtc_state, force_detach,
                                drm_plane_index(&intel_plane->base),
@@ -4330,8 +4293,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
 
        /* check colorkey */
        if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
-               DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
-                             intel_plane->base.base.id);
+               DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
+                             intel_plane->base.base.id,
+                             intel_plane->base.name);
                return -EINVAL;
        }
 
@@ -4350,8 +4314,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
        case DRM_FORMAT_VYUY:
                break;
        default:
-               DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
-                       intel_plane->base.base.id, fb->base.id, fb->pixel_format);
+               DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+                             intel_plane->base.base.id, intel_plane->base.name,
+                             fb->base.id, fb->pixel_format);
                return -EINVAL;
        }
 
@@ -4369,7 +4334,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
 static void skylake_pfit_enable(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        struct intel_crtc_scaler_state *scaler_state =
                &crtc->config->scaler_state;
@@ -4397,7 +4362,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
 static void ironlake_pfit_enable(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
 
        if (crtc->config->pch_pfit.enabled) {
@@ -4418,7 +4383,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
 void hsw_enable_ips(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!crtc->config->ips_enabled)
                return;
@@ -4446,7 +4411,9 @@ void hsw_enable_ips(struct intel_crtc *crtc)
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
-               if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+               if (intel_wait_for_register(dev_priv,
+                                           IPS_CTL, IPS_ENABLE, IPS_ENABLE,
+                                           50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
 }
@@ -4454,7 +4421,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
 void hsw_disable_ips(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!crtc->config->ips_enabled)
                return;
@@ -4465,7 +4432,9 @@ void hsw_disable_ips(struct intel_crtc *crtc)
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->rps.hw_lock);
                /* wait for pcode to finish disabling IPS, which may take up to 42ms */
-               if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
+               if (intel_wait_for_register(dev_priv,
+                                           IPS_CTL, IPS_ENABLE, 0,
+                                           42))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
@@ -4480,7 +4449,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
 {
        if (intel_crtc->overlay) {
                struct drm_device *dev = intel_crtc->base.dev;
-               struct drm_i915_private *dev_priv = dev->dev_private;
+               struct drm_i915_private *dev_priv = to_i915(dev);
 
                mutex_lock(&dev->struct_mutex);
                dev_priv->mm.interruptible = false;
@@ -4508,7 +4477,7 @@ static void
 intel_post_enable_primary(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 
@@ -4540,7 +4509,7 @@ static void
 intel_pre_disable_primary(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 
@@ -4567,7 +4536,7 @@ static void
 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
 
@@ -4626,7 +4595,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 {
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->base.state);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
@@ -4641,14 +4610,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
                struct intel_plane_state *old_primary_state =
                        to_intel_plane_state(old_pri_state);
 
-               intel_fbc_pre_update(crtc);
+               intel_fbc_pre_update(crtc, pipe_config, primary_state);
 
                if (old_primary_state->visible &&
                    (modeset || !primary_state->visible))
                        intel_pre_disable_primary(&crtc->base);
        }
 
-       if (pipe_config->disable_cxsr) {
+       if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
                crtc->wm.cxsr_allowed = false;
 
                /*
@@ -4729,7 +4698,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
@@ -4757,7 +4726,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        if (intel_crtc->config->has_pch_encoder)
                intel_prepare_shared_dpll(intel_crtc);
 
-       if (intel_crtc->config->has_dp_encoder)
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
        intel_set_pipe_timings(intel_crtc);
@@ -4826,7 +4795,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
 static void haswell_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe, hsw_workaround_pipe;
@@ -4841,13 +4810,17 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      false);
 
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->pre_pll_enable)
+                       encoder->pre_pll_enable(encoder);
+
        if (intel_crtc->config->shared_dpll)
                intel_enable_shared_dpll(intel_crtc);
 
-       if (intel_crtc->config->has_dp_encoder)
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_set_pipe_timings(intel_crtc);
 
        intel_set_pipe_src_size(intel_crtc);
@@ -4863,7 +4836,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                                     &intel_crtc->config->fdi_m_n, NULL);
        }
 
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                haswell_set_pipeconf(crtc);
 
        haswell_set_pipemisc(crtc);
@@ -4885,7 +4858,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        if (intel_crtc->config->has_pch_encoder)
                dev_priv->display.fdi_link_train(crtc);
 
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_pipe_clock(intel_crtc);
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -4900,7 +4873,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        intel_color_load_luts(&pipe_config->base);
 
        intel_ddi_set_pipe_settings(crtc);
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_enable_transcoder_func(crtc);
 
        if (dev_priv->display.initial_watermarks != NULL)
@@ -4909,7 +4882,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                intel_update_watermarks(crtc);
 
        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_enable_pipe(intel_crtc);
 
        if (intel_crtc->config->has_pch_encoder)
@@ -4946,7 +4919,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
 
        /* To avoid upsetting the power well on haswell only disable the pfit if
@@ -4961,7 +4934,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
 static void ironlake_crtc_disable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
@@ -5024,7 +4997,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 static void haswell_crtc_disable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5042,13 +5015,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        assert_vblank_disabled(crtc);
 
        /* XXX: Do the pipe assertions at the right place for BXT DSI. */
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_disable_pipe(intel_crtc);
 
        if (intel_crtc->config->dp_encoder_is_mst)
                intel_ddi_set_vc_payload_alloc(crtc, false);
 
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
        if (INTEL_INFO(dev)->gen >= 9)
@@ -5056,7 +5029,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        else
                ironlake_pfit_disable(intel_crtc, false);
 
-       if (!intel_crtc->config->has_dsi_encoder)
+       if (!transcoder_is_dsi(cpu_transcoder))
                intel_ddi_disable_pipe_clock(intel_crtc);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -5076,7 +5049,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *pipe_config = crtc->config;
 
        if (!pipe_config->gmch_pfit.control)
@@ -5146,7 +5119,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
        case INTEL_OUTPUT_UNKNOWN:
                /* Only DDI platforms should ever use this output type */
                WARN_ON_ONCE(!HAS_DDI(dev));
-       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_HDMI:
        case INTEL_OUTPUT_EDP:
                intel_dig_port = enc_to_dig_port(&intel_encoder->base);
@@ -5180,7 +5153,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
                 * run the DP detection too.
                 */
                WARN_ON_ONCE(!HAS_DDI(dev));
-       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_EDP:
                intel_dig_port = enc_to_dig_port(&intel_encoder->base);
                return port_to_aux_power_domain(intel_dig_port->port);
@@ -5228,7 +5201,7 @@ static unsigned long
 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
                               struct intel_crtc_state *crtc_state)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum intel_display_power_domain domain;
        unsigned long domains, new_domains, old_domains;
@@ -5269,21 +5242,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
                return max_cdclk_freq*90/100;
 }
 
+static int skl_calc_cdclk(int max_pixclk, int vco);
+
 static void intel_update_max_cdclk(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
                u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+               int max_cdclk, vco;
+
+               vco = dev_priv->skl_preferred_vco_freq;
+               WARN_ON(vco != 8100000 && vco != 8640000);
 
+               /*
+                * Use the lower (vco 8640) cdclk values as a
+                * first guess. skl_calc_cdclk() will correct it
+                * if the preferred vco is 8100 instead.
+                */
                if (limit == SKL_DFSM_CDCLK_LIMIT_675)
-                       dev_priv->max_cdclk_freq = 675000;
+                       max_cdclk = 617143;
                else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
-                       dev_priv->max_cdclk_freq = 540000;
+                       max_cdclk = 540000;
                else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
-                       dev_priv->max_cdclk_freq = 450000;
+                       max_cdclk = 432000;
                else
-                       dev_priv->max_cdclk_freq = 337500;
+                       max_cdclk = 308571;
+
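+               /* e.g. limit 675: the 617143 guess becomes 675000 if the vco is 8100000 */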
+               dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
        } else if (IS_BROXTON(dev)) {
                dev_priv->max_cdclk_freq = 624000;
        } else if (IS_BROADWELL(dev))  {
@@ -5321,267 +5307,322 @@ static void intel_update_max_cdclk(struct drm_device *dev)
 
 static void intel_update_cdclk(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-       DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
-                        dev_priv->cdclk_freq);
+
+       if (INTEL_GEN(dev_priv) >= 9)
+               DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
+                                dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
+                                dev_priv->cdclk_pll.ref);
+       else
+               DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+                                dev_priv->cdclk_freq);
 
        /*
-        * Program the gmbus_freq based on the cdclk frequency.
-        * BSpec erroneously claims we should aim for 4MHz, but
-        * in fact 1MHz is the correct frequency.
+        * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
+        * Programmng [sic] note: bit[9:2] should be programmed to the number
+        * of cdclk that generates 4MHz reference clock freq which is used to
+        * generate GMBus clock. This will vary with the cdclk freq.
         */
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
-               /*
-                * Program the gmbus_freq based on the cdclk frequency.
-                * BSpec erroneously claims we should aim for 4MHz, but
-                * in fact 1MHz is the correct frequency.
-                */
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-       }
+}
 
-       if (dev_priv->max_cdclk_freq == 0)
-               intel_update_max_cdclk(dev);
+/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
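+/* e.g. 450000 kHz -> (450000 - 1000) / 500 = 898, i.e. 449.0 MHz in .1 fixpoint */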
+static int skl_cdclk_decimal(int cdclk)
+{
+       return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
 }
 
-static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
+static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
 {
-       uint32_t divider;
-       uint32_t ratio;
-       uint32_t current_freq;
-       int ret;
+       int ratio;
 
-       /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
-       switch (frequency) {
+       if (cdclk == dev_priv->cdclk_pll.ref)
+               return 0;
+
+       switch (cdclk) {
+       default:
+               MISSING_CASE(cdclk);
        case 144000:
+       case 288000:
+       case 384000:
+       case 576000:
+               ratio = 60;
+               break;
+       case 624000:
+               ratio = 65;
+               break;
+       }
+
+       return dev_priv->cdclk_pll.ref * ratio;
+}
+
+static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+
+       /* Timeout 200us */
+       if (intel_wait_for_register(dev_priv,
+                                   BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
+                                   1))
+               DRM_ERROR("timeout waiting for DE PLL unlock\n");
+
+       dev_priv->cdclk_pll.vco = 0;
+}
+
+static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+       int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
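+       /* e.g. vco 1248000 kHz with the 19200 kHz ref -> ratio 65 */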
+       u32 val;
+
+       val = I915_READ(BXT_DE_PLL_CTL);
+       val &= ~BXT_DE_PLL_RATIO_MASK;
+       val |= BXT_DE_PLL_RATIO(ratio);
+       I915_WRITE(BXT_DE_PLL_CTL, val);
+
+       I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+
+       /* Timeout 200us */
+       if (intel_wait_for_register(dev_priv,
+                                   BXT_DE_PLL_ENABLE,
+                                   BXT_DE_PLL_LOCK,
+                                   BXT_DE_PLL_LOCK,
+                                   1))
+               DRM_ERROR("timeout waiting for DE PLL lock\n");
+
+       dev_priv->cdclk_pll.vco = vco;
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
+{
+       u32 val, divider;
+       int vco, ret;
+
+       vco = bxt_de_pll_vco(dev_priv, cdclk);
+
+       DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
+
+       /* cdclk = vco / 2 / div{1,1.5,2,4} */
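+       /* e.g. vco 1152000, cdclk 288000: vco / cdclk = 4 -> BXT_CDCLK_CD2X_DIV_SEL_2 */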
+       switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+       case 8:
                divider = BXT_CDCLK_CD2X_DIV_SEL_4;
-               ratio = BXT_DE_PLL_RATIO(60);
                break;
-       case 288000:
+       case 4:
                divider = BXT_CDCLK_CD2X_DIV_SEL_2;
-               ratio = BXT_DE_PLL_RATIO(60);
                break;
-       case 384000:
+       case 3:
                divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
-               ratio = BXT_DE_PLL_RATIO(60);
                break;
-       case 576000:
-               divider = BXT_CDCLK_CD2X_DIV_SEL_1;
-               ratio = BXT_DE_PLL_RATIO(60);
-               break;
-       case 624000:
+       case 2:
                divider = BXT_CDCLK_CD2X_DIV_SEL_1;
-               ratio = BXT_DE_PLL_RATIO(65);
-               break;
-       case 19200:
-               /*
-                * Bypass frequency with DE PLL disabled. Init ratio, divider
-                * to suppress GCC warning.
-                */
-               ratio = 0;
-               divider = 0;
                break;
        default:
-               DRM_ERROR("unsupported CDCLK freq %d", frequency);
+               WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
+               WARN_ON(vco != 0);
 
-               return;
+               divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+               break;
        }
 
-       mutex_lock(&dev_priv->rps.hw_lock);
        /* Inform power controller of upcoming frequency change */
+       mutex_lock(&dev_priv->rps.hw_lock);
        ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
                                      0x80000000);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        if (ret) {
                DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
-                         ret, frequency);
+                         ret, cdclk);
                return;
        }
 
-       current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
-       /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
-       current_freq = current_freq * 500 + 1000;
+       if (dev_priv->cdclk_pll.vco != 0 &&
+           dev_priv->cdclk_pll.vco != vco)
+               bxt_de_pll_disable(dev_priv);
 
-       /*
-        * DE PLL has to be disabled when
-        * - setting to 19.2MHz (bypass, PLL isn't used)
-        * - before setting to 624MHz (PLL needs toggling)
-        * - before setting to any frequency from 624MHz (PLL needs toggling)
-        */
-       if (frequency == 19200 || frequency == 624000 ||
-           current_freq == 624000) {
-               I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
-               /* Timeout 200us */
-               if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
-                            1))
-                       DRM_ERROR("timout waiting for DE PLL unlock\n");
-       }
-
-       if (frequency != 19200) {
-               uint32_t val;
-
-               val = I915_READ(BXT_DE_PLL_CTL);
-               val &= ~BXT_DE_PLL_RATIO_MASK;
-               val |= ratio;
-               I915_WRITE(BXT_DE_PLL_CTL, val);
-
-               I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
-               /* Timeout 200us */
-               if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
-                       DRM_ERROR("timeout waiting for DE PLL lock\n");
-
-               val = I915_READ(CDCLK_CTL);
-               val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
-               val |= divider;
-               /*
-                * Disable SSA Precharge when CD clock frequency < 500 MHz,
-                * enable otherwise.
-                */
-               val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-               if (frequency >= 500000)
-                       val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+       if (dev_priv->cdclk_pll.vco != vco)
+               bxt_de_pll_enable(dev_priv, vco);
 
-               val &= ~CDCLK_FREQ_DECIMAL_MASK;
-               /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
-               val |= (frequency - 1000) / 500;
-               I915_WRITE(CDCLK_CTL, val);
-       }
+       val = divider | skl_cdclk_decimal(cdclk);
+       /*
+        * FIXME if only the cd2x divider needs changing, it could be done
+        * without shutting off the pipe (if only one pipe is active).
+        */
+       val |= BXT_CDCLK_CD2X_PIPE_NONE;
+       /*
+        * Disable SSA Precharge when CD clock frequency < 500 MHz,
+        * enable otherwise.
+        */
+       if (cdclk >= 500000)
+               val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+       I915_WRITE(CDCLK_CTL, val);
 
        mutex_lock(&dev_priv->rps.hw_lock);
        ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-                                     DIV_ROUND_UP(frequency, 25000));
+                                     DIV_ROUND_UP(cdclk, 25000));
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        if (ret) {
                DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
-                         ret, frequency);
+                         ret, cdclk);
                return;
        }
 
-       intel_update_cdclk(dev_priv->dev);
+       intel_update_cdclk(&dev_priv->drm);
 }
 
-static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
-       if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
-               return false;
+       u32 cdctl, expected;
 
-       /* TODO: Check for a valid CDCLK rate */
+       intel_update_cdclk(&dev_priv->drm);
 
-       if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
-               DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
+       if (dev_priv->cdclk_pll.vco == 0 ||
+           dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
+               goto sanitize;
 
-               return false;
-       }
+       /* DPLL okay; verify the cdclock
+        *
+        * Some BIOS versions leave an incorrect decimal frequency value and
+        * set reserved MBZ bits in CDCLK_CTL, at least when exiting from S4,
+        * so sanitize this register.
+        */
+       cdctl = I915_READ(CDCLK_CTL);
+       /*
+        * Let's ignore the pipe field, since the BIOS could have configured
+        * the divider either synced to an active pipe or free-running
+        * (PIPE_NONE).
+        */
+       cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
 
-       if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
-               DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
+       expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
+                  skl_cdclk_decimal(dev_priv->cdclk_freq);
+       /*
+        * Disable SSA Precharge when CD clock frequency < 500 MHz,
+        * enable otherwise.
+        */
+       if (dev_priv->cdclk_freq >= 500000)
+               expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
 
-               return false;
-       }
+       if (cdctl == expected)
+               /* All well; nothing to sanitize */
+               return;
 
-       return true;
-}
+sanitize:
+       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
 
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
-{
-       return broxton_cdclk_is_enabled(dev_priv);
+       /* force cdclk programming */
+       dev_priv->cdclk_freq = 0;
+
+       /* force full PLL disable + enable */
+       dev_priv->cdclk_pll.vco = -1;
 }
 
-void broxton_init_cdclk(struct drm_i915_private *dev_priv)
+void bxt_init_cdclk(struct drm_i915_private *dev_priv)
 {
-       /* check if cd clock is enabled */
-       if (broxton_cdclk_is_enabled(dev_priv)) {
-               DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
-               return;
-       }
+       bxt_sanitize_cdclk(dev_priv);
 
-       DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
+       if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
+               return;
 
        /*
         * FIXME:
         * - The initial CDCLK needs to be read from VBT.
         *   Make this change once the VBT changes for BXT have landed.
-        * - check if setting the max (or any) cdclk freq is really necessary
-        *   here, it belongs to modeset time
         */
-       broxton_set_cdclk(dev_priv, 624000);
-
-       I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
-       POSTING_READ(DBUF_CTL);
+       bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
+}
 
-       udelay(10);
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+       bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
+}
 
-       if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
-               DRM_ERROR("DBuf power enable timeout!\n");
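+/*
+ * Pick the lowest cdclk step that still covers max_pixclk; the available
+ * steps depend on the DPLL0 vco (e.g. vco 8640000, max_pixclk 400000 ->
+ * 432000 kHz).
+ */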
+static int skl_calc_cdclk(int max_pixclk, int vco)
+{
+       if (vco == 8640000) {
+               if (max_pixclk > 540000)
+                       return 617143;
+               else if (max_pixclk > 432000)
+                       return 540000;
+               else if (max_pixclk > 308571)
+                       return 432000;
+               else
+                       return 308571;
+       } else {
+               if (max_pixclk > 540000)
+                       return 675000;
+               else if (max_pixclk > 450000)
+                       return 540000;
+               else if (max_pixclk > 337500)
+                       return 450000;
+               else
+                       return 337500;
+       }
 }
 
-void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void
+skl_dpll0_update(struct drm_i915_private *dev_priv)
 {
-       I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
-       POSTING_READ(DBUF_CTL);
+       u32 val;
 
-       udelay(10);
+       dev_priv->cdclk_pll.ref = 24000;
+       dev_priv->cdclk_pll.vco = 0;
 
-       if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
-               DRM_ERROR("DBuf power disable timeout!\n");
+       val = I915_READ(LCPLL1_CTL);
+       if ((val & LCPLL_PLL_ENABLE) == 0)
+               return;
 
-       /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
-       broxton_set_cdclk(dev_priv, 19200);
-}
+       if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+               return;
 
-static const struct skl_cdclk_entry {
-       unsigned int freq;
-       unsigned int vco;
-} skl_cdclk_frequencies[] = {
-       { .freq = 308570, .vco = 8640 },
-       { .freq = 337500, .vco = 8100 },
-       { .freq = 432000, .vco = 8640 },
-       { .freq = 450000, .vco = 8100 },
-       { .freq = 540000, .vco = 8100 },
-       { .freq = 617140, .vco = 8640 },
-       { .freq = 675000, .vco = 8100 },
-};
+       val = I915_READ(DPLL_CTRL1);
 
-static unsigned int skl_cdclk_decimal(unsigned int freq)
-{
-       return (freq - 1000) / 500;
+       if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+                           DPLL_CTRL1_SSC(SKL_DPLL0) |
+                           DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+                   DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+               return;
+
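+       /*
+        * The DPLL0 link rate implies the vco: 810/1350/1620/2700 MHz all
+        * divide evenly out of 8100 MHz, 1080/2160 MHz out of 8640 MHz.
+        */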
+       switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
+               dev_priv->cdclk_pll.vco = 8100000;
+               break;
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
+       case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
+               dev_priv->cdclk_pll.vco = 8640000;
+               break;
+       default:
+               MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+               break;
+       }
 }
 
-static unsigned int skl_cdclk_get_vco(unsigned int freq)
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
 {
-       unsigned int i;
+       bool changed = dev_priv->skl_preferred_vco_freq != vco;
 
-       for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
-               const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
+       dev_priv->skl_preferred_vco_freq = vco;
 
-               if (e->freq == freq)
-                       return e->vco;
-       }
-
-       return 8100;
+       if (changed)
+               intel_update_max_cdclk(&dev_priv->drm);
 }
 
 static void
-skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
+skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 {
-       unsigned int min_freq;
+       int min_cdclk = skl_calc_cdclk(0, vco);
        u32 val;
 
-       /* select the minimum CDCLK before enabling DPLL 0 */
-       val = I915_READ(CDCLK_CTL);
-       val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
-       val |= CDCLK_FREQ_337_308;
-
-       if (required_vco == 8640)
-               min_freq = 308570;
-       else
-               min_freq = 337500;
-
-       val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
+       WARN_ON(vco != 8100000 && vco != 8640000);
 
+       /* select the minimum CDCLK before enabling DPLL 0 */
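+       /* (vco 8100000 -> 337500 kHz, vco 8640000 -> 308571 kHz) */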
+       val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
        I915_WRITE(CDCLK_CTL, val);
        POSTING_READ(CDCLK_CTL);
 
@@ -5592,14 +5633,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
         * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
         * The modeset code is responsible for the selection of the exact link
         * rate later on, with the constraint of choosing a frequency that
-        * works with required_vco.
+        * works with vco.
         */
        val = I915_READ(DPLL_CTRL1);
 
        val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
                 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
        val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
-       if (required_vco == 8640)
+       if (vco == 8640000)
                val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
                                            SKL_DPLL0);
        else
@@ -5611,8 +5652,27 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
 
        I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
 
-       if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
+       if (intel_wait_for_register(dev_priv,
+                                   LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+                                   5))
                DRM_ERROR("DPLL0 not locked\n");
+
+       dev_priv->cdclk_pll.vco = vco;
+
+       /* We'll want to keep using the current vco from now on. */
+       skl_set_preferred_cdclk_vco(dev_priv, vco);
+}
+
+static void
+skl_dpll0_disable(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+       if (intel_wait_for_register(dev_priv,
+                                  LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+                                  1))
+               DRM_ERROR("Couldn't disable DPLL0\n");
+
+       dev_priv->cdclk_pll.vco = 0;
 }
 
 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
@@ -5642,12 +5702,14 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
        return false;
 }
 
-static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
+static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        u32 freq_select, pcu_ack;
 
-       DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
+       WARN_ON((cdclk == 24000) != (vco == 0));
+
+       DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
 
        if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
                DRM_ERROR("failed to inform PCU about cdclk change\n");
@@ -5655,7 +5717,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
        }
 
        /* set CDCLK_CTL */
-       switch(freq) {
+       switch (cdclk) {
        case 450000:
        case 432000:
                freq_select = CDCLK_FREQ_450_432;
@@ -5665,20 +5727,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
                freq_select = CDCLK_FREQ_540;
                pcu_ack = 2;
                break;
-       case 308570:
+       case 308571:
        case 337500:
        default:
                freq_select = CDCLK_FREQ_337_308;
                pcu_ack = 0;
                break;
-       case 617140:
+       case 617143:
        case 675000:
                freq_select = CDCLK_FREQ_675_617;
                pcu_ack = 3;
                break;
        }
 
-       I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
+       if (dev_priv->cdclk_pll.vco != 0 &&
+           dev_priv->cdclk_pll.vco != vco)
+               skl_dpll0_disable(dev_priv);
+
+       if (dev_priv->cdclk_pll.vco != vco)
+               skl_dpll0_enable(dev_priv, vco);
+
+       I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
        POSTING_READ(CDCLK_CTL);
 
        /* inform PCU of the change */
@@ -5689,52 +5758,41 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
        intel_update_cdclk(dev);
 }
 
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
+
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
 {
-       /* disable DBUF power */
-       I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
-       POSTING_READ(DBUF_CTL);
-
-       udelay(10);
-
-       if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
-               DRM_ERROR("DBuf power disable timeout\n");
-
-       /* disable DPLL0 */
-       I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
-       if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
-               DRM_ERROR("Couldn't disable DPLL0\n");
+       skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
 }
 
 void skl_init_cdclk(struct drm_i915_private *dev_priv)
 {
-       unsigned int required_vco;
-
-       /* DPLL0 not enabled (happens on early BIOS versions) */
-       if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
-               /* enable DPLL0 */
-               required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
-               skl_dpll0_enable(dev_priv, required_vco);
-       }
+       int cdclk, vco;
 
-       /* set CDCLK to the frequency the BIOS chose */
-       skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
+       skl_sanitize_cdclk(dev_priv);
 
-       /* enable DBUF power */
-       I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
-       POSTING_READ(DBUF_CTL);
+       if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
+               /*
+                * Use the current vco as our initial
+                * guess as to what the preferred vco is.
+                */
+               if (dev_priv->skl_preferred_vco_freq == 0)
+                       skl_set_preferred_cdclk_vco(dev_priv,
+                                                   dev_priv->cdclk_pll.vco);
+               return;
+       }
 
-       udelay(10);
+       vco = dev_priv->skl_preferred_vco_freq;
+       if (vco == 0)
+               vco = 8100000;
+       cdclk = skl_calc_cdclk(0, vco);
 
-       if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
-               DRM_ERROR("DBuf power enable timeout\n");
+       skl_set_cdclk(dev_priv, cdclk, vco);
 }
 
-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
-       uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
-       uint32_t cdctl = I915_READ(CDCLK_CTL);
-       int freq = dev_priv->skl_boot_cdclk;
+       uint32_t cdctl, expected;
 
        /*
         * check if the pre-os initialized the display
@@ -5744,8 +5802,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
        if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
                goto sanitize;
 
+       intel_update_cdclk(&dev_priv->drm);
        /* Is PLL enabled and locked ? */
-       if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
+       if (dev_priv->cdclk_pll.vco == 0 ||
+           dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
                goto sanitize;
 
        /* DPLL okay; verify the cdclock
@@ -5754,25 +5814,26 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
         * decimal part is programmed wrong from BIOS where pre-os does not
         * enable display. Verify the same as well.
         */
-       if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
+       cdctl = I915_READ(CDCLK_CTL);
+       expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
+               skl_cdclk_decimal(dev_priv->cdclk_freq);
+       if (cdctl == expected)
                /* All well; nothing to sanitize */
-               return false;
+               return;
+
 sanitize:
-       /*
-        * As of now initialize with max cdclk till
-        * we get dynamic cdclk support
-        * */
-       dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
-       skl_init_cdclk(dev_priv);
+       DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
 
-       /* we did have to sanitize */
-       return true;
+       /* force cdclk programming */
+       dev_priv->cdclk_freq = 0;
+       /* force full PLL disable + enable */
+       dev_priv->cdclk_pll.vco = -1;
 }
 
 /* Adjust CDclk dividers to allow high res or save power if possible */
 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val, cmd;
 
        WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -5837,7 +5898,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 
 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val, cmd;
 
        WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -5906,21 +5967,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
                return 200000;
 }
 
-static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
-                             int max_pixclk)
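+/* e.g. max_pixclk 300000 -> 384000 kHz, the lowest step above the pixel rate */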
+static int bxt_calc_cdclk(int max_pixclk)
 {
-       /*
-        * FIXME:
-        * - remove the guardband, it's not needed on BXT
-        * - set 19.2MHz bypass frequency if there are no active pipes
-        */
-       if (max_pixclk > 576000*9/10)
+       if (max_pixclk > 576000)
                return 624000;
-       else if (max_pixclk > 384000*9/10)
+       else if (max_pixclk > 384000)
                return 576000;
-       else if (max_pixclk > 288000*9/10)
+       else if (max_pixclk > 288000)
                return 384000;
-       else if (max_pixclk > 144000*9/10)
+       else if (max_pixclk > 144000)
                return 288000;
        else
                return 144000;
@@ -5931,7 +5986,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
                                 struct drm_atomic_state *state)
 {
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        unsigned max_pixclk = 0, i;
@@ -5958,14 +6013,11 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int max_pixclk = intel_mode_max_pixclk(dev, state);
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(state);
 
-       if (max_pixclk < 0)
-               return max_pixclk;
-
        intel_state->cdclk = intel_state->dev_cdclk =
                valleyview_calc_cdclk(dev_priv, max_pixclk);
 
@@ -5975,22 +6027,17 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
        return 0;
 }
 
-static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
+static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
-       struct drm_device *dev = state->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int max_pixclk = intel_mode_max_pixclk(dev, state);
+       int max_pixclk = ilk_max_pixel_rate(state);
        struct intel_atomic_state *intel_state =
                to_intel_atomic_state(state);
 
-       if (max_pixclk < 0)
-               return max_pixclk;
-
        intel_state->cdclk = intel_state->dev_cdclk =
-               broxton_calc_cdclk(dev_priv, max_pixclk);
+               bxt_calc_cdclk(max_pixclk);
 
        if (!intel_state->active_crtcs)
-               intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
+               intel_state->dev_cdclk = bxt_calc_cdclk(0);
 
        return 0;
 }
@@ -6034,7 +6081,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = old_state->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        unsigned req_cdclk = old_intel_state->dev_cdclk;
@@ -6073,14 +6120,14 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        if (WARN_ON(intel_crtc->active))
                return;
 
-       if (intel_crtc->config->has_dp_encoder)
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
        intel_set_pipe_timings(intel_crtc);
        intel_set_pipe_src_size(intel_crtc);
 
        if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
-               struct drm_i915_private *dev_priv = dev->dev_private;
+               struct drm_i915_private *dev_priv = to_i915(dev);
 
                I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
                I915_WRITE(CHV_CANVAS(pipe), 0);
@@ -6125,7 +6172,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
        I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
@@ -6146,7 +6193,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
        i9xx_set_pll_dividers(intel_crtc);
 
-       if (intel_crtc->config->has_dp_encoder)
+       if (intel_crtc_has_dp_encoder(intel_crtc->config))
                intel_dp_set_m_n(intel_crtc, M1_N1);
 
        intel_set_pipe_timings(intel_crtc);
@@ -6182,7 +6229,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!crtc->config->gmch_pfit.control)
                return;
@@ -6197,7 +6244,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
 static void i9xx_crtc_disable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
@@ -6223,7 +6270,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
                if (encoder->post_disable)
                        encoder->post_disable(encoder);
 
-       if (!intel_crtc->config->has_dsi_encoder) {
+       if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
                if (IS_CHERRYVIEW(dev))
                        chv_disable_pll(dev_priv, pipe);
                else if (IS_VALLEYVIEW(dev))
@@ -6252,7 +6299,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
                return;
 
        if (to_intel_plane_state(crtc->primary->state)->visible) {
-               WARN_ON(intel_crtc->unpin_work);
+               WARN_ON(intel_crtc->flip_work);
 
                intel_pre_disable_primary_noatomic(crtc);
 
@@ -6262,8 +6309,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 
        dev_priv->display.crtc_disable(crtc);
 
-       DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
-                     crtc->base.id);
+       DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+                     crtc->base.id, crtc->name);
 
        WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
        crtc->state->active = false;
@@ -6541,7 +6588,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        pipe_config->ips_enabled = i915.enable_ips &&
                hsw_crtc_supports_ips(crtc) &&
@@ -6561,12 +6608,12 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+       int clock_limit = dev_priv->max_dotclk_freq;
 
-       /* FIXME should check pixel clock limits on all platforms */
        if (INTEL_INFO(dev)->gen < 4) {
-               int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
+               clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
 
                /*
                 * Enable double wide mode when the dot clock
@@ -6574,16 +6621,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
                 */
                if (intel_crtc_supports_double_wide(crtc) &&
                    adjusted_mode->crtc_clock > clock_limit) {
-                       clock_limit *= 2;
+                       clock_limit = dev_priv->max_dotclk_freq;
                        pipe_config->double_wide = true;
                }
+       }
 
-               if (adjusted_mode->crtc_clock > clock_limit) {
-                       DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
-                                     adjusted_mode->crtc_clock, clock_limit,
-                                     yesno(pipe_config->double_wide));
-                       return -EINVAL;
-               }
+       if (adjusted_mode->crtc_clock > clock_limit) {
+               DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+                             adjusted_mode->crtc_clock, clock_limit,
+                             yesno(pipe_config->double_wide));
+               return -EINVAL;
        }
 
        /*
@@ -6592,7 +6639,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
         * - LVDS dual channel mode
         * - Double wide pipe
         */
-       if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
+       if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
             intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
                pipe_config->pipe_src_w &= ~1;
 
@@ -6615,81 +6662,103 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 static int skylake_get_display_clock_speed(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
-       uint32_t cdctl = I915_READ(CDCLK_CTL);
-       uint32_t linkrate;
+       uint32_t cdctl;
 
-       if (!(lcpll1 & LCPLL_PLL_ENABLE))
-               return 24000; /* 24MHz is the cd freq with NSSC ref */
+       skl_dpll0_update(dev_priv);
 
-       if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
-               return 540000;
+       if (dev_priv->cdclk_pll.vco == 0)
+               return dev_priv->cdclk_pll.ref;
 
-       linkrate = (I915_READ(DPLL_CTRL1) &
-                   DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+       cdctl = I915_READ(CDCLK_CTL);
 
-       if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
-           linkrate == DPLL_CTRL1_LINK_RATE_1080) {
-               /* vco 8640 */
+       if (dev_priv->cdclk_pll.vco == 8640000) {
                switch (cdctl & CDCLK_FREQ_SEL_MASK) {
                case CDCLK_FREQ_450_432:
                        return 432000;
                case CDCLK_FREQ_337_308:
-                       return 308570;
+                       return 308571;
+               case CDCLK_FREQ_540:
+                       return 540000;
                case CDCLK_FREQ_675_617:
-                       return 617140;
+                       return 617143;
                default:
-                       WARN(1, "Unknown cd freq selection\n");
+                       MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
                }
        } else {
-               /* vco 8100 */
                switch (cdctl & CDCLK_FREQ_SEL_MASK) {
                case CDCLK_FREQ_450_432:
                        return 450000;
                case CDCLK_FREQ_337_308:
                        return 337500;
+               case CDCLK_FREQ_540:
+                       return 540000;
                case CDCLK_FREQ_675_617:
                        return 675000;
                default:
-                       WARN(1, "Unknown cd freq selection\n");
+                       MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
                }
        }
 
-       /* error case, do as if DPLL0 isn't enabled */
-       return 24000;
+       return dev_priv->cdclk_pll.ref;
+}
+
+static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
+{
+       u32 val;
+
+       dev_priv->cdclk_pll.ref = 19200;
+       dev_priv->cdclk_pll.vco = 0;
+
+       val = I915_READ(BXT_DE_PLL_ENABLE);
+       if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+               return;
+
+       if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
+               return;
+
+       val = I915_READ(BXT_DE_PLL_CTL);
+       dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
+               dev_priv->cdclk_pll.ref;
 }
 
 static int broxton_get_display_clock_speed(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       uint32_t cdctl = I915_READ(CDCLK_CTL);
-       uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
-       uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
-       int cdclk;
+       u32 divider;
+       int div, vco;
 
-       if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
-               return 19200;
+       bxt_de_pll_update(dev_priv);
 
-       cdclk = 19200 * pll_ratio / 2;
+       vco = dev_priv->cdclk_pll.vco;
+       if (vco == 0)
+               return dev_priv->cdclk_pll.ref;
+
+       divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
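+       /* e.g. vco 1152000 with BXT_CDCLK_CD2X_DIV_SEL_2 (div 4) -> cdclk 288000 */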
 
-       switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
+       switch (divider) {
        case BXT_CDCLK_CD2X_DIV_SEL_1:
-               return cdclk;  /* 576MHz or 624MHz */
+               div = 2;
+               break;
        case BXT_CDCLK_CD2X_DIV_SEL_1_5:
-               return cdclk * 2 / 3; /* 384MHz */
+               div = 3;
+               break;
        case BXT_CDCLK_CD2X_DIV_SEL_2:
-               return cdclk / 2; /* 288MHz */
+               div = 4;
+               break;
        case BXT_CDCLK_CD2X_DIV_SEL_4:
-               return cdclk / 4; /* 144MHz */
+               div = 8;
+               break;
+       default:
+               MISSING_CASE(divider);
+               return dev_priv->cdclk_pll.ref;
        }
 
-       /* error case, do as if DE PLL isn't enabled */
-       return 19200;
+       return DIV_ROUND_CLOSEST(vco, div);
 }
 
 static int broadwell_get_display_clock_speed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t lcpll = I915_READ(LCPLL_CTL);
        uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
@@ -6709,7 +6778,7 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
 
 static int haswell_get_display_clock_speed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t lcpll = I915_READ(LCPLL_CTL);
        uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
@@ -6843,7 +6912,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
 
 static unsigned int intel_hpll_vco(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        static const unsigned int blb_vco[8] = {
                [0] = 3200000,
                [1] = 4000000,
@@ -7063,7 +7132,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
 
 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
                                     struct intel_crtc_state *crtc_state,
-                                    intel_clock_t *reduced_clock)
+                                    struct dpll *reduced_clock)
 {
        struct drm_device *dev = crtc->base.dev;
        u32 fp, fp2 = 0;
@@ -7081,7 +7150,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
        crtc_state->dpll_hw_state.fp0 = fp;
 
        crtc->lowfreq_avail = false;
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
            reduced_clock) {
                crtc_state->dpll_hw_state.fp1 = fp2;
                crtc->lowfreq_avail = true;
@@ -7123,7 +7192,7 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
 
        I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
@@ -7137,7 +7206,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m2_n2)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        enum transcoder transcoder = crtc->config->cpu_transcoder;
 
@@ -7200,7 +7269,7 @@ static void vlv_compute_dpll(struct intel_crtc *crtc,
                pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
        /* DPLL not used with DSI, but still need the rest set up */
-       if (!pipe_config->has_dsi_encoder)
+       if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
                pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
                        DPLL_EXT_BUFFER_ENABLE_VLV;
 
@@ -7217,7 +7286,7 @@ static void chv_compute_dpll(struct intel_crtc *crtc,
                pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
        /* DPLL not used with DSI, but still need the rest set up */
-       if (!pipe_config->has_dsi_encoder)
+       if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
                pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
 
        pipe_config->dpll_hw_state.dpll_md =
@@ -7228,7 +7297,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        u32 mdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -7287,15 +7356,15 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 
        /* Set HBR and RBR LPF coefficients */
        if (pipe_config->port_clock == 162000 ||
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+           intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
+           intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x009f0003);
        else
                vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
                                 0x00d0000f);
 
-       if (pipe_config->has_dp_encoder) {
+       if (intel_crtc_has_dp_encoder(pipe_config)) {
                /* Use SSC source */
                if (pipe == PIPE_A)
                        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -7315,8 +7384,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 
        coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+       if (intel_crtc_has_dp_encoder(crtc->config))
                coreclk |= 0x01000000;
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
@@ -7328,7 +7396,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
                            const struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        u32 loopfilter, tribuf_calcntr;
@@ -7487,22 +7555,18 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
 
 static void i9xx_compute_dpll(struct intel_crtc *crtc,
                              struct intel_crtc_state *crtc_state,
-                             intel_clock_t *reduced_clock)
+                             struct dpll *reduced_clock)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll;
-       bool is_sdvo;
        struct dpll *clock = &crtc_state->dpll;
 
        i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
 
-       is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
-               intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
-
        dpll = DPLL_VGA_MODE_DIS;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -7512,10 +7576,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
                        << SDVO_MULTIPLIER_SHIFT_HIRES;
        }
 
-       if (is_sdvo)
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                dpll |= DPLL_SDVO_HIGH_SPEED;
 
-       if (crtc_state->has_dp_encoder)
+       if (intel_crtc_has_dp_encoder(crtc_state))
                dpll |= DPLL_SDVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
@@ -7545,7 +7610,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
 
        if (crtc_state->sdvo_tv_clock)
                dpll |= PLL_REF_INPUT_TVCLKINBC;
-       else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+       else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
@@ -7563,10 +7628,10 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
 
 static void i8xx_compute_dpll(struct intel_crtc *crtc,
                              struct intel_crtc_state *crtc_state,
-                             intel_clock_t *reduced_clock)
+                             struct dpll *reduced_clock)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll;
        struct dpll *clock = &crtc_state->dpll;
 
@@ -7574,7 +7639,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 
        dpll = DPLL_VGA_MODE_DIS;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
        } else {
                if (clock->p1 == 2)
@@ -7585,10 +7650,10 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }
 
-       if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
+       if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
                dpll |= DPLL_DVO_2X_MODE;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
            intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
@@ -7601,7 +7666,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_crtc->pipe;
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -7618,7 +7683,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
                crtc_vtotal -= 1;
                crtc_vblank_end -= 1;
 
-               if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+               if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
                        vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
                else
                        vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7663,7 +7728,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_crtc->pipe;
 
        /* pipesrc controls the size that is scaled from, which should
@@ -7678,7 +7743,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        uint32_t tmp;
 
@@ -7713,7 +7778,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp;
 
        tmp = I915_READ(PIPESRC(crtc->pipe));
@@ -7751,7 +7816,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t pipeconf;
 
        pipeconf = 0;
@@ -7797,7 +7862,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 
        if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                if (INTEL_INFO(dev)->gen < 4 ||
-                   intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+                   intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
                        pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
                else
                        pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7816,21 +7881,21 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
                                   struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const intel_limit_t *limit;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       const struct intel_limit *limit;
        int refclk = 48000;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
                }
 
                limit = &intel_limits_i8xx_lvds;
-       } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
                limit = &intel_limits_i8xx_dvo;
        } else {
                limit = &intel_limits_i8xx_dac;
@@ -7852,14 +7917,14 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const intel_limit_t *limit;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       const struct intel_limit *limit;
        int refclk = 96000;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7869,10 +7934,10 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
                        limit = &intel_limits_g4x_dual_channel_lvds;
                else
                        limit = &intel_limits_g4x_single_channel_lvds;
-       } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
-                  intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+                  intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
                limit = &intel_limits_g4x_hdmi;
-       } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
                limit = &intel_limits_g4x_sdvo;
        } else {
                /* The option is for other outputs */
@@ -7895,14 +7960,14 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const intel_limit_t *limit;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       const struct intel_limit *limit;
        int refclk = 96000;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7929,14 +7994,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
                                   struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const intel_limit_t *limit;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       const struct intel_limit *limit;
        int refclk = 96000;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        refclk = dev_priv->vbt.lvds_ssc_freq;
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7963,7 +8028,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state)
 {
        int refclk = 100000;
-       const intel_limit_t *limit = &intel_limits_chv;
+       const struct intel_limit *limit = &intel_limits_chv;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
@@ -7984,7 +8049,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state)
 {
        int refclk = 100000;
-       const intel_limit_t *limit = &intel_limits_vlv;
+       const struct intel_limit *limit = &intel_limits_vlv;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
@@ -8005,7 +8070,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp;
 
        if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
@@ -8032,9 +8097,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
-       intel_clock_t clock;
+       struct dpll clock;
        u32 mdiv;
        int refclk = 100000;
 
@@ -8060,7 +8125,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val, base, offset;
        int pipe = crtc->pipe, plane = crtc->plane;
        int fourcc, pixel_format;
@@ -8128,10 +8193,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
-       intel_clock_t clock;
+       struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000;
 
@@ -8162,7 +8227,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        uint32_t tmp;
        bool ret;
@@ -8273,14 +8338,16 @@ out:
 
 static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
+       int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
+       bool using_ssc_source = false;
 
        /* We need to take the global config into account */
        for_each_intel_encoder(dev, encoder) {
@@ -8307,8 +8374,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
                can_ssc = true;
        }
 
-       DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
-                     has_panel, has_lvds, has_ck505);
+       /* Check if any DPLLs are using the SSC source */
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               u32 temp = I915_READ(PCH_DPLL(i));
+
+               if (!(temp & DPLL_VCO_ENABLE))
+                       continue;
+
+               if ((temp & PLL_REF_INPUT_MASK) ==
+                   PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+                       using_ssc_source = true;
+                       break;
+               }
+       }
+
+       DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+                     has_panel, has_lvds, has_ck505, using_ssc_source);
 
        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
@@ -8345,9 +8426,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-       } else {
-               final |= DREF_SSC_SOURCE_DISABLE;
-               final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+       } else if (using_ssc_source) {
+               final |= DREF_SSC_SOURCE_ENABLE;
+               final |= DREF_SSC1_ENABLE;
        }
 
        if (final == val)
@@ -8393,7 +8474,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
-               DRM_DEBUG_KMS("Disabling SSC entirely\n");
+               DRM_DEBUG_KMS("Disabling CPU source output\n");
 
                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
@@ -8404,16 +8485,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
 
-               /* Turn off the SSC source */
-               val &= ~DREF_SSC_SOURCE_MASK;
-               val |= DREF_SSC_SOURCE_DISABLE;
+               if (!using_ssc_source) {
+                       DRM_DEBUG_KMS("Disabling SSC source\n");
 
-               /* Turn off SSC1 */
-               val &= ~DREF_SSC1_ENABLE;
+                       /* Turn off the SSC source */
+                       val &= ~DREF_SSC_SOURCE_MASK;
+                       val |= DREF_SSC_SOURCE_DISABLE;
 
-               I915_WRITE(PCH_DREF_CONTROL, val);
-               POSTING_READ(PCH_DREF_CONTROL);
-               udelay(200);
+                       /* Turn off SSC1 */
+                       val &= ~DREF_SSC1_ENABLE;
+
+                       I915_WRITE(PCH_DREF_CONTROL, val);
+                       POSTING_READ(PCH_DREF_CONTROL);
+                       udelay(200);
+               }
        }
 
        BUG_ON(val != final);
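
[Annotation] The point of the new using_ssc_source bookkeeping: the old code disabled the SSC source whenever no panel was present, which could cut the reference clock out from under an already-enabled PCH DPLL. The loop added above reduces to a predicate along these lines (pch_dpll_uses_ssc is a hypothetical name, for illustration only):

static bool pch_dpll_uses_ssc(struct drm_i915_private *dev_priv, int i)
{
	u32 temp = I915_READ(PCH_DPLL(i));

	/* An enabled DPLL whose reference-input field selects the
	 * spread-spectrum input still needs the SSC source running. */
	return (temp & DPLL_VCO_ENABLE) &&
	       (temp & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN;
}
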
@@ -8427,16 +8512,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);
 
-       if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
-                              FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+       if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
+                       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                DRM_ERROR("FDI mPHY reset assert timeout\n");
 
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);
 
-       if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
-                               FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+       if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
+                        FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                DRM_ERROR("FDI mPHY reset de-assert timeout\n");
 }
 
@@ -8524,7 +8609,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
                                 bool with_fdi)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t reg, tmp;
 
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
@@ -8563,7 +8648,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
 /* Sequence to disable CLKOUT_DP */
 static void lpt_disable_clkout_dp(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t reg, tmp;
 
        mutex_lock(&dev_priv->sb_lock);
@@ -8684,7 +8769,7 @@ void intel_init_pch_refclk(struct drm_device *dev)
 
 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        uint32_t val;
@@ -8726,7 +8811,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 
 static void haswell_set_pipeconf(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        u32 val = 0;
@@ -8745,7 +8830,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
 
 static void haswell_set_pipemisc(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
        if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
@@ -8794,41 +8879,17 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
 
 static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
                                  struct intel_crtc_state *crtc_state,
-                                 intel_clock_t *reduced_clock)
+                                 struct dpll *reduced_clock)
 {
        struct drm_crtc *crtc = &intel_crtc->base;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_atomic_state *state = crtc_state->base.state;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       struct intel_encoder *encoder;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll, fp, fp2;
-       int factor, i;
-       bool is_lvds = false, is_sdvo = false;
-
-       for_each_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != crtc_state->base.crtc)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-
-               switch (encoder->type) {
-               case INTEL_OUTPUT_LVDS:
-                       is_lvds = true;
-                       break;
-               case INTEL_OUTPUT_SDVO:
-               case INTEL_OUTPUT_HDMI:
-                       is_sdvo = true;
-                       break;
-               default:
-                       break;
-               }
-       }
+       int factor;
 
        /* Enable autotuning of the PLL clock (if permissible) */
        factor = 21;
-       if (is_lvds) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if ((intel_panel_use_ssc(dev_priv) &&
                     dev_priv->vbt.lvds_ssc_freq == 100000) ||
                    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
@@ -8852,7 +8913,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 
        dpll = 0;
 
-       if (is_lvds)
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
                dpll |= DPLLB_MODE_LVDS;
        else
                dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -8860,9 +8921,11 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
        dpll |= (crtc_state->pixel_multiplier - 1)
                << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 
-       if (is_sdvo)
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+           intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                dpll |= DPLL_SDVO_HIGH_SPEED;
-       if (crtc_state->has_dp_encoder)
+
+       if (intel_crtc_has_dp_encoder(crtc_state))
                dpll |= DPLL_SDVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
@@ -8885,7 +8948,8 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
                break;
        }
 
-       if (is_lvds && intel_panel_use_ssc(dev_priv))
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+           intel_panel_use_ssc(dev_priv))
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
        else
                dpll |= PLL_REF_INPUT_DREFCLK;
@@ -8901,11 +8965,11 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
                                       struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       intel_clock_t reduced_clock;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct dpll reduced_clock;
        bool has_reduced_clock = false;
        struct intel_shared_dpll *pll;
-       const intel_limit_t *limit;
+       const struct intel_limit *limit;
        int refclk = 120000;
 
        memset(&crtc_state->dpll_hw_state, 0,
@@ -8917,7 +8981,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
        if (!crtc_state->has_pch_encoder)
                return 0;
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
                if (intel_panel_use_ssc(dev_priv)) {
                        DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
                                      dev_priv->vbt.lvds_ssc_freq);
@@ -8956,7 +9020,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
                return -EINVAL;
        }
 
-       if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
            has_reduced_clock)
                crtc->lowfreq_avail = true;
 
@@ -8967,7 +9031,7 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m_n)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
 
        m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
@@ -8985,7 +9049,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
                                         struct intel_link_m_n *m2_n2)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = crtc->pipe;
 
        if (INTEL_INFO(dev)->gen >= 5) {
@@ -9043,7 +9107,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
        uint32_t ps_ctrl = 0;
        int id = -1;
@@ -9074,7 +9138,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
                                 struct intel_initial_plane_config *plane_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val, base, offset, stride_mult, tiling;
        int pipe = crtc->pipe;
        int fourcc, pixel_format;
@@ -9157,7 +9221,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp;
 
        tmp = I915_READ(PF_CTL(crtc->pipe));
@@ -9182,7 +9246,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
                                  struct intel_initial_plane_config *plane_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val, base, offset;
        int pipe = crtc->pipe;
        int fourcc, pixel_format;
@@ -9250,7 +9314,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
                                     struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        uint32_t tmp;
        bool ret;
@@ -9300,6 +9364,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
                ironlake_get_fdi_m_n_config(crtc, pipe_config);
 
                if (HAS_PCH_IBX(dev_priv)) {
+                       /*
+                        * The pipe->pch transcoder and pch transcoder->pll
+                        * mapping is fixed.
+                        */
                        pll_id = (enum intel_dpll_id) crtc->pipe;
                } else {
                        tmp = I915_READ(PCH_DPLL_SEL);
@@ -9341,7 +9409,7 @@ out:
 
 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
 
        for_each_intel_crtc(dev, crtc)
@@ -9375,7 +9443,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 
 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        if (IS_HASWELL(dev))
                return I915_READ(D_COMP_HSW);
@@ -9385,7 +9453,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
 
 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        if (IS_HASWELL(dev)) {
                mutex_lock(&dev_priv->rps.hw_lock);
@@ -9420,8 +9488,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
                val |= LCPLL_CD_SOURCE_FCLK;
                I915_WRITE(LCPLL_CTL, val);
 
-               if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
-                                      LCPLL_CD_SOURCE_FCLK_DONE, 1))
+               if (wait_for_us(I915_READ(LCPLL_CTL) &
+                               LCPLL_CD_SOURCE_FCLK_DONE, 1))
                        DRM_ERROR("Switching to FCLK failed\n");
 
                val = I915_READ(LCPLL_CTL);
@@ -9431,7 +9499,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
        I915_WRITE(LCPLL_CTL, val);
        POSTING_READ(LCPLL_CTL);
 
-       if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+       if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
                DRM_ERROR("LCPLL still locked\n");
 
        val = hsw_read_dcomp(dev_priv);
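
[Annotation] Open-coded wait_for() polls on LCPLL_CTL are converted to intel_wait_for_register(). From the call sites in this hunk and the next, its contract appears to be: poll until (I915_READ(reg) & mask) == value, returning non-zero on timeout, with the last argument in milliseconds. A minimal functional sketch under that assumption:

/* Sketch only; the real helper is introduced elsewhere in this merge
 * and likely does a fast busy-wait before falling back to sleeping. */
static int intel_wait_for_register(struct drm_i915_private *dev_priv,
				   i915_reg_t reg, u32 mask, u32 value,
				   unsigned int timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}
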
@@ -9486,7 +9554,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
        val &= ~LCPLL_PLL_DISABLE;
        I915_WRITE(LCPLL_CTL, val);
 
-       if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+       if (intel_wait_for_register(dev_priv,
+                                   LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+                                   5))
                DRM_ERROR("LCPLL not locked yet\n");
 
        if (val & LCPLL_CD_SOURCE_FCLK) {
@@ -9494,13 +9564,13 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
                val &= ~LCPLL_CD_SOURCE_FCLK;
                I915_WRITE(LCPLL_CTL, val);
 
-               if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
-                                       LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+               if (wait_for_us((I915_READ(LCPLL_CTL) &
+                                LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
                        DRM_ERROR("Switching back to LCPLL failed\n");
        }
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-       intel_update_cdclk(dev_priv->dev);
+       intel_update_cdclk(&dev_priv->drm);
 }
 
 /*
@@ -9528,7 +9598,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
  */
 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        uint32_t val;
 
        DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -9545,7 +9615,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 
 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        uint32_t val;
 
        DRM_DEBUG_KMS("Disabling package C8+\n");
@@ -9560,21 +9630,21 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
        }
 }
 
-static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = old_state->dev;
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);
        unsigned int req_cdclk = old_intel_state->dev_cdclk;
 
-       broxton_set_cdclk(to_i915(dev), req_cdclk);
+       bxt_set_cdclk(to_i915(dev), req_cdclk);
 }
 
 /* compute the max rate for new configuration */
 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 {
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = state->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct drm_crtc *crtc;
        struct drm_crtc_state *cstate;
        struct intel_crtc_state *crtc_state;
@@ -9610,7 +9680,7 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 
 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t val, data;
        int ret;
 
@@ -9687,6 +9757,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
             cdclk, dev_priv->cdclk_freq);
 }
 
+static int broadwell_calc_cdclk(int max_pixclk)
+{
+       if (max_pixclk > 540000)
+               return 675000;
+       else if (max_pixclk > 450000)
+               return 540000;
+       else if (max_pixclk > 337500)
+               return 450000;
+       else
+               return 337500;
+}
+
 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
        struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -9698,14 +9780,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
         * FIXME should also account for plane ratio
         * once 64bpp pixel formats are supported.
         */
-       if (max_pixclk > 540000)
-               cdclk = 675000;
-       else if (max_pixclk > 450000)
-               cdclk = 540000;
-       else if (max_pixclk > 337500)
-               cdclk = 450000;
-       else
-               cdclk = 337500;
+       cdclk = broadwell_calc_cdclk(max_pixclk);
 
        if (cdclk > dev_priv->max_cdclk_freq) {
                DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
@@ -9715,7 +9790,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
 
        intel_state->cdclk = intel_state->dev_cdclk = cdclk;
        if (!intel_state->active_crtcs)
-               intel_state->dev_cdclk = 337500;
+               intel_state->dev_cdclk = broadwell_calc_cdclk(0);
 
        return 0;
 }
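
[Annotation] Factoring the frequency ladder into broadwell_calc_cdclk() lets the idle case reuse it instead of a bare 337500 literal: broadwell_calc_cdclk(0) falls through to the same floor value. Worked examples of the ladder:

broadwell_calc_cdclk(600000);	/* > 540000 -> 675000 kHz */
broadwell_calc_cdclk(500000);	/* > 450000 -> 540000 kHz */
broadwell_calc_cdclk(0);	/* floor    -> 337500 kHz (no active CRTCs) */
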
@@ -9730,13 +9805,51 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
        broadwell_set_cdclk(dev, req_cdclk);
 }
 
+static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_i915_private *dev_priv = to_i915(state->dev);
+       const int max_pixclk = ilk_max_pixel_rate(state);
+       int vco = intel_state->cdclk_pll_vco;
+       int cdclk;
+
+       /*
+        * FIXME should also account for plane ratio
+        * once 64bpp pixel formats are supported.
+        */
+       cdclk = skl_calc_cdclk(max_pixclk, vco);
+
+       /*
+        * FIXME move the cdclk calculation to
+        * compute_config() so we can fail gracefully.

+        */
+       if (cdclk > dev_priv->max_cdclk_freq) {
+               DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+                         cdclk, dev_priv->max_cdclk_freq);
+               cdclk = dev_priv->max_cdclk_freq;
+       }
+
+       intel_state->cdclk = intel_state->dev_cdclk = cdclk;
+       if (!intel_state->active_crtcs)
+               intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
+
+       return 0;
+}
+
+static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(old_state->dev);
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
+       unsigned int req_cdclk = intel_state->dev_cdclk;
+       unsigned int req_vco = intel_state->cdclk_pll_vco;
+
+       skl_set_cdclk(dev_priv, req_cdclk, req_vco);
+}
+
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
                                      struct intel_crtc_state *crtc_state)
 {
-       struct intel_encoder *intel_encoder =
-               intel_ddi_get_crtc_new_encoder(crtc_state);
-
-       if (intel_encoder->type != INTEL_OUTPUT_DSI) {
+       if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
                if (!intel_ddi_pll_select(crtc, crtc_state))
                        return -EINVAL;
        }
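
[Annotation] The new SKL pair follows the calc/commit split already used for Broadwell and Broxton: the cdclk (and here the CDCLK PLL VCO) is computed during the atomic check and stashed in the intel_atomic_state, then applied by the commit hook. Presumably they are wired up through the same display vtable as their siblings; the field names below follow the existing pattern in this file and should be treated as an assumption:

/* Hedged sketch of the hookup, mirroring the broadwell/bxt hooks: */
dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
dev_priv->display.modeset_commit_cdclk = skl_modeset_commit_cdclk;
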
@@ -9846,10 +9959,14 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
                                     unsigned long *power_domain_mask)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        u32 tmp;
 
+       /*
+        * The pipe->transcoder mapping is fixed with the exception of the eDP
+        * transcoder handled below.
+        */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 
        /*
@@ -9893,14 +10010,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                                         unsigned long *power_domain_mask)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum port port;
        enum transcoder cpu_transcoder;
        u32 tmp;
 
-       pipe_config->has_dsi_encoder = false;
-
        for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
                if (port == PORT_A)
                        cpu_transcoder = TRANSCODER_DSI_A;
@@ -9932,18 +10047,17 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
                        continue;
 
                pipe_config->cpu_transcoder = cpu_transcoder;
-               pipe_config->has_dsi_encoder = true;
                break;
        }
 
-       return pipe_config->has_dsi_encoder;
+       return transcoder_is_dsi(pipe_config->cpu_transcoder);
 }
 
 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
                                       struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_shared_dpll *pll;
        enum port port;
        uint32_t tmp;
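
[Annotation] With has_dsi_encoder gone, DSI-ness is derived from the transcoder itself. A plausible shape for the transcoder_is_dsi() predicate used above, assuming the two BXT DSI transcoders seen in this function:

static inline bool transcoder_is_dsi(enum transcoder cpu_transcoder)
{
	return cpu_transcoder == TRANSCODER_DSI_A ||
	       cpu_transcoder == TRANSCODER_DSI_C;
}
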
@@ -9986,7 +10100,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
                                    struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        unsigned long power_domain_mask;
        bool active;
@@ -10000,18 +10114,16 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 
        active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
 
-       if (IS_BROXTON(dev_priv)) {
-               bxt_get_dsi_transcoder_state(crtc, pipe_config,
-                                            &power_domain_mask);
-               WARN_ON(active && pipe_config->has_dsi_encoder);
-               if (pipe_config->has_dsi_encoder)
-                       active = true;
+       if (IS_BROXTON(dev_priv) &&
+           bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
+               WARN_ON(active);
+               active = true;
        }
 
        if (!active)
                goto out;
 
-       if (!pipe_config->has_dsi_encoder) {
+       if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
                haswell_get_ddi_port_state(crtc, pipe_config);
                intel_get_pipe_timings(crtc, pipe_config);
        }
@@ -10062,7 +10174,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
                               const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t cntl = 0, size = 0;
 
@@ -10125,7 +10237,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
                               const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        uint32_t cntl = 0;
@@ -10173,7 +10285,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
                                     const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
        u32 base = intel_crtc->cursor_addr;
@@ -10317,10 +10429,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
        struct drm_i915_gem_object *obj;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 
-       obj = i915_gem_alloc_object(dev,
+       obj = i915_gem_object_create(dev,
                                    intel_framebuffer_size_for_mode(mode, bpp));
-       if (obj == NULL)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(obj))
+               return ERR_CAST(obj);
 
        mode_cmd.width = mode->hdisplay;
        mode_cmd.height = mode->vdisplay;
@@ -10340,7 +10452,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
                   struct drm_display_mode *mode)
 {
 #ifdef CONFIG_DRM_FBDEV_EMULATION
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        struct drm_framebuffer *fb;
 
@@ -10610,7 +10722,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 static int i9xx_pll_refclk(struct drm_device *dev,
                           const struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dpll = pipe_config->dpll_hw_state.dpll;
 
        if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
@@ -10628,11 +10740,11 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
-       intel_clock_t clock;
+       struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);
 
@@ -10754,7 +10866,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
                                             struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        struct drm_display_mode *mode;
@@ -10806,48 +10918,20 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        return mode;
 }
 
-void intel_mark_busy(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (dev_priv->mm.busy)
-               return;
-
-       intel_runtime_pm_get(dev_priv);
-       i915_update_gfx_val(dev_priv);
-       if (INTEL_INFO(dev)->gen >= 6)
-               gen6_rps_busy(dev_priv);
-       dev_priv->mm.busy = true;
-}
-
-void intel_mark_idle(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (!dev_priv->mm.busy)
-               return;
-
-       dev_priv->mm.busy = false;
-
-       if (INTEL_INFO(dev)->gen >= 6)
-               gen6_rps_idle(dev->dev_private);
-
-       intel_runtime_pm_put(dev_priv);
-}
-
 static void intel_crtc_destroy(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       struct intel_unpin_work *work;
+       struct intel_flip_work *work;
 
        spin_lock_irq(&dev->event_lock);
-       work = intel_crtc->unpin_work;
-       intel_crtc->unpin_work = NULL;
+       work = intel_crtc->flip_work;
+       intel_crtc->flip_work = NULL;
        spin_unlock_irq(&dev->event_lock);
 
        if (work) {
-               cancel_work_sync(&work->work);
+               cancel_work_sync(&work->mmio_work);
+               cancel_work_sync(&work->unpin_work);
                kfree(work);
        }
 
@@ -10858,12 +10942,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
 
 static void intel_unpin_work_fn(struct work_struct *__work)
 {
-       struct intel_unpin_work *work =
-               container_of(__work, struct intel_unpin_work, work);
+       struct intel_flip_work *work =
+               container_of(__work, struct intel_flip_work, unpin_work);
        struct intel_crtc *crtc = to_intel_crtc(work->crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_plane *primary = crtc->base.primary;
 
+       if (is_mmio_work(work))
+               flush_work(&work->mmio_work);
+
        mutex_lock(&dev->struct_mutex);
        intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
        drm_gem_object_unreference(&work->pending_flip_obj->base);
@@ -10882,63 +10969,17 @@ static void intel_unpin_work_fn(struct work_struct *__work)
        kfree(work);
 }
 
-static void do_intel_finish_page_flip(struct drm_device *dev,
-                                     struct drm_crtc *crtc)
-{
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_unpin_work *work;
-       unsigned long flags;
-
-       /* Ignore early vblank irqs */
-       if (intel_crtc == NULL)
-               return;
-
-       /*
-        * This is called both by irq handlers and the reset code (to complete
-        * lost pageflips) so needs the full irqsave spinlocks.
-        */
-       spin_lock_irqsave(&dev->event_lock, flags);
-       work = intel_crtc->unpin_work;
-
-       /* Ensure we don't miss a work->pending update ... */
-       smp_rmb();
-
-       if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-               return;
-       }
-
-       page_flip_completed(intel_crtc);
-
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-void intel_finish_page_flip(struct drm_device *dev, int pipe)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-
-       do_intel_finish_page_flip(dev, crtc);
-}
-
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
-
-       do_intel_finish_page_flip(dev, crtc);
-}
-
 /* Is 'a' after or equal to 'b'? */
 static bool g4x_flip_count_after_eq(u32 a, u32 b)
 {
        return !((a - b) & 0x80000000);
 }
 
-static bool page_flip_finished(struct intel_crtc *crtc)
+static bool __pageflip_finished_cs(struct intel_crtc *crtc,
+                                  struct intel_flip_work *work)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned reset_counter;
 
        reset_counter = i915_reset_counter(&dev_priv->gpu_error);
@@ -10977,40 +11018,103 @@ static bool page_flip_finished(struct intel_crtc *crtc)
         * anyway, we don't really care.
         */
        return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
-               crtc->unpin_work->gtt_offset &&
+               crtc->flip_work->gtt_offset &&
                g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
-                                   crtc->unpin_work->flip_count);
+                                   crtc->flip_work->flip_count);
 }
 
-void intel_prepare_page_flip(struct drm_device *dev, int plane)
+static bool
+__pageflip_finished_mmio(struct intel_crtc *crtc,
+                              struct intel_flip_work *work)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
-       unsigned long flags;
-
-
+       /*
+        * MMIO work completes when vblank is different from
+        * flip_queued_vblank.
+        *
+        * Reset counter value doesn't matter; this is handled by
+        * i915_wait_request finishing early, so no need to handle
+        * reset here.
+        */
+       return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
+}
+
+
+static bool pageflip_finished(struct intel_crtc *crtc,
+                             struct intel_flip_work *work)
+{
+       if (!atomic_read(&work->pending))
+               return false;
+
+       smp_rmb();
+
+       if (is_mmio_work(work))
+               return __pageflip_finished_mmio(crtc, work);
+       else
+               return __pageflip_finished_cs(crtc, work);
+}
+
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
+{
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_flip_work *work;
+       unsigned long flags;
+
+       /* Ignore early vblank irqs */
+       if (!crtc)
+               return;
+
        /*
         * This is called both by irq handlers and the reset code (to complete
         * lost pageflips) so needs the full irqsave spinlocks.
-        *
-        * NB: An MMIO update of the plane base pointer will also
-        * generate a page-flip completion irq, i.e. every modeset
-        * is also accompanied by a spurious intel_prepare_page_flip().
         */
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
-               atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
+       work = intel_crtc->flip_work;
+
+       if (work != NULL &&
+           !is_mmio_work(work) &&
+           pageflip_finished(intel_crtc, work))
+               page_flip_completed(intel_crtc);
+
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
 {
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_flip_work *work;
+       unsigned long flags;
+
+       /* Ignore early vblank irqs */
+       if (!crtc)
+               return;
+
+       /*
+        * This is called both by irq handlers and the reset code (to complete
+        * lost pageflips) so needs the full irqsave spinlocks.
+        */
+       spin_lock_irqsave(&dev->event_lock, flags);
+       work = intel_crtc->flip_work;
+
+       if (work != NULL &&
+           is_mmio_work(work) &&
+           pageflip_finished(intel_crtc, work))
+               page_flip_completed(intel_crtc);
+
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
+                                              struct intel_flip_work *work)
+{
+       work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
+
        /* Ensure that the work item is consistent when activating it ... */
-       smp_wmb();
-       atomic_set(&work->pending, INTEL_FLIP_PENDING);
-       /* and that it is marked active as soon as the irq could fire. */
-       smp_wmb();
+       smp_mb__before_atomic();
+       atomic_set(&work->pending, 1);
 }
 
 static int intel_gen2_queue_flip(struct drm_device *dev,
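
[Annotation] Note the barrier pairing here: intel_mark_page_flip_active() publishes the work fields before setting pending (smp_mb__before_atomic()), and pageflip_finished() reads pending first, then issues smp_rmb() before trusting those fields. Stripped of i915 specifics, the pattern is:

/* Producer (queueing the flip): */
work->flip_queued_vblank = vblank;	/* publish payload ... */
smp_mb__before_atomic();		/* ... strictly before the flag */
atomic_set(&work->pending, 1);

/* Consumer (vblank/irq side): */
if (!atomic_read(&work->pending))
	return false;			/* flip not published yet */
smp_rmb();				/* order flag read before payload reads */
vblank = work->flip_queued_vblank;	/* now safe to use */
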
@@ -11041,10 +11145,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
        intel_ring_emit(engine, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(engine, fb->pitches[0]);
-       intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
        intel_ring_emit(engine, 0); /* aux display base address, unused */
 
-       intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
 }
 
@@ -11073,10 +11176,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
        intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(engine, fb->pitches[0]);
-       intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
        intel_ring_emit(engine, MI_NOOP);
 
-       intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
 }
 
@@ -11088,7 +11190,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
                                 uint32_t flags)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;
@@ -11104,7 +11206,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
        intel_ring_emit(engine, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(engine, fb->pitches[0]);
-       intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
+       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
                        obj->tiling_mode);
 
        /* XXX Enabling the panel-fitter across page-flip is so far
@@ -11115,7 +11217,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(engine, pf | pipesrc);
 
-       intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
 }
 
@@ -11127,7 +11228,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
                                 uint32_t flags)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;
@@ -11139,7 +11240,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
        intel_ring_emit(engine, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
-       intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
 
        /* Contrary to the suggestions in the documentation,
         * "Enable Panel Fitter" does not seem to be required when page
@@ -11151,7 +11252,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(engine, pf | pipesrc);
 
-       intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
 }
 
@@ -11243,16 +11343,17 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 
        intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
        intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
-       intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+       intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
        intel_ring_emit(engine, (MI_NOOP));
 
-       intel_mark_page_flip_active(intel_crtc->unpin_work);
        return 0;
 }
 
 static bool use_mmio_flip(struct intel_engine_cs *engine,
                          struct drm_i915_gem_object *obj)
 {
+       struct reservation_object *resv;
+
        /*
         * This is not being used for older platforms, because
         * non-availability of flip done interrupt forces us to use
@@ -11264,7 +11365,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
        if (engine == NULL)
                return true;
 
-       if (INTEL_INFO(engine->dev)->gen < 5)
+       if (INTEL_GEN(engine->i915) < 5)
                return false;
 
        if (i915.use_mmio_flip < 0)
@@ -11273,20 +11374,20 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
                return true;
        else if (i915.enable_execlists)
                return true;
-       else if (obj->base.dma_buf &&
-                !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
-                                                      false))
+
+       resv = i915_gem_object_get_dmabuf_resv(obj);
+       if (resv && !reservation_object_test_signaled_rcu(resv, false))
                return true;
-       else
-               return engine != i915_gem_request_get_engine(obj->last_write_req);
+
+       return engine != i915_gem_request_get_engine(obj->last_write_req);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
                             unsigned int rotation,
-                            struct intel_unpin_work *work)
+                            struct intel_flip_work *work)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
        const enum pipe pipe = intel_crtc->pipe;
        u32 ctl, stride, tile_height;
@@ -11335,10 +11436,10 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
 }
 
 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
-                            struct intel_unpin_work *work)
+                            struct intel_flip_work *work)
 {
        struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_framebuffer *intel_fb =
                to_intel_framebuffer(intel_crtc->base.primary->fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
@@ -11358,78 +11459,37 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
        POSTING_READ(DSPSURF(intel_crtc->plane));
 }
 
-/*
- * XXX: This is the temporary way to update the plane registers until we get
- * around to using the usual plane update functions for MMIO flips
- */
-static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
+static void intel_mmio_flip_work_func(struct work_struct *w)
 {
-       struct intel_crtc *crtc = mmio_flip->crtc;
-       struct intel_unpin_work *work;
-
-       spin_lock_irq(&crtc->base.dev->event_lock);
-       work = crtc->unpin_work;
-       spin_unlock_irq(&crtc->base.dev->event_lock);
-       if (work == NULL)
-               return;
-
-       intel_mark_page_flip_active(work);
-
-       intel_pipe_update_start(crtc);
-
-       if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
-               skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
-       else
-               /* use_mmio_flip() retricts MMIO flips to ilk+ */
-               ilk_do_mmio_flip(crtc, work);
-
-       intel_pipe_update_end(crtc);
-}
-
-static void intel_mmio_flip_work_func(struct work_struct *work)
-{
-       struct intel_mmio_flip *mmio_flip =
-               container_of(work, struct intel_mmio_flip, work);
+       struct intel_flip_work *work =
+               container_of(w, struct intel_flip_work, mmio_work);
+       struct intel_crtc *crtc = to_intel_crtc(work->crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_framebuffer *intel_fb =
-               to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
+               to_intel_framebuffer(crtc->base.primary->fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct reservation_object *resv;
 
-       if (mmio_flip->req) {
-               WARN_ON(__i915_wait_request(mmio_flip->req,
+       if (work->flip_queued_req)
+               WARN_ON(__i915_wait_request(work->flip_queued_req,
                                            false, NULL,
-                                           &mmio_flip->i915->rps.mmioflips));
-               i915_gem_request_unreference__unlocked(mmio_flip->req);
-       }
+                                           &dev_priv->rps.mmioflips));
 
        /* For framebuffer backed by dmabuf, wait for fence */
-       if (obj->base.dma_buf)
-               WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
-                                                           false, false,
+       resv = i915_gem_object_get_dmabuf_resv(obj);
+       if (resv)
+               WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
                                                            MAX_SCHEDULE_TIMEOUT) < 0);
 
-       intel_do_mmio_flip(mmio_flip);
-       kfree(mmio_flip);
-}
-
-static int intel_queue_mmio_flip(struct drm_device *dev,
-                                struct drm_crtc *crtc,
-                                struct drm_i915_gem_object *obj)
-{
-       struct intel_mmio_flip *mmio_flip;
-
-       mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
-       if (mmio_flip == NULL)
-               return -ENOMEM;
-
-       mmio_flip->i915 = to_i915(dev);
-       mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
-       mmio_flip->crtc = to_intel_crtc(crtc);
-       mmio_flip->rotation = crtc->primary->state->rotation;
+       intel_pipe_update_start(crtc);
 
-       INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
-       schedule_work(&mmio_flip->work);
+       if (INTEL_GEN(dev_priv) >= 9)
+               skl_do_mmio_flip(crtc, work->rotation, work);
+       else
+               /* use_mmio_flip() restricts MMIO flips to ilk+ */
+               ilk_do_mmio_flip(crtc, work);
 
-       return 0;
+       intel_pipe_update_end(crtc, work);
 }
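
The conversion above folds the old intel_queue_mmio_flip() allocation into the flip work itself. For reference, a minimal self-contained sketch of the embedded-work_struct idiom it relies on; struct flip_ctx, flip_handler and queue_flip are invented names, not part of this patch:

	#include <linux/workqueue.h>
	#include <linux/slab.h>
	#include <linux/printk.h>

	struct flip_ctx {
		int pipe;
		struct work_struct work;	/* embedded, not a pointer */
	};

	static void flip_handler(struct work_struct *w)
	{
		/* Recover the containing structure from the embedded member. */
		struct flip_ctx *ctx = container_of(w, struct flip_ctx, work);

		pr_info("handling flip on pipe %d\n", ctx->pipe);
		kfree(ctx);
	}

	static int queue_flip(int pipe)
	{
		struct flip_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;

		ctx->pipe = pipe;
		INIT_WORK(&ctx->work, flip_handler);
		schedule_work(&ctx->work);	/* handler runs later in process context */
		return 0;
	}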
 
 static int intel_default_queue_flip(struct drm_device *dev,
@@ -11442,37 +11502,32 @@ static int intel_default_queue_flip(struct drm_device *dev,
        return -ENODEV;
 }
 
-static bool __intel_pageflip_stall_check(struct drm_device *dev,
-                                        struct drm_crtc *crtc)
+static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
+                                     struct intel_crtc *intel_crtc,
+                                     struct intel_flip_work *work)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_unpin_work *work = intel_crtc->unpin_work;
-       u32 addr;
+       u32 addr, vblank;
 
-       if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
-               return true;
-
-       if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
+       if (!atomic_read(&work->pending))
                return false;
 
-       if (!work->enable_stall_check)
-               return false;
+       smp_rmb();
 
+       vblank = intel_crtc_get_vblank_counter(intel_crtc);
        if (work->flip_ready_vblank == 0) {
                if (work->flip_queued_req &&
-                   !i915_gem_request_completed(work->flip_queued_req, true))
+                   !i915_gem_request_completed(work->flip_queued_req))
                        return false;
 
-               work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
+               work->flip_ready_vblank = vblank;
        }
 
-       if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
+       if (vblank - work->flip_ready_vblank < 3)
                return false;
 
        /* Potential stall - if we see that the flip has happened,
         * assume a missed interrupt. */
-       if (INTEL_INFO(dev)->gen >= 4)
+       if (INTEL_GEN(dev_priv) >= 4)
                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
        else
                addr = I915_READ(DSPADDR(intel_crtc->plane));
@@ -11484,12 +11539,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
        return addr == work->gtt_offset;
 }
 
-void intel_check_page_flip(struct drm_device *dev, int pipe)
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_unpin_work *work;
+       struct intel_flip_work *work;
 
        WARN_ON(!in_interrupt());
 
@@ -11497,16 +11552,20 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
                return;
 
        spin_lock(&dev->event_lock);
-       work = intel_crtc->unpin_work;
-       if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
-               WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
-                        work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+       work = intel_crtc->flip_work;
+
+       if (work != NULL && !is_mmio_work(work) &&
+           __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
+               WARN_ONCE(1,
+                         "Kicking stuck page flip: queued at %d, now %d\n",
+                         work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
                page_flip_completed(intel_crtc);
                work = NULL;
        }
-       if (work != NULL &&
-           drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
-               intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
+
+       if (work != NULL && !is_mmio_work(work) &&
+           intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
+               intel_queue_rps_boost_for_request(work->flip_queued_req);
        spin_unlock(&dev->event_lock);
 }
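
The bare atomic_read() plus smp_rmb() in __pageflip_stall_check_cs() above only works because the producer publishes its payload before raising the pending flag. A reduced sketch of that pairing, with invented names and fields (the real producer is intel_mark_page_flip_active(), simplified here):

	#include <linux/atomic.h>
	#include <linux/types.h>

	struct flip_progress {
		u32 queued_vblank;	/* payload */
		atomic_t pending;	/* flag */
	};

	/* Producer: publish the payload, then raise the flag. */
	static void mark_active(struct flip_progress *p, u32 vblank)
	{
		p->queued_vblank = vblank;
		smp_wmb();			/* payload visible before flag */
		atomic_set(&p->pending, 1);
	}

	/* Consumer: test the flag, then read the payload. */
	static bool check_active(struct flip_progress *p, u32 *vblank)
	{
		if (!atomic_read(&p->pending))
			return false;

		smp_rmb();			/* pairs with the smp_wmb() above */
		*vblank = p->queued_vblank;
		return true;
	}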
 
@@ -11516,13 +11575,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                uint32_t page_flip_flags)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_framebuffer *old_fb = crtc->primary->fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_plane *primary = crtc->primary;
        enum pipe pipe = intel_crtc->pipe;
-       struct intel_unpin_work *work;
+       struct intel_flip_work *work;
        struct intel_engine_cs *engine;
        bool mmio_flip;
        struct drm_i915_gem_request *request = NULL;
@@ -11559,19 +11618,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        work->event = event;
        work->crtc = crtc;
        work->old_fb = old_fb;
-       INIT_WORK(&work->work, intel_unpin_work_fn);
+       INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
 
        ret = drm_crtc_vblank_get(crtc);
        if (ret)
                goto free_work;
 
-       /* We borrow the event spin lock for protecting unpin_work */
+       /* We borrow the event spin lock for protecting flip_work */
        spin_lock_irq(&dev->event_lock);
-       if (intel_crtc->unpin_work) {
+       if (intel_crtc->flip_work) {
                /* Before declaring the flip queue wedged, check if
                 * the hardware completed the operation behind our backs.
                 */
-               if (__intel_pageflip_stall_check(dev, crtc)) {
+               if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
                        DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
                        page_flip_completed(intel_crtc);
                } else {
@@ -11583,7 +11642,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                        return -EBUSY;
                }
        }
-       intel_crtc->unpin_work = work;
+       intel_crtc->flip_work = work;
        spin_unlock_irq(&dev->event_lock);
 
        if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -11595,7 +11654,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        crtc->primary->fb = fb;
        update_state_fb(crtc->primary);
-       intel_fbc_pre_update(intel_crtc);
+
+       intel_fbc_pre_update(intel_crtc, intel_crtc->config,
+                            to_intel_plane_state(primary->state));
 
        work->pending_flip_obj = obj;
 
@@ -11638,6 +11699,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
         */
        if (!mmio_flip) {
                ret = i915_gem_object_sync(obj, engine, &request);
+               if (!ret && !request) {
+                       request = i915_gem_request_alloc(engine, NULL);
+                       ret = PTR_ERR_OR_ZERO(request);
+               }
+
                if (ret)
                        goto cleanup_pending;
        }
@@ -11649,38 +11715,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
                                                  obj, 0);
        work->gtt_offset += intel_crtc->dspaddr_offset;
+       work->rotation = crtc->primary->state->rotation;
 
        if (mmio_flip) {
-               ret = intel_queue_mmio_flip(dev, crtc, obj);
-               if (ret)
-                       goto cleanup_unpin;
+               INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
                i915_gem_request_assign(&work->flip_queued_req,
                                        obj->last_write_req);
-       } else {
-               if (!request) {
-                       request = i915_gem_request_alloc(engine, NULL);
-                       if (IS_ERR(request)) {
-                               ret = PTR_ERR(request);
-                               goto cleanup_unpin;
-                       }
-               }
 
+               schedule_work(&work->mmio_work);
+       } else {
+               i915_gem_request_assign(&work->flip_queued_req, request);
                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
                                                   page_flip_flags);
                if (ret)
                        goto cleanup_unpin;
 
-               i915_gem_request_assign(&work->flip_queued_req, request);
-       }
+               intel_mark_page_flip_active(intel_crtc, work);
 
-       if (request)
                i915_add_request_no_flush(request);
+       }
 
-       work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
-       work->enable_stall_check = true;
-
-       i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
+       i915_gem_track_fb(intel_fb_obj(old_fb), obj,
                          to_intel_plane(primary)->frontbuffer_bit);
        mutex_unlock(&dev->struct_mutex);
 
@@ -11706,7 +11762,7 @@ cleanup:
        drm_framebuffer_unreference(work->old_fb);
 
        spin_lock_irq(&dev->event_lock);
-       intel_crtc->unpin_work = NULL;
+       intel_crtc->flip_work = NULL;
        spin_unlock_irq(&dev->event_lock);
 
        drm_crtc_vblank_put(crtc);
@@ -11808,15 +11864,14 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane_state *old_plane_state =
                to_intel_plane_state(plane->state);
-       int idx = intel_crtc->base.base.id, ret;
        bool mode_changed = needs_modeset(crtc_state);
        bool was_crtc_enabled = crtc->state->active;
        bool is_crtc_enabled = crtc_state->active;
        bool turn_off, turn_on, visible, was_visible;
        struct drm_framebuffer *fb = plane_state->fb;
+       int ret;
 
-       if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
-           plane->type != DRM_PLANE_TYPE_CURSOR) {
+       if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
                ret = skl_update_scaler_plane(
                        to_intel_crtc_state(crtc_state),
                        to_intel_plane_state(plane_state));
@@ -11834,6 +11889,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
+        *
+        * FIXME this is wrong for watermarks. Watermarks should also
+        * be computed as if the pipe were active. Perhaps move
+        * per-plane wm computation to the .check_plane() hook, and
+        * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled)
                to_intel_plane_state(plane_state)->visible = visible = false;
@@ -11847,11 +11907,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);
 
-       DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
-                        plane->base.id, fb ? fb->base.id : -1);
+       DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
+                        intel_crtc->base.base.id,
+                        intel_crtc->base.name,
+                        plane->base.id, plane->name,
+                        fb ? fb->base.id : -1);
 
-       DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
-                        plane->base.id, was_visible, visible,
+       DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+                        plane->base.id, plane->name,
+                        was_visible, visible,
                         turn_off, turn_on, mode_changed);
 
        if (turn_on) {
@@ -11924,31 +11988,11 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
        return true;
 }
 
-static bool check_encoder_cloning(struct drm_atomic_state *state,
-                                 struct intel_crtc *crtc)
-{
-       struct intel_encoder *encoder;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int i;
-
-       for_each_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != &crtc->base)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-               if (!check_single_encoder_cloning(state, crtc, encoder))
-                       return false;
-       }
-
-       return true;
-}
-
 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                                   struct drm_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc_state);
@@ -11956,11 +12000,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
        int ret;
        bool mode_changed = needs_modeset(crtc_state);
 
-       if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
-               DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
-               return -EINVAL;
-       }
-
        if (mode_changed && !crtc_state->active)
                pipe_config->update_wm_post = true;
 
@@ -11977,6 +12016,12 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                ret = intel_color_check(crtc, crtc_state);
                if (ret)
                        return ret;
+
+               /*
+                * Changing color management on Intel hardware is
+                * handled as part of the plane update.
+                */
+               crtc_state->planes_changed = true;
        }
 
        ret = 0;
@@ -12007,7 +12052,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                }
        } else if (dev_priv->display.compute_intermediate_wm) {
                if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
-                       pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
+                       pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
        }
 
        if (INTEL_INFO(dev)->gen >= 9) {
@@ -12142,7 +12187,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
        struct intel_plane_state *state;
        struct drm_framebuffer *fb;
 
-       DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
+       DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
+                     crtc->base.base.id, crtc->base.name,
                      context, pipe_config, pipe_name(crtc->pipe));
 
        DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
@@ -12155,14 +12201,14 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
                      pipe_config->fdi_m_n.tu);
        DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
-                     pipe_config->has_dp_encoder,
+                     intel_crtc_has_dp_encoder(pipe_config),
                      pipe_config->lane_count,
                      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
                      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
                      pipe_config->dp_m_n.tu);
 
        DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
-                     pipe_config->has_dp_encoder,
+                     intel_crtc_has_dp_encoder(pipe_config),
                      pipe_config->lane_count,
                      pipe_config->dp_m2_n2.gmch_m,
                      pipe_config->dp_m2_n2.gmch_n,
@@ -12243,29 +12289,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                state = to_intel_plane_state(plane->state);
                fb = state->base.fb;
                if (!fb) {
-                       DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
-                               "disabled, scaler_id = %d\n",
-                               plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
-                               plane->base.id, intel_plane->pipe,
-                               (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
-                               drm_plane_index(plane), state->scaler_id);
+                       DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
+                                     plane->base.id, plane->name, state->scaler_id);
                        continue;
                }
 
-               DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
-                       plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
-                       plane->base.id, intel_plane->pipe,
-                       crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
-                       drm_plane_index(plane));
-               DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
-                       fb->base.id, fb->width, fb->height, fb->pixel_format);
-               DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
-                       state->scaler_id,
-                       state->src.x1 >> 16, state->src.y1 >> 16,
-                       drm_rect_width(&state->src) >> 16,
-                       drm_rect_height(&state->src) >> 16,
-                       state->dst.x1, state->dst.y1,
-                       drm_rect_width(&state->dst), drm_rect_height(&state->dst));
+               DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
+                             plane->base.id, plane->name);
+               DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
+                             fb->base.id, fb->width, fb->height,
+                             drm_get_format_name(fb->pixel_format));
+               DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
+                             state->scaler_id,
+                             state->src.x1 >> 16, state->src.y1 >> 16,
+                             drm_rect_width(&state->src) >> 16,
+                             drm_rect_height(&state->src) >> 16,
+                             state->dst.x1, state->dst.y1,
+                             drm_rect_width(&state->dst),
+                             drm_rect_height(&state->dst));
        }
 }
 
@@ -12300,7 +12341,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
                case INTEL_OUTPUT_UNKNOWN:
                        if (WARN_ON(!HAS_DDI(dev)))
                                break;
-               case INTEL_OUTPUT_DISPLAYPORT:
+               case INTEL_OUTPUT_DP:
                case INTEL_OUTPUT_HDMI:
                case INTEL_OUTPUT_EDP:
                        port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
@@ -12397,6 +12438,24 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
                               &pipe_config->pipe_src_w,
                               &pipe_config->pipe_src_h);
 
+       for_each_connector_in_state(state, connector, connector_state, i) {
+               if (connector_state->crtc != crtc)
+                       continue;
+
+               encoder = to_intel_encoder(connector_state->best_encoder);
+
+               if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
+                       DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+                       goto fail;
+               }
+
+               /*
+                * Determine output_types before calling the .compute_config()
+                * hooks so that the hooks can use this information safely.
+                */
+               pipe_config->output_types |= 1 << encoder->type;
+       }
+
 encoder_retry:
        /* Ensure the port clock defaults are reset when retrying. */
        pipe_config->port_clock = 0;
@@ -12682,8 +12741,8 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_I(fdi_lanes);
        PIPE_CONF_CHECK_M_N(fdi_m_n);
 
-       PIPE_CONF_CHECK_I(has_dp_encoder);
        PIPE_CONF_CHECK_I(lane_count);
+       PIPE_CONF_CHECK_X(lane_lat_optim_mask);
 
        if (INTEL_INFO(dev)->gen < 8) {
                PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -12693,7 +12752,7 @@ intel_pipe_config_compare(struct drm_device *dev,
        } else
                PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
 
-       PIPE_CONF_CHECK_I(has_dsi_encoder);
+       PIPE_CONF_CHECK_X(output_types);
 
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
        PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12812,7 +12871,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
                            struct drm_crtc_state *new_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_ddb_allocation hw_ddb, *sw_ddb;
        struct skl_ddb_entry *hw_entry, *sw_entry;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -12918,7 +12977,7 @@ verify_crtc_state(struct drm_crtc *crtc,
                  struct drm_crtc_state *new_crtc_state)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config, *sw_config;
@@ -12932,7 +12991,7 @@ verify_crtc_state(struct drm_crtc *crtc,
        pipe_config->base.crtc = crtc;
        pipe_config->base.state = old_state;
 
-       DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
+       DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
        active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
 
@@ -12961,8 +13020,10 @@ verify_crtc_state(struct drm_crtc *crtc,
                                "Encoder connected to wrong pipe %c\n",
                                pipe_name(pipe));
 
-               if (active)
+               if (active) {
+                       pipe_config->output_types |= 1 << encoder->type;
                        encoder->get_config(encoder, pipe_config);
+               }
        }
 
        if (!new_crtc_state->active)
@@ -13041,7 +13102,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
                         struct drm_crtc_state *old_crtc_state,
                         struct drm_crtc_state *new_crtc_state)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
        struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
 
@@ -13080,7 +13141,7 @@ intel_modeset_verify_crtc(struct drm_crtc *crtc,
 static void
 verify_disabled_dpll_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        for (i = 0; i < dev_priv->num_shared_dpll; i++)
@@ -13127,7 +13188,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
 
                crtc->scanline_offset = vtotal - 1;
        } else if (HAS_DDI(dev) &&
-                  intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+                  intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
                crtc->scanline_offset = 2;
        } else
                crtc->scanline_offset = 1;
@@ -13262,7 +13323,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
 static int intel_modeset_checks(struct drm_atomic_state *state)
 {
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = state->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        int ret = 0, i;
@@ -13280,6 +13341,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
                        intel_state->active_crtcs |= 1 << i;
                else
                        intel_state->active_crtcs &= ~(1 << i);
+
+               if (crtc_state->active != crtc->state->active)
+                       intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
        }
 
        /*
@@ -13290,9 +13354,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
         * adjusted_mode bits in the crtc directly.
         */
        if (dev_priv->display.modeset_calc_cdclk) {
+               if (!intel_state->cdclk_pll_vco)
+                       intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
+               if (!intel_state->cdclk_pll_vco)
+                       intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
+
                ret = dev_priv->display.modeset_calc_cdclk(state);
+               if (ret < 0)
+                       return ret;
 
-               if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
+               if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
+                   intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
                        ret = intel_modeset_all_pipes(state);
 
                if (ret < 0)
@@ -13316,38 +13388,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
  * phase.  The code here should be run after the per-crtc and per-plane 'check'
  * handlers to ensure that all derived state has been updated.
  */
-static void calc_watermark_data(struct drm_atomic_state *state)
+static int calc_watermark_data(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
-       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_crtc *crtc;
-       struct drm_crtc_state *cstate;
-       struct drm_plane *plane;
-       struct drm_plane_state *pstate;
-
-       /*
-        * Calculate watermark configuration details now that derived
-        * plane/crtc state is all properly updated.
-        */
-       drm_for_each_crtc(crtc, dev) {
-               cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
-                       crtc->state;
-
-               if (cstate->active)
-                       intel_state->wm_config.num_pipes_active++;
-       }
-       drm_for_each_legacy_plane(plane, dev) {
-               pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
-                       plane->state;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
-               if (!to_intel_plane_state(pstate)->visible)
-                       continue;
+       /* Is there platform-specific watermark information to calculate? */
+       if (dev_priv->display.compute_global_watermarks)
+               return dev_priv->display.compute_global_watermarks(state);
 
-               intel_state->wm_config.sprites_enabled = true;
-               if (pstate->crtc_w != pstate->src_w >> 16 ||
-                   pstate->crtc_h != pstate->src_h >> 16)
-                       intel_state->wm_config.sprites_scaled = true;
-       }
+       return 0;
 }
 
 /**
@@ -13377,14 +13427,13 @@ static int intel_atomic_check(struct drm_device *dev,
                if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
                        crtc_state->mode_changed = true;
 
-               if (!crtc_state->enable) {
-                       if (needs_modeset(crtc_state))
-                               any_ms = true;
+               if (!needs_modeset(crtc_state))
                        continue;
-               }
 
-               if (!needs_modeset(crtc_state))
+               if (!crtc_state->enable) {
+                       any_ms = true;
                        continue;
+               }
 
                /* FIXME: For only active_changed we shouldn't need to do any
                 * state recomputation at all. */
@@ -13394,8 +13443,11 @@ static int intel_atomic_check(struct drm_device *dev,
                        return ret;
 
                ret = intel_modeset_pipe_config(crtc, pipe_config);
-               if (ret)
+               if (ret) {
+                       intel_dump_pipe_config(to_intel_crtc(crtc),
+                                              pipe_config, "[failed]");
                        return ret;
+               }
 
                if (i915.fastboot &&
                    intel_pipe_config_compare(dev,
@@ -13405,13 +13457,12 @@ static int intel_atomic_check(struct drm_device *dev,
                        to_intel_crtc_state(crtc_state)->update_pipe = true;
                }
 
-               if (needs_modeset(crtc_state)) {
+               if (needs_modeset(crtc_state))
                        any_ms = true;
 
-                       ret = drm_atomic_add_affected_planes(state, crtc);
-                       if (ret)
-                               return ret;
-               }
+               ret = drm_atomic_add_affected_planes(state, crtc);
+               if (ret)
+                       return ret;
 
                intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
                                       needs_modeset(crtc_state) ?
@@ -13431,27 +13482,20 @@ static int intel_atomic_check(struct drm_device *dev,
                return ret;
 
        intel_fbc_choose_crtc(dev_priv, state);
-       calc_watermark_data(state);
-
-       return 0;
+       return calc_watermark_data(state);
 }
 
 static int intel_atomic_prepare_commit(struct drm_device *dev,
                                       struct drm_atomic_state *state,
                                       bool nonblock)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_plane_state *plane_state;
        struct drm_crtc_state *crtc_state;
        struct drm_plane *plane;
        struct drm_crtc *crtc;
        int i, ret;
 
-       if (nonblock) {
-               DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
-               return -EINVAL;
-       }
-
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                if (state->legacy_cursor_update)
                        continue;
@@ -13495,6 +13539,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
        return ret;
 }
 
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+
+       if (!dev->max_vblank_count)
+               return drm_accurate_vblank_count(&crtc->base);
+
+       return dev->driver->get_vblank_counter(dev, crtc->pipe);
+}
+
 static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
                                          struct drm_i915_private *dev_priv,
                                          unsigned crtc_mask)
@@ -13560,45 +13614,36 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
        return false;
 }
 
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
- * we can only handle plane-related operations and do not yet support
- * nonblocking commit.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-static int intel_atomic_commit(struct drm_device *dev,
-                              struct drm_atomic_state *state,
-                              bool nonblock)
+static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 {
+       struct drm_device *dev = state->dev;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc_state *old_crtc_state;
        struct drm_crtc *crtc;
        struct intel_crtc_state *intel_cstate;
-       int ret = 0, i;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
        bool hw_check = intel_state->modeset;
        unsigned long put_domains[I915_MAX_PIPES] = {};
        unsigned crtc_vblank_mask = 0;
+       int i, ret;
 
-       ret = intel_atomic_prepare_commit(dev, state, nonblock);
-       if (ret) {
-               DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
-               return ret;
+       for_each_plane_in_state(state, plane, plane_state, i) {
+               struct intel_plane_state *intel_plane_state =
+                       to_intel_plane_state(plane_state);
+
+               if (!intel_plane_state->wait_req)
+                       continue;
+
+               ret = __i915_wait_request(intel_plane_state->wait_req,
+                                         true, NULL, NULL);
+               /* EIO should be eaten, and we can't get interrupted in the
+                * worker, and blocking commits have waited already. */
+               WARN_ON(ret);
        }
 
-       drm_atomic_helper_swap_state(dev, state);
-       dev_priv->wm.config = intel_state->wm_config;
-       intel_shared_dpll_commit(state);
+       drm_atomic_helper_wait_for_dependencies(state);
 
        if (intel_state->modeset) {
                memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
@@ -13653,7 +13698,8 @@ static int intel_atomic_commit(struct drm_device *dev,
                drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
 
                if (dev_priv->display.modeset_commit_cdclk &&
-                   intel_state->dev_cdclk != dev_priv->cdclk_freq)
+                   (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
+                    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
                        dev_priv->display.modeset_commit_cdclk(state);
 
                intel_modeset_verify_disabled(dev);
@@ -13665,30 +13711,44 @@ static int intel_atomic_commit(struct drm_device *dev,
                bool modeset = needs_modeset(crtc->state);
                struct intel_crtc_state *pipe_config =
                        to_intel_crtc_state(crtc->state);
-               bool update_pipe = !modeset && pipe_config->update_pipe;
 
                if (modeset && crtc->state->active) {
                        update_scanline_offset(to_intel_crtc(crtc));
                        dev_priv->display.crtc_enable(crtc);
                }
 
+               /* Complete events for pipes that are now being disabled. */
+               if (modeset && !crtc->state->active && crtc->state->event) {
+                       spin_lock_irq(&dev->event_lock);
+                       drm_crtc_send_vblank_event(crtc, crtc->state->event);
+                       spin_unlock_irq(&dev->event_lock);
+
+                       crtc->state->event = NULL;
+               }
+
                if (!modeset)
                        intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
 
                if (crtc->state->active &&
                    drm_atomic_get_existing_plane_state(state, crtc->primary))
-                       intel_fbc_enable(intel_crtc);
+                       intel_fbc_enable(intel_crtc, pipe_config,
+                                        to_intel_plane_state(crtc->primary->state));
 
-               if (crtc->state->active &&
-                   (crtc->state->planes_changed || update_pipe))
+               if (crtc->state->active)
                        drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
 
                if (pipe_config->base.active && needs_vblank_wait(pipe_config))
                        crtc_vblank_mask |= 1 << i;
        }
 
-       /* FIXME: add subpixel order */
-
+       /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
+        * already, but still need the state for the delayed optimization. To
+        * fix this:
+        * - wrap the optimization/post_plane_update stuff into a per-crtc work.
+        * - schedule that vblank worker _before_ calling hw_done
+        * - at the start of commit_tail, cancel it _synchronously_
+        * - switch over to the vblank wait helper in the core after that since
+        *   we don't need our special handling any more.
+        */
        if (!state->legacy_cursor_update)
                intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
 
@@ -13715,6 +13775,8 @@ static int intel_atomic_commit(struct drm_device *dev,
                intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
        }
 
+       drm_atomic_helper_commit_hw_done(state);
+
        if (intel_state->modeset)
                intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
 
@@ -13722,6 +13784,8 @@ static int intel_atomic_commit(struct drm_device *dev,
        drm_atomic_helper_cleanup_planes(dev, state);
        mutex_unlock(&dev->struct_mutex);
 
+       drm_atomic_helper_commit_cleanup_done(state);
+
        drm_atomic_state_free(state);
 
        /* As one of the primary mmio accessors, KMS has a high likelihood
@@ -13736,6 +13800,86 @@ static int intel_atomic_commit(struct drm_device *dev,
         * can happen also when the device is completely off.
         */
        intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+}
+
+static void intel_atomic_commit_work(struct work_struct *work)
+{
+       struct drm_atomic_state *state = container_of(work,
+                                                     struct drm_atomic_state,
+                                                     commit_work);
+       intel_atomic_commit_tail(state);
+}
+
+static void intel_atomic_track_fbs(struct drm_atomic_state *state)
+{
+       struct drm_plane_state *old_plane_state;
+       struct drm_plane *plane;
+       struct drm_i915_gem_object *obj, *old_obj;
+       struct intel_plane *intel_plane;
+       int i;
+
+       mutex_lock(&state->dev->struct_mutex);
+       for_each_plane_in_state(state, plane, old_plane_state, i) {
+               obj = intel_fb_obj(plane->state->fb);
+               old_obj = intel_fb_obj(old_plane_state->fb);
+               intel_plane = to_intel_plane(plane);
+
+               i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
+       }
+       mutex_unlock(&state->dev->struct_mutex);
+}
+
+/**
+ * intel_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the top-level driver state object
+ * @nonblock: nonblocking commit
+ *
+ * This function commits a top-level state object that has been validated
+ * with drm_atomic_helper_check().
+ *
+ * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
+ * nonblocking commits are only safe for pure plane updates. Everything else
+ * should work though.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+static int intel_atomic_commit(struct drm_device *dev,
+                              struct drm_atomic_state *state,
+                              bool nonblock)
+{
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int ret = 0;
+
+       if (intel_state->modeset && nonblock) {
+               DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
+               return -EINVAL;
+       }
+
+       ret = drm_atomic_helper_setup_commit(state, nonblock);
+       if (ret)
+               return ret;
+
+       INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+
+       ret = intel_atomic_prepare_commit(dev, state, nonblock);
+       if (ret) {
+               DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+               return ret;
+       }
+
+       drm_atomic_helper_swap_state(state, true);
+       dev_priv->wm.distrust_bios_wm = false;
+       dev_priv->wm.skl_results = intel_state->wm_results;
+       intel_shared_dpll_commit(state);
+       intel_atomic_track_fbs(state);
+
+       if (nonblock)
+               queue_work(system_unbound_wq, &state->commit_work);
+       else
+               intel_atomic_commit_tail(state);
 
        return 0;
 }
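
Stripped of the i915 specifics, the control flow above follows the ordering contract of the new nonblocking-commit helpers: setup_commit, swap the state, then run the tail either inline or from a worker, with hw_done and cleanup_done signalled from the tail. A driver-agnostic sketch of that contract (the example_* names are invented):

	#include <drm/drm_atomic.h>
	#include <drm/drm_atomic_helper.h>
	#include <linux/workqueue.h>

	static void example_commit_tail(struct drm_atomic_state *state)
	{
		/* Wait for earlier commits touching the same CRTCs. */
		drm_atomic_helper_wait_for_dependencies(state);

		/* ... program the hardware from the swapped state ... */

		/* Unblock the next commit queued on these CRTCs. */
		drm_atomic_helper_commit_hw_done(state);

		drm_atomic_helper_cleanup_planes(state->dev, state);
		drm_atomic_helper_commit_cleanup_done(state);
		drm_atomic_state_free(state);
	}

	static void example_commit_work(struct work_struct *work)
	{
		struct drm_atomic_state *state =
			container_of(work, struct drm_atomic_state, commit_work);

		example_commit_tail(state);
	}

	static int example_commit(struct drm_device *dev,
				  struct drm_atomic_state *state, bool nonblock)
	{
		int ret = drm_atomic_helper_setup_commit(state, nonblock);

		if (ret)
			return ret;

		INIT_WORK(&state->commit_work, example_commit_work);
		drm_atomic_helper_swap_state(state, true);

		if (nonblock)
			queue_work(system_unbound_wq, &state->commit_work);
		else
			example_commit_tail(state);

		return 0;
	}
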
@@ -13749,8 +13893,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
 
        state = drm_atomic_state_alloc(dev);
        if (!state) {
-               DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
-                             crtc->base.id);
+               DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
+                             crtc->base.id, crtc->name);
                return;
        }
 
@@ -13780,8 +13924,50 @@ out:
 
 #undef for_each_intel_crtc_masked
 
+/*
+ * FIXME: Once i915 is fully DRIVER_ATOMIC, remove this wrapper and call
+ *        drm_atomic_helper_legacy_gamma_set() directly.
+ */
+static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
+                                        u16 *red, u16 *green, u16 *blue,
+                                        uint32_t size)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_crtc_state *state;
+       int ret;
+
+       ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
+       if (ret)
+               return ret;
+
+       /*
+        * Make sure we update the legacy properties so this works when
+        * atomic is not enabled.
+        */
+
+       state = crtc->state;
+
+       drm_object_property_set_value(&crtc->base,
+                                     config->degamma_lut_property,
+                                     (state->degamma_lut) ?
+                                     state->degamma_lut->base.id : 0);
+
+       drm_object_property_set_value(&crtc->base,
+                                     config->ctm_property,
+                                     (state->ctm) ?
+                                     state->ctm->base.id : 0);
+
+       drm_object_property_set_value(&crtc->base,
+                                     config->gamma_lut_property,
+                                     (state->gamma_lut) ?
+                                     state->gamma_lut->base.id : 0);
+
+       return 0;
+}
+
 static const struct drm_crtc_funcs intel_crtc_funcs = {
-       .gamma_set = drm_atomic_helper_legacy_gamma_set,
+       .gamma_set = intel_atomic_legacy_gamma_set,
        .set_config = drm_atomic_helper_set_config,
        .set_property = drm_atomic_helper_crtc_set_property,
        .destroy = intel_crtc_destroy,
@@ -13810,9 +13996,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 {
        struct drm_device *dev = plane->dev;
        struct drm_framebuffer *fb = new_state->fb;
-       struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+       struct reservation_object *resv;
        int ret = 0;
 
        if (!obj && !old_obj)
@@ -13842,12 +14028,15 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                }
        }
 
+       if (!obj)
+               return 0;
+
        /* For framebuffer backed by dmabuf, wait for fence */
-       if (obj && obj->base.dma_buf) {
+       resv = i915_gem_object_get_dmabuf_resv(obj);
+       if (resv) {
                long lret;
 
-               lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
-                                                          false, true,
+               lret = reservation_object_wait_timeout_rcu(resv, false, true,
                                                           MAX_SCHEDULE_TIMEOUT);
                if (lret == -ERESTARTSYS)
                        return lret;
@@ -13855,9 +14044,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                WARN(lret < 0, "waiting returns %li\n", lret);
        }
 
-       if (!obj) {
-               ret = 0;
-       } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+       if (plane->type == DRM_PLANE_TYPE_CURSOR &&
            INTEL_INFO(dev)->cursor_needs_physical) {
                int align = IS_I830(dev) ? 16 * 1024 : 256;
                ret = i915_gem_object_attach_phys(obj, align);
@@ -13868,15 +14055,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
        }
 
        if (ret == 0) {
-               if (obj) {
-                       struct intel_plane_state *plane_state =
-                               to_intel_plane_state(new_state);
+               struct intel_plane_state *plane_state =
+                       to_intel_plane_state(new_state);
 
-                       i915_gem_request_assign(&plane_state->wait_req,
-                                               obj->last_write_req);
-               }
-
-               i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
+               i915_gem_request_assign(&plane_state->wait_req,
+                                       obj->last_write_req);
        }
 
        return ret;
@@ -13896,7 +14079,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
                       const struct drm_plane_state *old_state)
 {
        struct drm_device *dev = plane->dev;
-       struct intel_plane *intel_plane = to_intel_plane(plane);
        struct intel_plane_state *old_intel_state;
        struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
        struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -13910,11 +14092,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
            !INTEL_INFO(dev)->cursor_needs_physical))
                intel_unpin_fb_obj(old_state->fb, old_state->rotation);
 
-       /* prepare_fb aborted? */
-       if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
-           (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
-               i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
-
        i915_gem_request_assign(&old_intel_state->wait_req, NULL);
 }
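
Both intel_prepare_plane_fb() above and the MMIO flip worker now fetch the reservation object once via i915_gem_object_get_dmabuf_resv() and wait on it. A condensed sketch of that wait, with an invented wrapper name:

	#include <linux/reservation.h>
	#include <linux/sched.h>
	#include <linux/bug.h>

	/* Wait for the exclusive (write) fence on a dma-buf's reservation
	 * object; interruptible, unbounded timeout. */
	static int wait_for_dmabuf_fences(struct reservation_object *resv)
	{
		long lret;

		lret = reservation_object_wait_timeout_rcu(resv,
							   false,	/* exclusive fence only */
							   true,	/* interruptible */
							   MAX_SCHEDULE_TIMEOUT);
		if (lret == -ERESTARTSYS)
			return -ERESTARTSYS;

		WARN(lret < 0, "waiting returned %li\n", lret);
		return lret < 0 ? (int)lret : 0;
	}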
 
@@ -13922,15 +14099,11 @@ int
 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
 {
        int max_scale;
-       struct drm_device *dev;
-       struct drm_i915_private *dev_priv;
        int crtc_clock, cdclk;
 
        if (!intel_crtc || !crtc_state->base.enable)
                return DRM_PLANE_HELPER_NO_SCALING;
 
-       dev = intel_crtc->base.dev;
-       dev_priv = dev->dev_private;
        crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
        cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
 
@@ -13970,6 +14143,7 @@ intel_check_primary_plane(struct drm_plane *plane,
 
        return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
                                             &state->dst, &state->clip,
+                                            state->base.rotation,
                                             min_scale, max_scale,
                                             can_position, true,
                                             &state->visible);
@@ -14006,7 +14180,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       intel_pipe_update_end(intel_crtc);
+       intel_pipe_update_end(intel_crtc, NULL);
 }
 
 /**
@@ -14018,9 +14192,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
  */
 void intel_plane_destroy(struct drm_plane *plane)
 {
-       struct intel_plane *intel_plane = to_intel_plane(plane);
+       if (!plane)
+               return;
+
        drm_plane_cleanup(plane);
-       kfree(intel_plane);
+       kfree(to_intel_plane(plane));
 }
 
 const struct drm_plane_funcs intel_plane_funcs = {
@@ -14092,10 +14268,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
                primary->disable_plane = i9xx_disable_primary_plane;
        }
 
-       ret = drm_universal_plane_init(dev, &primary->base, 0,
-                                      &intel_plane_funcs,
-                                      intel_primary_formats, num_formats,
-                                      DRM_PLANE_TYPE_PRIMARY, NULL);
+       if (INTEL_INFO(dev)->gen >= 9)
+               ret = drm_universal_plane_init(dev, &primary->base, 0,
+                                              &intel_plane_funcs,
+                                              intel_primary_formats, num_formats,
+                                              DRM_PLANE_TYPE_PRIMARY,
+                                              "plane 1%c", pipe_name(pipe));
+       else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+               ret = drm_universal_plane_init(dev, &primary->base, 0,
+                                              &intel_plane_funcs,
+                                              intel_primary_formats, num_formats,
+                                              DRM_PLANE_TYPE_PRIMARY,
+                                              "primary %c", pipe_name(pipe));
+       else
+               ret = drm_universal_plane_init(dev, &primary->base, 0,
+                                              &intel_plane_funcs,
+                                              intel_primary_formats, num_formats,
+                                              DRM_PLANE_TYPE_PRIMARY,
+                                              "plane %c", plane_name(primary->plane));
        if (ret)
                goto fail;
 
@@ -14145,6 +14335,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
 
        ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
                                            &state->dst, &state->clip,
+                                           state->base.rotation,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            true, true, &state->visible);
@@ -14253,7 +14444,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
                                       &intel_plane_funcs,
                                       intel_cursor_formats,
                                       ARRAY_SIZE(intel_cursor_formats),
-                                      DRM_PLANE_TYPE_CURSOR, NULL);
+                                      DRM_PLANE_TYPE_CURSOR,
+                                      "cursor %c", pipe_name(pipe));
        if (ret)
                goto fail;
 
@@ -14301,7 +14493,7 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
 
 static void intel_crtc_init(struct drm_device *dev, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc;
        struct intel_crtc_state *crtc_state = NULL;
        struct drm_plane *primary = NULL;
@@ -14338,7 +14530,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
                goto fail;
 
        ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
-                                       cursor, &intel_crtc_funcs, NULL);
+                                       cursor, &intel_crtc_funcs,
+                                       "pipe %c", pipe_name(pipe));
        if (ret)
                goto fail;
 
@@ -14372,10 +14565,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        return;
 
 fail:
-       if (primary)
-               drm_plane_cleanup(primary);
-       if (cursor)
-               drm_plane_cleanup(cursor);
+       intel_plane_destroy(primary);
+       intel_plane_destroy(cursor);
        kfree(crtc_state);
        kfree(intel_crtc);
 }
@@ -14401,11 +14592,8 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
        struct intel_crtc *crtc;
 
        drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
-
-       if (!drmmode_crtc) {
-               DRM_ERROR("no such CRTC id\n");
+       if (!drmmode_crtc)
                return -ENOENT;
-       }
 
        crtc = to_intel_crtc(drmmode_crtc);
        pipe_from_crtc_id->pipe = crtc->pipe;
@@ -14432,7 +14620,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
 
 static bool has_edp_a(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!IS_MOBILE(dev))
                return false;
@@ -14448,7 +14636,7 @@ static bool has_edp_a(struct drm_device *dev)
 
 static bool intel_crt_present(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen >= 9)
                return false;
@@ -14474,10 +14662,15 @@ static bool intel_crt_present(struct drm_device *dev)
 
 static void intel_setup_outputs(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        bool dpd_is_edp = false;
 
+       /*
+        * intel_edp_init_connector() depends on this completing first, to
+        * prevent the registration of both eDP and LVDS and the incorrect
+        * sharing of the PPS.
+        */
        intel_lvds_init(dev);
 
        if (intel_crt_present(dev))
@@ -14554,6 +14747,8 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+               bool has_edp, has_port;
+
                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
@@ -14562,27 +14757,37 @@ static void intel_setup_outputs(struct drm_device *dev)
                 * Thus we can't rely on the DP_DETECTED bit alone to detect
                 * eDP ports. Consult the VBT as well as DP_DETECTED to
                 * detect eDP ports.
+                *
+                * Sadly the straps seem to be missing sometimes even for HDMI
+                * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
+                * and VBT for the presence of the port. Additionally we can't
+                * trust the port type the VBT declares as we've seen at least
+                * HDMI ports that the VBT claims are DP or eDP.
                 */
-               if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
-                   !intel_dp_is_edp(dev, PORT_B))
+               has_edp = intel_dp_is_edp(dev, PORT_B);
+               has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+               if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+                       has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+               if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
-               if (I915_READ(VLV_DP_B) & DP_DETECTED ||
-                   intel_dp_is_edp(dev, PORT_B))
-                       intel_dp_init(dev, VLV_DP_B, PORT_B);
 
-               if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
-                   !intel_dp_is_edp(dev, PORT_C))
+               has_edp = intel_dp_is_edp(dev, PORT_C);
+               has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+               if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+                       has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+               if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
-               if (I915_READ(VLV_DP_C) & DP_DETECTED ||
-                   intel_dp_is_edp(dev, PORT_C))
-                       intel_dp_init(dev, VLV_DP_C, PORT_C);
 
                if (IS_CHERRYVIEW(dev)) {
-                       /* eDP not supported on port D, so don't check VBT */
-                       if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
-                               intel_hdmi_init(dev, CHV_HDMID, PORT_D);
-                       if (I915_READ(CHV_DP_D) & DP_DETECTED)
+                       /*
+                        * eDP is not supported on port D,
+                        * so there is no need to worry about it.
+                        */
+                       has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+                       if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
                                intel_dp_init(dev, CHV_DP_D, PORT_D);
+                       if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+                               intel_hdmi_init(dev, CHV_HDMID, PORT_D);
                }
 
                intel_dsi_init(dev);
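
A hedged restatement of the per-port policy encoded above: probe a port when either its strap or the VBT reports it, try DP/eDP first, and fall back to HDMI only when no eDP connector actually registered. The "has_edp &= intel_dp_init(...)" form assumes intel_dp_init() now returns bool; the helper below is illustrative only.

#include <stdbool.h>

/*
 * dp_strap/hdmi_strap: latched DP_DETECTED/SDVO_DETECTED bits
 * vbt_port: intel_bios_is_port_present() result
 * vbt_edp:  intel_dp_is_edp() result
 */
static void probe_dp_then_hdmi(bool dp_strap, bool hdmi_strap,
			       bool vbt_port, bool vbt_edp,
			       bool (*init_dp)(void), void (*init_hdmi)(void))
{
	bool has_edp = vbt_edp;

	if (dp_strap || vbt_port)
		has_edp &= init_dp();	/* false when no eDP panel came up */
	if ((hdmi_strap || vbt_port) && !has_edp)
		init_hdmi();
}
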
@@ -15050,12 +15255,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                dev_priv->display.fdi_link_train = hsw_fdi_link_train;
-               if (IS_BROADWELL(dev_priv)) {
-                       dev_priv->display.modeset_commit_cdclk =
-                               broadwell_modeset_commit_cdclk;
-                       dev_priv->display.modeset_calc_cdclk =
-                               broadwell_modeset_calc_cdclk;
-               }
+       }
+
+       if (IS_BROADWELL(dev_priv)) {
+               dev_priv->display.modeset_commit_cdclk =
+                       broadwell_modeset_commit_cdclk;
+               dev_priv->display.modeset_calc_cdclk =
+                       broadwell_modeset_calc_cdclk;
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                dev_priv->display.modeset_commit_cdclk =
                        valleyview_modeset_commit_cdclk;
@@ -15063,9 +15269,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
                        valleyview_modeset_calc_cdclk;
        } else if (IS_BROXTON(dev_priv)) {
                dev_priv->display.modeset_commit_cdclk =
-                       broxton_modeset_commit_cdclk;
+                       bxt_modeset_commit_cdclk;
+               dev_priv->display.modeset_calc_cdclk =
+                       bxt_modeset_calc_cdclk;
+       } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+               dev_priv->display.modeset_commit_cdclk =
+                       skl_modeset_commit_cdclk;
                dev_priv->display.modeset_calc_cdclk =
-                       broxton_modeset_calc_cdclk;
+                       skl_modeset_calc_cdclk;
        }
 
        switch (INTEL_INFO(dev_priv)->gen) {
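
The reshuffle above decouples the HSW/BDW FDI hook from the cdclk hooks, so Broadwell now falls into the same per-platform chain as VLV/CHV, BXT and SKL/KBL. Platforms without an entry simply leave the pointers NULL; a self-contained sketch of the resulting call-site pattern (names illustrative):

struct display_hooks {
	int (*modeset_calc_cdclk)(void *state);	/* NULL on fixed-cdclk parts */
};

static int calc_cdclk(const struct display_hooks *hooks, void *state)
{
	if (!hooks->modeset_calc_cdclk)
		return 0;	/* nothing to recompute */

	return hooks->modeset_calc_cdclk(state);
}

The WARN_ON(dev_priv->display.modeset_calc_cdclk) in the readout hunk below relies on exactly this NULL convention.
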
@@ -15104,7 +15315,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
  */
 static void quirk_pipea_force(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->quirks |= QUIRK_PIPEA_FORCE;
        DRM_INFO("applying pipe a force quirk\n");
@@ -15112,7 +15323,7 @@ static void quirk_pipea_force(struct drm_device *dev)
 
 static void quirk_pipeb_force(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->quirks |= QUIRK_PIPEB_FORCE;
        DRM_INFO("applying pipe b force quirk\n");
@@ -15123,7 +15334,7 @@ static void quirk_pipeb_force(struct drm_device *dev)
  */
 static void quirk_ssc_force_disable(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
        DRM_INFO("applying lvds SSC disable quirk\n");
 }
@@ -15134,7 +15345,7 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
  */
 static void quirk_invert_brightness(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
        DRM_INFO("applying inverted panel brightness quirk\n");
 }
@@ -15142,7 +15353,7 @@ static void quirk_invert_brightness(struct drm_device *dev)
 /* Some VBT's incorrectly indicate no backlight is present */
 static void quirk_backlight_present(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
        DRM_INFO("applying backlight present quirk\n");
 }
@@ -15268,7 +15479,7 @@ static void intel_init_quirks(struct drm_device *dev)
 /* Disable the VGA plane that we never use */
 static void i915_disable_vga(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u8 sr1;
        i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
@@ -15286,14 +15497,14 @@ static void i915_disable_vga(struct drm_device *dev)
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_update_cdclk(dev);
 
        dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
 
        intel_init_clock_gating(dev);
-       intel_enable_gt_powersave(dev);
+       intel_enable_gt_powersave(dev_priv);
 }
 
 /*
@@ -15363,7 +15574,6 @@ retry:
        }
 
        /* Write calculated watermark values back */
-       to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
        for_each_crtc_in_state(state, crtc, cstate, i) {
                struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
 
@@ -15461,11 +15671,13 @@ void intel_modeset_init(struct drm_device *dev)
        }
 
        intel_update_czclk(dev_priv);
-       intel_update_rawclk(dev_priv);
        intel_update_cdclk(dev);
 
        intel_shared_dpll_init(dev);
 
+       if (dev_priv->max_cdclk_freq == 0)
+               intel_update_max_cdclk(dev);
+
        /* Just disable it once at startup */
        i915_disable_vga(dev);
        intel_setup_outputs(dev);
@@ -15533,7 +15745,7 @@ static bool
 intel_check_plane_mapping(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val;
 
        if (INTEL_INFO(dev)->num_pipes == 1)
@@ -15573,7 +15785,7 @@ static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
 static void intel_sanitize_crtc(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
 
        /* Clear any frame start delays used for debugging left by the BIOS */
@@ -15606,8 +15818,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
        if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
                bool plane;
 
-               DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
-                             crtc->base.base.id);
+               DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
+                             crtc->base.base.id, crtc->base.name);
 
                /* Pipe has the wrong plane attached and the plane is active.
                 * Temporarily change the plane mapping and disable everything
@@ -15698,7 +15910,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 
 void i915_redisable_vga_power_on(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
 
        if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
@@ -15709,7 +15921,7 @@ void i915_redisable_vga_power_on(struct drm_device *dev)
 
 void i915_redisable_vga(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* This function can be called either from intel_modeset_setup_hw_state or
         * at a very early point in our resume sequence, where the power well
@@ -15749,7 +15961,7 @@ static void readout_plane_state(struct intel_crtc *crtc)
 
 static void intel_modeset_readout_hw_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
@@ -15775,26 +15987,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                if (crtc_state->base.active) {
                        dev_priv->active_crtcs |= 1 << crtc->pipe;
 
-                       if (IS_BROADWELL(dev_priv)) {
+                       if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
                                pixclk = ilk_pipe_pixel_rate(crtc_state);
-
-                               /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-                               if (crtc_state->ips_enabled)
-                                       pixclk = DIV_ROUND_UP(pixclk * 100, 95);
-                       } else if (IS_VALLEYVIEW(dev_priv) ||
-                                  IS_CHERRYVIEW(dev_priv) ||
-                                  IS_BROXTON(dev_priv))
+                       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                                pixclk = crtc_state->base.adjusted_mode.crtc_clock;
                        else
                                WARN_ON(dev_priv->display.modeset_calc_cdclk);
+
+                       /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+                       if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+                               pixclk = DIV_ROUND_UP(pixclk * 100, 95);
                }
 
                dev_priv->min_pixclk[crtc->pipe] = pixclk;
 
                readout_plane_state(crtc);
 
-               DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
-                             crtc->base.base.id,
+               DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
+                             crtc->base.base.id, crtc->base.name,
                              crtc->active ? "enabled" : "disabled");
        }
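
The readout now computes the pixel rate uniformly and applies the BDW IPS headroom as a separate step. A worked example of the 95% rule, using an assumed 450 MHz pixel clock:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pixclk = 450000;	/* kHz, assumed example mode */
	unsigned int min_cdclk = DIV_ROUND_UP(pixclk * 100, 95);

	/* 450000 * 100 / 95 rounds up to 473685 kHz: with IPS enabled on
	 * BDW, cdclk needs roughly 5.3% headroom over the pixel rate. */
	printf("min cdclk with IPS: %u kHz\n", min_cdclk);
	return 0;
}
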
 
@@ -15820,6 +16030,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                if (encoder->get_hw_state(encoder, &pipe)) {
                        crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                        encoder->base.crtc = &crtc->base;
+                       crtc->config->output_types |= 1 << encoder->type;
                        encoder->get_config(encoder, crtc->config);
                } else {
                        encoder->base.crtc = NULL;
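
Recording 1 << encoder->type during readout turns crtc->config->output_types into a bitmask of every output type driven by the pipe. A hedged sketch of the membership test this presumably enables (the field name is from the hunk above; the helper and its use are illustrative):

static bool crtc_has_type(const struct intel_crtc_state *crtc_state,
			  int output_type)
{
	return (crtc_state->output_types & (1 << output_type)) != 0;
}

This is consistent with the single-purpose has_dp_encoder flag being dropped from the DP hunks later in this diff.
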
@@ -15904,7 +16115,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 static void
 intel_modeset_setup_hw_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
@@ -16025,15 +16236,16 @@ retry:
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
        int ret;
 
-       intel_init_gt_powersave(dev);
+       intel_init_gt_powersave(dev_priv);
 
        intel_modeset_init_hw(dev);
 
-       intel_setup_overlay(dev);
+       intel_setup_overlay(dev_priv);
 
        /*
         * Make sure any fbs we allocated at startup are properly
@@ -16059,26 +16271,36 @@ void intel_modeset_gem_init(struct drm_device *dev)
                        c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
                }
        }
+}
+
+int intel_connector_register(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       int ret;
+
+       ret = intel_backlight_device_register(intel_connector);
+       if (ret)
+               goto err;
+
+       return 0;
 
-       intel_backlight_register(dev);
+err:
+       return ret;
 }
 
-void intel_connector_unregister(struct intel_connector *intel_connector)
+void intel_connector_unregister(struct drm_connector *connector)
 {
-       struct drm_connector *connector = &intel_connector->base;
+       struct intel_connector *intel_connector = to_intel_connector(connector);
 
+       intel_backlight_device_unregister(intel_connector);
        intel_panel_destroy_backlight(connector);
-       drm_connector_unregister(connector);
 }
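
intel_connector_register()/intel_connector_unregister() now take plain struct drm_connector pointers, which matches the shape of the DRM core's per-connector registration hooks of this era. A hedged sketch of the expected wiring (the funcs table and its other entries are illustrative):

static const struct drm_connector_funcs intel_connector_funcs_sketch = {
	/* .detect, .fill_modes, .destroy, ... elided ... */
	.late_register = intel_connector_register,
	.early_unregister = intel_connector_unregister,
};
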
 
 void intel_modeset_cleanup(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_connector *connector;
-
-       intel_disable_gt_powersave(dev);
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
-       intel_backlight_unregister(dev);
+       intel_disable_gt_powersave(dev_priv);
 
        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
@@ -16100,27 +16322,15 @@ void intel_modeset_cleanup(struct drm_device *dev)
        /* flush any delayed tasks or pending work */
        flush_scheduled_work();
 
-       /* destroy the backlight and sysfs files before encoders/connectors */
-       for_each_intel_connector(dev, connector)
-               connector->unregister(connector);
-
        drm_mode_config_cleanup(dev);
 
-       intel_cleanup_overlay(dev);
+       intel_cleanup_overlay(dev_priv);
 
-       intel_cleanup_gt_powersave(dev);
+       intel_cleanup_gt_powersave(dev_priv);
 
        intel_teardown_gmbus(dev);
 }
 
-/*
- * Return which encoder is currently attached for connector.
- */
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
-{
-       return &intel_attached_encoder(connector)->base;
-}
-
 void intel_connector_attach_encoder(struct intel_connector *connector,
                                    struct intel_encoder *encoder)
 {
@@ -16134,7 +16344,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
  */
 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
        u16 gmch_ctrl;
 
@@ -16204,9 +16414,8 @@ struct intel_display_error_state {
 };
 
 struct intel_display_error_state *
-intel_display_capture_error_state(struct drm_device *dev)
+intel_display_capture_error_state(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_display_error_state *error;
        int transcoders[] = {
                TRANSCODER_A,
@@ -16216,14 +16425,14 @@ intel_display_capture_error_state(struct drm_device *dev)
        };
        int i;
 
-       if (INTEL_INFO(dev)->num_pipes == 0)
+       if (INTEL_INFO(dev_priv)->num_pipes == 0)
                return NULL;
 
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
        if (error == NULL)
                return NULL;
 
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
        for_each_pipe(dev_priv, i) {
@@ -16239,25 +16448,25 @@ intel_display_capture_error_state(struct drm_device *dev)
 
                error->plane[i].control = I915_READ(DSPCNTR(i));
                error->plane[i].stride = I915_READ(DSPSTRIDE(i));
-               if (INTEL_INFO(dev)->gen <= 3) {
+               if (INTEL_GEN(dev_priv) <= 3) {
                        error->plane[i].size = I915_READ(DSPSIZE(i));
                        error->plane[i].pos = I915_READ(DSPPOS(i));
                }
-               if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
+               if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
                        error->plane[i].addr = I915_READ(DSPADDR(i));
-               if (INTEL_INFO(dev)->gen >= 4) {
+               if (INTEL_GEN(dev_priv) >= 4) {
                        error->plane[i].surface = I915_READ(DSPSURF(i));
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }
 
                error->pipe[i].source = I915_READ(PIPESRC(i));
 
-               if (HAS_GMCH_DISPLAY(dev))
+               if (HAS_GMCH_DISPLAY(dev_priv))
                        error->pipe[i].stat = I915_READ(PIPESTAT(i));
        }
 
        /* Note: this does not include DSI transcoders. */
-       error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+       error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
        if (HAS_DDI(dev_priv))
                error->num_transcoders++; /* Account for eDP. */
 
@@ -16291,7 +16500,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                                struct drm_device *dev,
                                struct intel_display_error_state *error)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        if (!error)
index f192f58708c25dbedf74994b4a377098093f0dc3..21b04c3eda41bf6f7685b6080b9e4a5228f3a116 100644 (file)
@@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe);
 static void intel_dp_unset_edid(struct intel_dp *intel_dp);
 
-static unsigned int intel_dp_unused_lane_mask(int lane_count)
-{
-       return ~((1 << lane_count) - 1) & 0xf;
-}
-
 static int
 intel_dp_max_link_bw(struct intel_dp  *intel_dp)
 {
@@ -267,7 +262,7 @@ static void pps_lock(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
 
        /*
@@ -285,7 +280,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
 
        mutex_unlock(&dev_priv->pps_mutex);
@@ -299,7 +294,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe = intel_dp->pps_pipe;
        bool pll_enabled, release_cl_override = false;
        enum dpio_phy phy = DPIO_PHY(pipe);
@@ -373,7 +368,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
        enum pipe pipe;
@@ -431,6 +426,37 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
        return intel_dp->pps_pipe;
 }
 
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       lockdep_assert_held(&dev_priv->pps_mutex);
+
+       /* We should never land here with regular DP ports */
+       WARN_ON(!is_edp(intel_dp));
+
+       /*
+        * TODO: BXT has 2 PPS instances. The correct port->PPS instance
+        * mapping needs to be retrieved from the VBT; for now, just
+        * hard-code instance #0.
+        */
+       if (!intel_dp->pps_reset)
+               return 0;
+
+       intel_dp->pps_reset = false;
+
+       /*
+        * Only the HW needs to be reprogrammed; the SW state is fixed and
+        * has been set up during connector init.
+        */
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+
+       return 0;
+}
+
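
bxt_power_sequencer_idx() pairs with the pps_reset flag that intel_power_sequencer_reset() sets for BXT further down: a reset only marks the hardware state stale, and the next register lookup reprograms it. A self-contained sketch of that lazy-reprogram handshake (names illustrative):

#include <stdbool.h>

struct pps_state {
	bool reset;	/* set by the power-sequencer reset path */
};

static int pps_idx(struct pps_state *pps, void (*reprogram_hw)(void))
{
	if (!pps->reset)
		return 0;	/* instance #0, HW still programmed */

	pps->reset = false;
	reprogram_hw();		/* SW state was fixed at connector init */
	return 0;
}
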
 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
                               enum pipe pipe);
 
@@ -480,7 +506,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -512,12 +538,13 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 }
 
-void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *encoder;
 
-       if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
+       if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+                   !IS_BROXTON(dev)))
                return;
 
        /*
@@ -537,34 +564,71 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
                        continue;
 
                intel_dp = enc_to_intel_dp(&encoder->base);
-               intel_dp->pps_pipe = INVALID_PIPE;
+               if (IS_BROXTON(dev))
+                       intel_dp->pps_reset = true;
+               else
+                       intel_dp->pps_pipe = INVALID_PIPE;
+       }
+}
+
+struct pps_registers {
+       i915_reg_t pp_ctrl;
+       i915_reg_t pp_stat;
+       i915_reg_t pp_on;
+       i915_reg_t pp_off;
+       i915_reg_t pp_div;
+};
+
+static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
+                                   struct intel_dp *intel_dp,
+                                   struct pps_registers *regs)
+{
+       memset(regs, 0, sizeof(*regs));
+
+       if (IS_BROXTON(dev_priv)) {
+               int idx = bxt_power_sequencer_idx(intel_dp);
+
+               regs->pp_ctrl = BXT_PP_CONTROL(idx);
+               regs->pp_stat = BXT_PP_STATUS(idx);
+               regs->pp_on = BXT_PP_ON_DELAYS(idx);
+               regs->pp_off = BXT_PP_OFF_DELAYS(idx);
+       } else if (HAS_PCH_SPLIT(dev_priv)) {
+               regs->pp_ctrl = PCH_PP_CONTROL;
+               regs->pp_stat = PCH_PP_STATUS;
+               regs->pp_on = PCH_PP_ON_DELAYS;
+               regs->pp_off = PCH_PP_OFF_DELAYS;
+               regs->pp_div = PCH_PP_DIVISOR;
+       } else {
+               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+               regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe);
+               regs->pp_stat = VLV_PIPE_PP_STATUS(pipe);
+               regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe);
+               regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe);
+               regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe);
        }
 }
 
 static i915_reg_t
 _pp_ctrl_reg(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct pps_registers regs;
 
-       if (IS_BROXTON(dev))
-               return BXT_PP_CONTROL(0);
-       else if (HAS_PCH_SPLIT(dev))
-               return PCH_PP_CONTROL;
-       else
-               return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+       intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
+                               &regs);
+
+       return regs.pp_ctrl;
 }
 
 static i915_reg_t
 _pp_stat_reg(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct pps_registers regs;
 
-       if (IS_BROXTON(dev))
-               return BXT_PP_STATUS(0);
-       else if (HAS_PCH_SPLIT(dev))
-               return PCH_PP_STATUS;
-       else
-               return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+       intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
+                               &regs);
+
+       return regs.pp_stat;
 }
 
 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
@@ -575,7 +639,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
        struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
                                                 edp_notifier);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!is_edp(intel_dp) || code != SYS_RESTART)
                return 0;
@@ -606,7 +670,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
 static bool edp_have_panel_power(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -620,7 +684,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -635,7 +699,7 @@ static void
 intel_dp_check_edp(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (!is_edp(intel_dp))
                return;
@@ -653,7 +717,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;
@@ -663,7 +727,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies_timeout(10));
        else
-               done = wait_for_atomic(C, 10) == 0;
+               done = wait_for(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
@@ -775,6 +839,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
               DP_AUX_CH_CTL_TIME_OUT_1600us |
               DP_AUX_CH_CTL_RECEIVE_ERROR |
               (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+              DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
               DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
 }
 
@@ -785,7 +850,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t aux_clock_divider;
        int i, ret, recv_bytes;
@@ -1181,48 +1246,21 @@ static void intel_aux_reg_init(struct intel_dp *intel_dp)
 static void
 intel_dp_aux_fini(struct intel_dp *intel_dp)
 {
-       drm_dp_aux_unregister(&intel_dp->aux);
        kfree(intel_dp->aux.name);
 }
 
-static int
+static void
 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;
-       int ret;
 
        intel_aux_reg_init(intel_dp);
+       drm_dp_aux_init(&intel_dp->aux);
 
+       /* Failure to allocate our preferred name is not critical */
        intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
-       if (!intel_dp->aux.name)
-               return -ENOMEM;
-
-       intel_dp->aux.dev = connector->base.kdev;
        intel_dp->aux.transfer = intel_dp_aux_transfer;
-
-       DRM_DEBUG_KMS("registering %s bus for %s\n",
-                     intel_dp->aux.name,
-                     connector->base.kdev->kobj.name);
-
-       ret = drm_dp_aux_register(&intel_dp->aux);
-       if (ret < 0) {
-               DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
-                         intel_dp->aux.name, ret);
-               kfree(intel_dp->aux.name);
-               return ret;
-       }
-
-       return 0;
-}
-
-static void
-intel_dp_connector_unregister(struct intel_connector *intel_connector)
-{
-       struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
-
-       intel_dp_aux_fini(intel_dp);
-       intel_connector_unregister(intel_connector);
 }
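
drm_dp_aux_init() now only initialises the channel, and both drm_dp_aux_register() and the removed intel_dp_connector_unregister() presumably move to the connector register/unregister hooks introduced earlier in this diff, once the connector's kdev exists. A hedged sketch of that shape (the function is illustrative):

static int intel_dp_connector_register_sketch(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	intel_dp->aux.dev = connector->kdev;
	return drm_dp_aux_register(&intel_dp->aux);
}
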
 
 static int
@@ -1435,7 +1473,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1463,7 +1501,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
                pipe_config->has_pch_encoder = true;
 
-       pipe_config->has_dp_encoder = true;
        pipe_config->has_drrs = false;
        pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
 
@@ -1582,6 +1619,27 @@ found:
                                &pipe_config->dp_m2_n2);
        }
 
+       /*
+        * DPLL0 VCO may need to be adjusted to get the correct
+        * clock for eDP. This will affect cdclk as well.
+        */
+       if (is_edp(intel_dp) &&
+           (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
+               int vco;
+
+               switch (pipe_config->port_clock / 2) {
+               case 108000:
+               case 216000:
+                       vco = 8640000;
+                       break;
+               default:
+                       vco = 8100000;
+                       break;
+               }
+
+               to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
+       }
+
        if (!HAS_DDI(dev))
                intel_dp_set_clock(encoder, pipe_config);
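
Why those two cases: with port_clock encoding the link rate in kHz (as elsewhere in i915), the switch matches port clocks of 216000 and 432000 — the eDP 2.16/4.32 GHz intermediate rates — which divide the 8640 MHz VCO evenly but not the default 8100 MHz. A quick check:

#include <stdio.h>

int main(void)
{
	const int rates[] = { 216000, 432000 };		/* port_clock, kHz */
	const int vcos[] = { 8100000, 8640000 };	/* kHz */

	for (int v = 0; v < 2; v++)
		for (int r = 0; r < 2; r++)
			printf("%d %% %d = %d\n", vcos[v], rates[r],
			       vcos[v] % rates[r]);
	/* 8640000 divides evenly by both rates; 8100000 by neither. */
	return 0;
}
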
 
@@ -1598,7 +1656,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
 static void intel_dp_prepare(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -1686,16 +1744,21 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
 #define IDLE_CYCLE_MASK                (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
 #define IDLE_CYCLE_VALUE       (0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
 
+static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
+                                  struct intel_dp *intel_dp);
+
 static void wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t pp_stat_reg, pp_ctrl_reg;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
+       intel_pps_verify_state(dev_priv, intel_dp);
+
        pp_stat_reg = _pp_stat_reg(intel_dp);
        pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
@@ -1704,8 +1767,9 @@ static void wait_panel_status(struct intel_dp *intel_dp,
                        I915_READ(pp_stat_reg),
                        I915_READ(pp_ctrl_reg));
 
-       if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
-                     5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
+       if (intel_wait_for_register(dev_priv,
+                                   pp_stat_reg, mask, value,
+                                   5000))
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                                I915_READ(pp_stat_reg),
                                I915_READ(pp_ctrl_reg));
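
intel_wait_for_register() replaces the open-coded _wait_for(): poll until (read & mask) == value, failing with nonzero after a millisecond timeout, which is what lets the caller above keep its error path unchanged. A self-contained sketch of that contract (names and the fixed 10 us poll interval are illustrative):

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

static int wait_for_register(uint32_t (*read_reg)(void), uint32_t mask,
			     uint32_t value, unsigned int timeout_ms)
{
	unsigned int waited_us = 0;

	while ((read_reg() & mask) != value) {
		if (waited_us >= timeout_ms * 1000)
			return -ETIMEDOUT;
		usleep(10);	/* the real helper backs off adaptively */
		waited_us += 10;
	}
	return 0;
}
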
@@ -1765,7 +1829,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
 static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 control;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1788,7 +1852,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        u32 pp;
        i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -1861,7 +1925,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *intel_dig_port =
                dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -1930,8 +1994,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
  */
 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 {
-       struct drm_i915_private *dev_priv =
-               intel_dp_to_dev(intel_dp)->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -1952,7 +2015,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 static void edp_panel_on(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2013,7 +2076,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        u32 pp;
        i915_reg_t pp_ctrl_reg;
@@ -2065,7 +2128,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2106,7 +2169,7 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp;
        i915_reg_t pp_ctrl_reg;
 
@@ -2222,7 +2285,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
         * 2. Program DP PLL enable
         */
        if (IS_GEN5(dev_priv))
-               intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
+               intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
 
        intel_dp->DP |= DP_PLL_ENABLE;
 
@@ -2287,7 +2350,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        u32 tmp;
        bool ret;
@@ -2340,7 +2403,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        u32 tmp, flags = 0;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
@@ -2378,8 +2441,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
            !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
                pipe_config->limited_color_range = true;
 
-       pipe_config->has_dp_encoder = true;
-
        pipe_config->lane_count =
                ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
 
@@ -2460,55 +2521,11 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
        intel_dp_link_down(intel_dp);
 }
 
-static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
-                                    bool reset)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       enum pipe pipe = crtc->pipe;
-       uint32_t val;
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
-       if (reset)
-               val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       else
-               val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
-       if (crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
-               if (reset)
-                       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-               else
-                       val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
-       }
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
-       val |= CHV_PCS_REQ_SOFTRESET_EN;
-       if (reset)
-               val &= ~DPIO_PCS_CLK_SOFT_RESET;
-       else
-               val |= DPIO_PCS_CLK_SOFT_RESET;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
-       if (crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
-               val |= CHV_PCS_REQ_SOFTRESET_EN;
-               if (reset)
-                       val &= ~DPIO_PCS_CLK_SOFT_RESET;
-               else
-                       val |= DPIO_PCS_CLK_SOFT_RESET;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
-       }
-}
-
 static void chv_post_disable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_dp_link_down(intel_dp);
 
@@ -2527,7 +2544,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
 
        if (HAS_DDI(dev)) {
@@ -2607,7 +2624,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 static void intel_dp_enable_port(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc =
                to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
 
@@ -2636,7 +2653,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);
        enum pipe pipe = crtc->pipe;
@@ -2709,7 +2726,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
        enum pipe pipe = intel_dp->pps_pipe;
        i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
 
@@ -2735,7 +2752,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 static void vlv_steal_power_sequencer(struct drm_device *dev,
                                      enum pipe pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2773,7 +2790,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2811,266 +2828,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
 
 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
 {
-       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
-       int pipe = intel_crtc->pipe;
-       u32 val;
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
-       val = 0;
-       if (pipe)
-               val |= (1<<21);
-       else
-               val &= ~(1<<21);
-       val |= 0x001000c4;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
-
-       mutex_unlock(&dev_priv->sb_lock);
+       vlv_phy_pre_encoder_enable(encoder);
 
        intel_enable_dp(encoder);
 }
 
 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
-       int pipe = intel_crtc->pipe;
-
        intel_dp_prepare(encoder);
 
-       /* Program Tx lane resets to default */
-       mutex_lock(&dev_priv->sb_lock);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
-                        DPIO_PCS_TX_LANE2_RESET |
-                        DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
-                        DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
-                        DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
-                        (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
-                                DPIO_PCS_CLK_SOFT_RESET);
-
-       /* Fix up inter-pair skew failure */
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
-       mutex_unlock(&dev_priv->sb_lock);
+       vlv_phy_pre_pll_enable(encoder);
 }
 
 static void chv_pre_enable_dp(struct intel_encoder *encoder)
 {
-       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
-       int pipe = intel_crtc->pipe;
-       int data, i, stagger;
-       u32 val;
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       /* allow hardware to manage TX FIFO reset source */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
-       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
-       if (intel_crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
-               val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-       }
-
-       /* Program Tx lane latency optimal setting*/
-       for (i = 0; i < intel_crtc->config->lane_count; i++) {
-               /* Set the upar bit */
-               if (intel_crtc->config->lane_count == 1)
-                       data = 0x0;
-               else
-                       data = (i == 1) ? 0x0 : 0x1;
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
-                               data << DPIO_UPAR_SHIFT);
-       }
-
-       /* Data lane stagger programming */
-       if (intel_crtc->config->port_clock > 270000)
-               stagger = 0x18;
-       else if (intel_crtc->config->port_clock > 135000)
-               stagger = 0xd;
-       else if (intel_crtc->config->port_clock > 67500)
-               stagger = 0x7;
-       else if (intel_crtc->config->port_clock > 33750)
-               stagger = 0x4;
-       else
-               stagger = 0x2;
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
-       val |= DPIO_TX2_STAGGER_MASK(0x1f);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
-       if (intel_crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
-               val |= DPIO_TX2_STAGGER_MASK(0x1f);
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-       }
-
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
-                      DPIO_LANESTAGGER_STRAP(stagger) |
-                      DPIO_LANESTAGGER_STRAP_OVRD |
-                      DPIO_TX1_STAGGER_MASK(0x1f) |
-                      DPIO_TX1_STAGGER_MULT(6) |
-                      DPIO_TX2_STAGGER_MULT(0));
-
-       if (intel_crtc->config->lane_count > 2) {
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
-                              DPIO_LANESTAGGER_STRAP(stagger) |
-                              DPIO_LANESTAGGER_STRAP_OVRD |
-                              DPIO_TX1_STAGGER_MASK(0x1f) |
-                              DPIO_TX1_STAGGER_MULT(7) |
-                              DPIO_TX2_STAGGER_MULT(5));
-       }
-
-       /* Deassert data lane reset */
-       chv_data_lane_soft_reset(encoder, false);
-
-       mutex_unlock(&dev_priv->sb_lock);
+       chv_phy_pre_encoder_enable(encoder);
 
        intel_enable_dp(encoder);
 
        /* Second common lane will stay alive on its own now */
-       if (dport->release_cl2_override) {
-               chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
-               dport->release_cl2_override = false;
-       }
+       chv_phy_release_cl2_override(encoder);
 }
 
 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
-       enum pipe pipe = intel_crtc->pipe;
-       unsigned int lane_mask =
-               intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
-       u32 val;
-
        intel_dp_prepare(encoder);
 
-       /*
-        * Must trick the second common lane into life.
-        * Otherwise we can't even access the PLL.
-        */
-       if (ch == DPIO_CH0 && pipe == PIPE_B)
-               dport->release_cl2_override =
-                       !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
-
-       chv_phy_powergate_lanes(encoder, true, lane_mask);
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       /* Assert data lane reset */
-       chv_data_lane_soft_reset(encoder, true);
-
-       /* program left/right clock distribution */
-       if (pipe != PIPE_B) {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
-               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
-               if (ch == DPIO_CH0)
-                       val |= CHV_BUFLEFTENA1_FORCE;
-               if (ch == DPIO_CH1)
-                       val |= CHV_BUFRIGHTENA1_FORCE;
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
-       } else {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
-               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
-               if (ch == DPIO_CH0)
-                       val |= CHV_BUFLEFTENA2_FORCE;
-               if (ch == DPIO_CH1)
-                       val |= CHV_BUFRIGHTENA2_FORCE;
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
-       }
-
-       /* program clock channel usage */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
-       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
-       if (pipe != PIPE_B)
-               val &= ~CHV_PCS_USEDCLKCHANNEL;
-       else
-               val |= CHV_PCS_USEDCLKCHANNEL;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
-
-       if (intel_crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
-               val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
-               if (pipe != PIPE_B)
-                       val &= ~CHV_PCS_USEDCLKCHANNEL;
-               else
-                       val |= CHV_PCS_USEDCLKCHANNEL;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
-       }
-
-       /*
-        * This a a bit weird since generally CL
-        * matches the pipe, but here we need to
-        * pick the CL based on the port.
-        */
-       val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
-       if (pipe != PIPE_B)
-               val &= ~CHV_CMN_USEDCLKCHANNEL;
-       else
-               val |= CHV_CMN_USEDCLKCHANNEL;
-       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
-
-       mutex_unlock(&dev_priv->sb_lock);
+       chv_phy_pre_pll_enable(encoder);
 }
 
 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
-       u32 val;
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       /* disable left/right clock distribution */
-       if (pipe != PIPE_B) {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
-               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
-       } else {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
-               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
-       }
-
-       mutex_unlock(&dev_priv->sb_lock);
-
-       /*
-        * Leave the power down bit cleared for at least one
-        * lane so that chv_powergate_phy_ch() will power
-        * on something when the channel is otherwise unused.
-        * When the port is off and the override is removed
-        * the lanes power down anyway, so otherwise it doesn't
-        * really matter what the state of power down bits is
-        * after this.
-        */
-       chv_phy_powergate_lanes(encoder, false, 0x0);
+       chv_phy_post_pll_disable(encoder);
 }
 
 /*
@@ -3089,7 +2878,7 @@ uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = dp_to_dig_port(intel_dp)->port;
 
        if (IS_BROXTON(dev))
@@ -3178,16 +2967,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 
 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(dport->base.base.crtc);
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
-       enum dpio_channel port = vlv_dport_to_channel(dport);
-       int pipe = intel_crtc->pipe;
 
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3262,37 +3045,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
                return 0;
        }
 
-       mutex_lock(&dev_priv->sb_lock);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
-                        uniqtranscale_reg_value);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
-       mutex_unlock(&dev_priv->sb_lock);
+       vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
+                                uniqtranscale_reg_value, 0);
 
        return 0;
 }
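/*
 * Editorial sketch (assumption): the extracted VLV helper is presumed
 * to take the three register values computed above plus a TX3
 * de-emphasis override, which DP leaves at 0:
 *
 *   void vlv_set_phy_signal_level(struct intel_encoder *encoder,
 *                                 u32 demph_reg_value,
 *                                 u32 preemph_reg_value,
 *                                 u32 uniqtranscale_reg_value,
 *                                 u32 tx3_demph_reg_value);
 */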
 
-static bool chv_need_uniq_trans_scale(uint8_t train_set)
-{
-       return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
-               (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
-}
-
 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
 {
-       struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
-       u32 deemph_reg_value, margin_reg_value, val;
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       u32 deemph_reg_value, margin_reg_value;
+       bool uniq_trans_scale = false;
        uint8_t train_set = intel_dp->train_set[0];
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
-       enum pipe pipe = intel_crtc->pipe;
-       int i;
 
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3312,7 +3076,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
                case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
                        deemph_reg_value = 128;
                        margin_reg_value = 154;
-                       /* FIXME extra to set for 1200 */
+                       uniq_trans_scale = true;
                        break;
                default:
                        return 0;
@@ -3364,88 +3128,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
                return 0;
        }
 
-       mutex_lock(&dev_priv->sb_lock);
-
-       /* Clear calc init */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
-       val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
-       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
-       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
-       if (intel_crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
-               val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
-               val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
-               val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-       }
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
-       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
-       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
-
-       if (intel_crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
-               val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
-               val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
-       }
-
-       /* Program swing deemph */
-       for (i = 0; i < intel_crtc->config->lane_count; i++) {
-               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
-               val &= ~DPIO_SWING_DEEMPH9P5_MASK;
-               val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
-       }
-
-       /* Program swing margin */
-       for (i = 0; i < intel_crtc->config->lane_count; i++) {
-               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-
-               val &= ~DPIO_SWING_MARGIN000_MASK;
-               val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
-
-               /*
-                * Supposedly this value shouldn't matter when unique transition
-                * scale is disabled, but in fact it does matter. Let's just
-                * always program the same value and hope it's OK.
-                */
-               val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
-               val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
-
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
-       }
-
-       /*
-        * The document said it needs to set bit 27 for ch0 and bit 26
-        * for ch1. Might be a typo in the doc.
-        * For now, for this unique transition scale selection, set bit
-        * 27 for ch0 and ch1.
-        */
-       for (i = 0; i < intel_crtc->config->lane_count; i++) {
-               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
-               if (chv_need_uniq_trans_scale(train_set))
-                       val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
-               else
-                       val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
-       }
-
-       /* Start swing calculation */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
-       val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
-       if (intel_crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
-               val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-       }
-
-       mutex_unlock(&dev_priv->sb_lock);
+       chv_set_phy_signal_level(encoder, deemph_reg_value,
+                                margin_reg_value, uniq_trans_scale);
 
        return 0;
 }
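/*
 * Editorial sketch (assumption): the CHV counterpart is presumed to
 * bundle the whole calc-init / swing-margin / deemph / calc-start
 * sequence removed above behind one call:
 *
 *   void chv_set_phy_signal_level(struct intel_encoder *encoder,
 *                                 u32 deemph_reg_value,
 *                                 u32 margin_reg_value,
 *                                 bool uniq_trans_scale);
 */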
@@ -3612,7 +3296,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
        uint32_t val;
 
@@ -3634,8 +3318,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
        if (port == PORT_A)
                return;
 
-       if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
-                    1))
+       if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+                                   DP_TP_STATUS_IDLE_DONE,
+                                   DP_TP_STATUS_IDLE_DONE,
+                                   1))
                DRM_ERROR("Timed out waiting for DP idle patterns\n");
 }
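/*
 * Editorial sketch of the assumed intel_wait_for_register() contract:
 * poll until (I915_READ(reg) & mask) == value or timeout_ms expires,
 * returning 0 on success and a negative errno on timeout. Illustrative
 * only; the names below are hypothetical, not the driver's real
 * implementation.
 */
static int wait_for_register_sketch(struct drm_i915_private *dev_priv,
                                    i915_reg_t reg, u32 mask, u32 value,
                                    unsigned int timeout_ms)
{
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms) + 1;

        for (;;) {
                if ((I915_READ(reg) & mask) == value)
                        return 0;               /* condition met */
                if (time_after(jiffies, deadline))
                        return -ETIMEDOUT;      /* give up */
                usleep_range(10, 50);           /* back off briefly */
        }
}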
 
@@ -3646,7 +3332,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
        enum port port = intel_dig_port->port;
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t DP = intel_dp->DP;
 
        if (WARN_ON(HAS_DDI(dev)))
@@ -3698,7 +3384,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                I915_WRITE(intel_dp->output_reg, DP);
                POSTING_READ(intel_dp->output_reg);
 
-               intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+               intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
                intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
        }
@@ -3713,8 +3399,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint8_t rev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
                             sizeof(intel_dp->dpcd)) < 0)
@@ -3771,6 +3456,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                        DRM_DEBUG_KMS("PSR2 %s on sink",
                                dev_priv->psr.psr2_support ? "supported" : "not supported");
                }
+
+               /* Read the eDP Display control capabilities registers */
+               memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
+               if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
+                               (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+                                               intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
+                                                               sizeof(intel_dp->edp_dpcd)))
+                       DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+                                       intel_dp->edp_dpcd);
        }
 
        DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
@@ -3778,10 +3472,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
 
        /* Intermediate frequency support */
-       if (is_edp(intel_dp) &&
-           (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
-           (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
-           (rev >= 0x03)) { /* eDp v1.4 or higher */
+       if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */
                __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
                int i;
 
@@ -4559,7 +4250,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
        }
 
        if (intel_encoder->type != INTEL_OUTPUT_EDP)
-               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+               intel_encoder->type = INTEL_OUTPUT_DP;
 
        intel_dp_probe_oui(intel_dp);
 
@@ -4635,7 +4326,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
                /* MST devices are disconnected from a monitor POV */
                intel_dp_unset_edid(intel_dp);
                if (intel_encoder->type != INTEL_OUTPUT_EDP)
-                       intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+                       intel_encoder->type = INTEL_OUTPUT_DP;
                return connector_status_disconnected;
        }
 
@@ -4645,7 +4336,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 
        intel_dp->detect_done = false;
 
-       if (intel_connector->detect_edid)
+       if (is_edp(intel_dp) || intel_connector->detect_edid)
                return connector_status_connected;
        else
                return connector_status_disconnected;
@@ -4674,7 +4365,7 @@ intel_dp_force(struct drm_connector *connector)
        intel_display_power_put(dev_priv, power_domain);
 
        if (intel_encoder->type != INTEL_OUTPUT_EDP)
-               intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+               intel_encoder->type = INTEL_OUTPUT_DP;
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
@@ -4723,7 +4414,7 @@ intel_dp_set_property(struct drm_connector *connector,
                      struct drm_property *property,
                      uint64_t val)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -4811,6 +4502,32 @@ done:
        return 0;
 }
 
+static int
+intel_dp_connector_register(struct drm_connector *connector)
+{
+       struct intel_dp *intel_dp = intel_attached_dp(connector);
+       int ret;
+
+       ret = intel_connector_register(connector);
+       if (ret)
+               return ret;
+
+       i915_debugfs_connector_add(connector);
+
+       DRM_DEBUG_KMS("registering %s bus for %s\n",
+                     intel_dp->aux.name, connector->kdev->kobj.name);
+
+       intel_dp->aux.dev = connector->kdev;
+       return drm_dp_aux_register(&intel_dp->aux);
+}
+
+static void
+intel_dp_connector_unregister(struct drm_connector *connector)
+{
+       drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
+       intel_connector_unregister(connector);
+}
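/*
 * Editorial note: these two callbacks are wired up below as the
 * connector's .late_register/.early_unregister hooks, so the AUX
 * channel and debugfs entries now follow the connector's userspace
 * visibility instead of being set up unconditionally at init time.
 */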
+
 static void
 intel_dp_connector_destroy(struct drm_connector *connector)
 {
@@ -4851,6 +4568,9 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
                        intel_dp->edp_notifier.notifier_call = NULL;
                }
        }
+
+       intel_dp_aux_fini(intel_dp);
+
        drm_encoder_cleanup(encoder);
        kfree(intel_dig_port);
 }
@@ -4876,7 +4596,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4899,13 +4619,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 
 void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
-       struct intel_dp *intel_dp;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+       if (!HAS_DDI(dev_priv))
+               intel_dp->DP = I915_READ(intel_dp->output_reg);
 
        if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
                return;
 
-       intel_dp = enc_to_intel_dp(encoder);
-
        pps_lock(intel_dp);
 
        /*
@@ -4927,6 +4649,8 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
+       .late_register = intel_dp_connector_register,
+       .early_unregister = intel_dp_connector_unregister,
        .destroy = intel_dp_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -4935,7 +4659,6 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
        .get_modes = intel_dp_get_modes,
        .mode_valid = intel_dp_mode_valid,
-       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -4949,13 +4672,13 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum intel_display_power_domain power_domain;
        enum irqreturn ret = IRQ_NONE;
 
        if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
            intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
-               intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+               intel_dig_port->base.type = INTEL_OUTPUT_DP;
 
        if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
                /*
@@ -4977,9 +4700,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
        intel_display_power_get(dev_priv, power_domain);
 
        if (long_hpd) {
-               /* indicate that we need to restart link training */
-               intel_dp->train_set_valid = false;
-
                intel_dp_long_pulse(intel_dp->attached_connector);
                if (intel_dp->is_mst)
                        ret = IRQ_HANDLED;
@@ -5020,7 +4740,7 @@ put_power:
 /* check the VBT to see whether the eDP is on another port */
 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /*
         * eDP not supported on g4x. So bail out early just
@@ -5062,82 +4782,93 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
 }
 
 static void
-intel_dp_init_panel_power_sequencer(struct drm_device *dev,
-                                   struct intel_dp *intel_dp)
+intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
+                          struct intel_dp *intel_dp, struct edp_power_seq *seq)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct edp_power_seq cur, vbt, spec,
-               *final = &intel_dp->pps_delays;
        u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
-       i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+       struct pps_registers regs;
 
-       lockdep_assert_held(&dev_priv->pps_mutex);
-
-       /* already initialized? */
-       if (final->t11_t12 != 0)
-               return;
-
-       if (IS_BROXTON(dev)) {
-               /*
-                * TODO: BXT has 2 sets of PPS registers.
-                * Correct Register for Broxton need to be identified
-                * using VBT. hardcoding for now
-                */
-               pp_ctrl_reg = BXT_PP_CONTROL(0);
-               pp_on_reg = BXT_PP_ON_DELAYS(0);
-               pp_off_reg = BXT_PP_OFF_DELAYS(0);
-       } else if (HAS_PCH_SPLIT(dev)) {
-               pp_ctrl_reg = PCH_PP_CONTROL;
-               pp_on_reg = PCH_PP_ON_DELAYS;
-               pp_off_reg = PCH_PP_OFF_DELAYS;
-               pp_div_reg = PCH_PP_DIVISOR;
-       } else {
-               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
-
-               pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
-               pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
-               pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
-               pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
-       }
+       intel_pps_get_registers(dev_priv, intel_dp, &regs);
 
        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp_ctl = ironlake_get_pp_control(intel_dp);
 
-       pp_on = I915_READ(pp_on_reg);
-       pp_off = I915_READ(pp_off_reg);
-       if (!IS_BROXTON(dev)) {
-               I915_WRITE(pp_ctrl_reg, pp_ctl);
-               pp_div = I915_READ(pp_div_reg);
+       pp_on = I915_READ(regs.pp_on);
+       pp_off = I915_READ(regs.pp_off);
+       if (!IS_BROXTON(dev_priv)) {
+               I915_WRITE(regs.pp_ctrl, pp_ctl);
+               pp_div = I915_READ(regs.pp_div);
        }
 
        /* Pull timing values out of registers */
-       cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
-               PANEL_POWER_UP_DELAY_SHIFT;
+       seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+                    PANEL_POWER_UP_DELAY_SHIFT;
 
-       cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
-               PANEL_LIGHT_ON_DELAY_SHIFT;
+       seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+                 PANEL_LIGHT_ON_DELAY_SHIFT;
 
-       cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
-               PANEL_LIGHT_OFF_DELAY_SHIFT;
+       seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+                 PANEL_LIGHT_OFF_DELAY_SHIFT;
 
-       cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
-               PANEL_POWER_DOWN_DELAY_SHIFT;
+       seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+                  PANEL_POWER_DOWN_DELAY_SHIFT;
 
-       if (IS_BROXTON(dev)) {
+       if (IS_BROXTON(dev_priv)) {
                u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
                        BXT_POWER_CYCLE_DELAY_SHIFT;
                if (tmp > 0)
-                       cur.t11_t12 = (tmp - 1) * 1000;
+                       seq->t11_t12 = (tmp - 1) * 1000;
                else
-                       cur.t11_t12 = 0;
+                       seq->t11_t12 = 0;
        } else {
-               cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+               seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
                       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
        }
+}
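/*
 * Editorial sketch (assumption): intel_pps_get_registers() is presumed
 * to resolve the per-platform PPS register bank (BXT vs. PCH vs.
 * VLV/CHV) into a small bundle, roughly:
 *
 *   struct pps_registers {
 *           i915_reg_t pp_ctrl;
 *           i915_reg_t pp_on;
 *           i915_reg_t pp_off;
 *           i915_reg_t pp_div;
 *   };
 */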
+
+static void
+intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
+{
+       DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+                     state_name,
+                     seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct drm_i915_private *dev_priv,
+                      struct intel_dp *intel_dp)
+{
+       struct edp_power_seq hw;
+       struct edp_power_seq *sw = &intel_dp->pps_delays;
+
+       intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
+
+       if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+           hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+               DRM_ERROR("PPS state mismatch\n");
+               intel_pps_dump_state("sw", sw);
+               intel_pps_dump_state("hw", &hw);
+       }
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+                                   struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct edp_power_seq cur, vbt, spec,
+               *final = &intel_dp->pps_delays;
+
+       lockdep_assert_held(&dev_priv->pps_mutex);
+
+       /* already initialized? */
+       if (final->t11_t12 != 0)
+               return;
+
+       intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
 
-       DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-                     cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+       intel_pps_dump_state("cur", &cur);
 
        vbt = dev_priv->vbt.edp.pps;
 
@@ -5153,8 +4884,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
         * too. */
        spec.t11_t12 = (510 + 100) * 10;
 
-       DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-                     vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+       intel_pps_dump_state("vbt", &vbt);
 
        /* Use the max of the register settings and vbt. If both are
         * unset, fall back to the spec limits. */
@@ -5182,59 +4912,41 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
        DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
                      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+       /*
+        * We override the HW backlight delays to 1 because we do manual waits
+        * on them. For T8, even BSpec recommends doing it. For T9, if we
+        * don't do this, we'll end up waiting for the backlight off delay
+        * twice: once when we do the manual sleep, and once when we disable
+        * the panel and wait for the PP_STATUS bit to become zero.
+        */
+       final->t8 = 1;
+       final->t9 = 1;
 }
 
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                                              struct intel_dp *intel_dp)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 pp_on, pp_off, pp_div, port_sel = 0;
        int div = dev_priv->rawclk_freq / 1000;
-       i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
+       struct pps_registers regs;
        enum port port = dp_to_dig_port(intel_dp)->port;
        const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
        lockdep_assert_held(&dev_priv->pps_mutex);
 
-       if (IS_BROXTON(dev)) {
-               /*
-                * TODO: BXT has 2 sets of PPS registers.
-                * Correct Register for Broxton need to be identified
-                * using VBT. hardcoding for now
-                */
-               pp_ctrl_reg = BXT_PP_CONTROL(0);
-               pp_on_reg = BXT_PP_ON_DELAYS(0);
-               pp_off_reg = BXT_PP_OFF_DELAYS(0);
-
-       } else if (HAS_PCH_SPLIT(dev)) {
-               pp_on_reg = PCH_PP_ON_DELAYS;
-               pp_off_reg = PCH_PP_OFF_DELAYS;
-               pp_div_reg = PCH_PP_DIVISOR;
-       } else {
-               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+       intel_pps_get_registers(dev_priv, intel_dp, &regs);
 
-               pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
-               pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
-               pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
-       }
-
-       /*
-        * And finally store the new values in the power sequencer. The
-        * backlight delays are set to 1 because we do manual waits on them. For
-        * T8, even BSpec recommends doing it. For T9, if we don't do this,
-        * we'll end up waiting for the backlight off delay twice: once when we
-        * do the manual sleep, and once when we disable the panel and wait for
-        * the PP_STATUS bit to become zero.
-        */
        pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
-               (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
-       pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+               (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+       pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
                 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
        /* Compute the divisor for the pp clock, simply match the Bspec
         * formula. */
        if (IS_BROXTON(dev)) {
-               pp_div = I915_READ(pp_ctrl_reg);
+               pp_div = I915_READ(regs.pp_ctrl);
                pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
                pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
                                << BXT_POWER_CYCLE_DELAY_SHIFT);
@@ -5257,19 +4969,19 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
        pp_on |= port_sel;
 
-       I915_WRITE(pp_on_reg, pp_on);
-       I915_WRITE(pp_off_reg, pp_off);
+       I915_WRITE(regs.pp_on, pp_on);
+       I915_WRITE(regs.pp_off, pp_off);
        if (IS_BROXTON(dev))
-               I915_WRITE(pp_ctrl_reg, pp_div);
+               I915_WRITE(regs.pp_ctrl, pp_div);
        else
-               I915_WRITE(pp_div_reg, pp_div);
+               I915_WRITE(regs.pp_div, pp_div);
 
        DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
-                     I915_READ(pp_on_reg),
-                     I915_READ(pp_off_reg),
+                     I915_READ(regs.pp_on),
+                     I915_READ(regs.pp_off),
                      IS_BROXTON(dev) ?
-                     (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
-                     I915_READ(pp_div_reg));
+                     (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
+                     I915_READ(regs.pp_div));
 }
 
 /**
@@ -5286,7 +4998,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
  */
 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *encoder;
        struct intel_digital_port *dig_port = NULL;
        struct intel_dp *intel_dp = dev_priv->drrs.dp;
@@ -5385,7 +5097,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5417,7 +5129,7 @@ unlock:
 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5432,9 +5144,9 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
        }
 
        if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(dev_priv->dev,
-                       intel_dp->attached_connector->panel.
-                       fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(&dev_priv->drm,
+                                       intel_dp->attached_connector->panel.
+                                       fixed_mode->vrefresh);
 
        dev_priv->drrs.dp = NULL;
        mutex_unlock(&dev_priv->drrs.mutex);
@@ -5464,9 +5176,9 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
                goto unlock;
 
        if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
-               intel_dp_set_drrs_state(dev_priv->dev,
-                       intel_dp->attached_connector->panel.
-                       downclock_mode->vrefresh);
+               intel_dp_set_drrs_state(&dev_priv->drm,
+                                       intel_dp->attached_connector->panel.
+                                       downclock_mode->vrefresh);
 
 unlock:
        mutex_unlock(&dev_priv->drrs.mutex);
@@ -5485,7 +5197,7 @@ unlock:
 void intel_edp_drrs_invalidate(struct drm_device *dev,
                unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -5508,9 +5220,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 
        /* invalidate means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(dev_priv->dev,
-                               dev_priv->drrs.dp->attached_connector->panel.
-                               fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(&dev_priv->drm,
+                                       dev_priv->drrs.dp->attached_connector->panel.
+                                       fixed_mode->vrefresh);
 
        mutex_unlock(&dev_priv->drrs.mutex);
 }
@@ -5530,7 +5242,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 void intel_edp_drrs_flush(struct drm_device *dev,
                unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -5553,9 +5265,9 @@ void intel_edp_drrs_flush(struct drm_device *dev,
 
        /* flush means busy screen hence upclock */
        if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-               intel_dp_set_drrs_state(dev_priv->dev,
-                               dev_priv->drrs.dp->attached_connector->panel.
-                               fixed_mode->vrefresh);
+               intel_dp_set_drrs_state(&dev_priv->drm,
+                                       dev_priv->drrs.dp->attached_connector->panel.
+                                       fixed_mode->vrefresh);
 
        /*
         * flush also means no more activity hence schedule downclock, if all
@@ -5590,14 +5302,14 @@ void intel_edp_drrs_flush(struct drm_device *dev,
  *
  * DRRS saves power by switching to low RR based on usage scenarios.
  *
- * eDP DRRS:-
- *        The implementation is based on frontbuffer tracking implementation.
- * When there is a disturbance on the screen triggered by user activity or a
- * periodic system activity, DRRS is disabled (RR is changed to high RR).
- * When there is no movement on screen, after a timeout of 1 second, a switch
- * to low RR is made.
- *        For integration with frontbuffer tracking code,
- * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
+ * The implementation is based on frontbuffer tracking implementation.  When
+ * there is a disturbance on the screen triggered by user activity or a periodic
+ * system activity, DRRS is disabled (RR is changed to high RR).  When there is
+ * no movement on screen, after a timeout of 1 second, a switch to low RR is
+ * made.
+ *
+ * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
+ * and intel_edp_drrs_flush() are called.
  *
  * DRRS can be further extended to support other internal panels and also
  * the scenario of video playback wherein RR is set based on the rate
@@ -5623,7 +5335,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
 {
        struct drm_connector *connector = &intel_connector->base;
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *downclock_mode = NULL;
 
        INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
@@ -5661,7 +5373,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
@@ -5672,8 +5384,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (!is_edp(intel_dp))
                return true;
 
+       /*
+        * On IBX/CPT we may get here with LVDS already registered. Since the
+        * driver uses the only internal power sequencer available for both
+        * eDP and LVDS bail out early in this case to prevent interfering
+        * with an already powered-on LVDS power sequencer.
+        */
+       if (intel_get_lvds_encoder(dev)) {
+               WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+               DRM_INFO("LVDS was detected, not registering eDP\n");
+
+               return false;
+       }
+
        pps_lock(intel_dp);
+
+       intel_dp_init_panel_power_timestamps(intel_dp);
+
+       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+               vlv_initial_power_sequencer_setup(intel_dp);
+       } else {
+               intel_dp_init_panel_power_sequencer(dev, intel_dp);
+               intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+       }
+
        intel_edp_panel_vdd_sanitize(intel_dp);
+
        pps_unlock(intel_dp);
 
        /* Cache DPCD and EDID for edp. */
@@ -5687,14 +5423,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        } else {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
-               return false;
+               goto out_vdd_off;
        }
 
-       /* We now know it's not a ghost, init power sequence regs. */
-       pps_lock(intel_dp);
-       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
-       pps_unlock(intel_dp);
-
        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
@@ -5725,8 +5456,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
-               if (fixed_mode)
+               if (fixed_mode) {
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+                       connector->display_info.width_mm = fixed_mode->width_mm;
+                       connector->display_info.height_mm = fixed_mode->height_mm;
+               }
        }
        mutex_unlock(&dev->mode_config.mutex);
 
@@ -5759,6 +5493,18 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        intel_panel_setup_backlight(connector, pipe);
 
        return true;
+
+out_vdd_off:
+       cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
+       /*
+        * vdd might still be enabled due to the delayed vdd off.
+        * Make sure vdd is actually turned off here.
+        */
+       pps_lock(intel_dp);
+       edp_panel_vdd_off_sync(intel_dp);
+       pps_unlock(intel_dp);
+
+       return false;
 }
 
 bool
@@ -5769,9 +5515,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
-       int type, ret;
+       int type;
 
        if (WARN(intel_dig_port->max_lanes < 1,
                 "Not enough lanes (%d) for DP on port %c\n",
@@ -5830,17 +5576,17 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
+       intel_dp_aux_init(intel_dp, intel_connector);
+
        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
                          edp_panel_vdd_work);
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
-       drm_connector_register(connector);
 
        if (HAS_DDI(dev))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
-       intel_connector->unregister = intel_dp_connector_unregister;
 
        /* Set up the hotplug pin. */
        switch (port) {
@@ -5865,22 +5611,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                BUG();
        }
 
-       if (is_edp(intel_dp)) {
-               pps_lock(intel_dp);
-               intel_dp_init_panel_power_timestamps(intel_dp);
-               if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-                       vlv_initial_power_sequencer_setup(intel_dp);
-               else
-                       intel_dp_init_panel_power_sequencer(dev, intel_dp);
-               pps_unlock(intel_dp);
-       }
-
-       ret = intel_dp_aux_init(intel_dp, intel_connector);
-       if (ret)
-               goto fail;
-
        /* init MST on ports that can support it */
-       if (HAS_DP_MST(dev) &&
+       if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
            (port == PORT_B || port == PORT_C || port == PORT_D))
                intel_dp_mst_encoder_init(intel_dig_port,
                                          intel_connector->base.base.id);
@@ -5902,32 +5634,19 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }
 
-       i915_debugfs_connector_add(connector);
-
        return true;
 
 fail:
-       if (is_edp(intel_dp)) {
-               cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
-               /*
-                * vdd might still be enabled do to the delayed vdd off.
-                * Make sure vdd is actually turned off here.
-                */
-               pps_lock(intel_dp);
-               edp_panel_vdd_off_sync(intel_dp);
-               pps_unlock(intel_dp);
-       }
-       drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
 
        return false;
 }
 
-void
-intel_dp_init(struct drm_device *dev,
-             i915_reg_t output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+                  i915_reg_t output_reg,
+                  enum port port)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
@@ -5935,7 +5654,7 @@ intel_dp_init(struct drm_device *dev,
 
        intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
-               return;
+               return false;
 
        intel_connector = intel_connector_alloc();
        if (!intel_connector)
@@ -5945,7 +5664,7 @@ intel_dp_init(struct drm_device *dev,
        encoder = &intel_encoder->base;
 
        if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
-                            DRM_MODE_ENCODER_TMDS, NULL))
+                            DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
                goto err_encoder_init;
 
        intel_encoder->compute_config = intel_dp_compute_config;
@@ -5975,7 +5694,7 @@ intel_dp_init(struct drm_device *dev,
        intel_dig_port->dp.output_reg = output_reg;
        intel_dig_port->max_lanes = 4;
 
-       intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+       intel_encoder->type = INTEL_OUTPUT_DP;
        if (IS_CHERRYVIEW(dev)) {
                if (port == PORT_D)
                        intel_encoder->crtc_mask = 1 << 2;
@@ -5992,7 +5711,7 @@ intel_dp_init(struct drm_device *dev,
        if (!intel_dp_init_connector(intel_dig_port, intel_connector))
                goto err_init_connector;
 
-       return;
+       return true;
 
 err_init_connector:
        drm_encoder_cleanup(encoder);
@@ -6000,49 +5719,40 @@ err_encoder_init:
        kfree(intel_connector);
 err_connector_alloc:
        kfree(intel_dig_port);
-
-       return;
+       return false;
 }
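/*
 * Editorial note: intel_dp_init() now reports success to its caller.
 * Presumably this lets port-setup code react to a failed DP probe
 * (for example by trying another encoder type) instead of silently
 * continuing.
 */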
 
 void intel_dp_mst_suspend(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        /* disable MST */
        for (i = 0; i < I915_MAX_PORTS; i++) {
                struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
-               if (!intel_dig_port)
+
+               if (!intel_dig_port || !intel_dig_port->dp.can_mst)
                        continue;
 
-               if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
-                       if (!intel_dig_port->dp.can_mst)
-                               continue;
-                       if (intel_dig_port->dp.is_mst)
-                               drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
-               }
+               if (intel_dig_port->dp.is_mst)
+                       drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
        }
 }
 
 void intel_dp_mst_resume(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        for (i = 0; i < I915_MAX_PORTS; i++) {
                struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
-               if (!intel_dig_port)
-                       continue;
-               if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
-                       int ret;
+               int ret;
 
-                       if (!intel_dig_port->dp.can_mst)
-                               continue;
+               if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+                       continue;
 
-                       ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
-                       if (ret != 0) {
-                               intel_dp_check_mst_status(&intel_dig_port->dp);
-                       }
-               }
+               ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+               if (ret)
+                       intel_dp_check_mst_status(&intel_dig_port->dp);
        }
 }
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
new file mode 100644 (file)
index 0000000..6532e22
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_drv.h"
+
+static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
+{
+       uint8_t reg_val = 0;
+
+       if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+                             &reg_val) < 0) {
+               DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+                             DP_EDP_DISPLAY_CONTROL_REGISTER);
+               return;
+       }
+       if (enable)
+               reg_val |= DP_EDP_BACKLIGHT_ENABLE;
+       else
+               reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
+
+       if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
+                              reg_val) != 1) {
+               DRM_DEBUG_KMS("Failed to %s aux backlight\n",
+                             enable ? "enable" : "disable");
+       }
+}
+
+/*
+ * Read the current backlight value from DPCD register(s) based
+ * on whether 8-bit (MSB only) or 16-bit (MSB and LSB) values are supported.
+ */
+static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+       uint8_t read_val[2] = { 0x0 };
+       uint16_t level = 0;
+
+       if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+                            &read_val, sizeof(read_val)) < 0) {
+               DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
+                             DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
+               return 0;
+       }
+       level = read_val[0];
+       if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+               level = (read_val[0] << 8 | read_val[1]);
+
+       return level;
+}
+
+/*
+ * Sends the current backlight level over the aux channel, checking if it is
+ * using an 8-bit or a 16-bit value (MSB and LSB).
+ */
+static void
+intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+       uint8_t vals[2] = { 0x0 };
+
+       vals[0] = level;
+
+       /* Write the MSB and/or LSB */
+       if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
+               vals[0] = (level & 0xFF00) >> 8;
+               vals[1] = (level & 0xFF);
+       }
+       if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
+                             vals, sizeof(vals)) < 0) {
+               DRM_DEBUG_KMS("Failed to write aux backlight level\n");
+               return;
+       }
+}
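/*
 * Worked example (editorial): for a 16-bit capable panel and
 * level == 0x1234, the split above stores vals[0] == 0x12 (MSB) and
 * vals[1] == 0x34 (LSB), matching the BRIGHTNESS_MSB/LSB register pair.
 */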
+
+static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+       uint8_t dpcd_buf = 0;
+
+       set_aux_backlight_enable(intel_dp, true);
+
+       if ((drm_dp_dpcd_readb(&intel_dp->aux,
+                              DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
+           ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
+            DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+                                  (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
+}
+
+static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
+{
+       set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
+}
+
+static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
+                                       enum pipe pipe)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+       struct intel_panel *panel = &connector->panel;
+
+       intel_dp_aux_enable_backlight(connector);
+
+       if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
+               panel->backlight.max = 0xFFFF;
+       else
+               panel->backlight.max = 0xFF;
+
+       panel->backlight.min = 0;
+       panel->backlight.level = intel_dp_aux_get_backlight(connector);
+
+       panel->backlight.enabled = panel->backlight.level != 0;
+
+       return 0;
+}
+
+static bool
+intel_dp_aux_display_control_capable(struct intel_connector *connector)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
+
+       /* Check the eDP Display control capabilities registers to determine if
+        * the panel can support backlight control over the aux channel
+        */
+       if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
+           (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
+           !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
+             (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
+               DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
+               return true;
+       }
+       return false;
+}
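/*
 * Editorial summary of the predicate above: AUX control is selected
 * only when the sink advertises both TCON brightness adjustment and
 * AUX enable, and advertises neither of the PWM-pin alternatives.
 */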
+
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
+{
+       struct intel_panel *panel = &intel_connector->panel;
+
+       if (!i915.enable_dpcd_backlight)
+               return -ENODEV;
+
+       if (!intel_dp_aux_display_control_capable(intel_connector))
+               return -ENODEV;
+
+       panel->backlight.setup = intel_dp_aux_setup_backlight;
+       panel->backlight.enable = intel_dp_aux_enable_backlight;
+       panel->backlight.disable = intel_dp_aux_disable_backlight;
+       panel->backlight.set = intel_dp_aux_set_backlight;
+       panel->backlight.get = intel_dp_aux_get_backlight;
+
+       return 0;
+}
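/*
 * Hedged usage sketch: a caller would try this AUX backend first and
 * keep the default PWM-based hooks when it returns -ENODEV, e.g.
 * (illustrative, not the actual call site):
 *
 *   if (intel_dp_aux_init_backlight_funcs(intel_connector) == 0)
 *           return;         // AUX backlight hooks installed
 *   // otherwise fall back to platform PWM backlight setup
 */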
index 0b8eefc2acc5d93088b960e4714bce55944df82e..60fb39cd220b4a01f5b9469a5de29ee496abe61f 100644 (file)
@@ -85,8 +85,7 @@ static bool
 intel_dp_reset_link_train(struct intel_dp *intel_dp,
                        uint8_t dp_train_pat)
 {
-       if (!intel_dp->train_set_valid)
-               memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+       memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
        intel_dp_set_signal_levels(intel_dp);
        return intel_dp_set_link_train(intel_dp, dp_train_pat);
 }
@@ -161,23 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
                        break;
                }
 
-               /*
-                * if we used previously trained voltage and pre-emphasis values
-                * and we don't get clock recovery, reset link training values
-                */
-               if (intel_dp->train_set_valid) {
-                       DRM_DEBUG_KMS("clock recovery not ok, reset");
-                       /* clear the flag as we are not reusing train set */
-                       intel_dp->train_set_valid = false;
-                       if (!intel_dp_reset_link_train(intel_dp,
-                                                      DP_TRAINING_PATTERN_1 |
-                                                      DP_LINK_SCRAMBLING_DISABLE)) {
-                               DRM_ERROR("failed to enable link training\n");
-                               return;
-                       }
-                       continue;
-               }
-
                /* Check to see if we've tried the max voltage */
                for (i = 0; i < intel_dp->lane_count; i++)
                        if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
@@ -284,7 +266,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
                /* Make sure clock is still ok */
                if (!drm_dp_clock_recovery_ok(link_status,
                                              intel_dp->lane_count)) {
-                       intel_dp->train_set_valid = false;
                        intel_dp_link_training_clock_recovery(intel_dp);
                        intel_dp_set_link_train(intel_dp,
                                                training_pattern |
@@ -301,7 +282,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 
                /* Try 5 times, then try clock recovery if that fails */
                if (tries > 5) {
-                       intel_dp->train_set_valid = false;
                        intel_dp_link_training_clock_recovery(intel_dp);
                        intel_dp_set_link_train(intel_dp,
                                                training_pattern |
@@ -322,10 +302,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 
        intel_dp_set_idle_link_train(intel_dp);
 
-       if (channel_eq) {
-               intel_dp->train_set_valid = true;
+       if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
-       }
 }
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
index 7a34090cef34e6a14276ab2cdfcbc4c498926be8..68a005d729e90b218cc3ac4eaefcb094388370b5 100644 (file)
@@ -47,7 +47,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
        pipe_config->dp_encoder_is_mst = true;
        pipe_config->has_pch_encoder = false;
-       pipe_config->has_dp_encoder = true;
        bpp = 24;
        /*
         * for MST we always configure max link bw - the spec doesn't
@@ -140,7 +139,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
        int ret;
        uint32_t temp;
@@ -207,14 +206,17 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
        int ret;
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
 
-       if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
-                    1))
+       if (intel_wait_for_register(dev_priv,
+                                   DP_TP_STATUS(port),
+                                   DP_TP_STATUS_ACT_SENT,
+                                   DP_TP_STATUS_ACT_SENT,
+                                   1))
                DRM_ERROR("Timed out waiting for ACT sent\n");
 
        ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
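The hunk above swaps an open-coded wait_for() poll for intel_wait_for_register(), which spins until (read(reg) & mask) == value or the timeout in milliseconds expires. A minimal sketch of that polling contract, assuming a hypothetical read_reg() accessor in place of the driver's real uncore plumbing (the real helper is also more careful, busy-waiting briefly before falling back to a sleeping wait):

#include <linux/jiffies.h>
#include <linux/errno.h>

/* Sketch only: poll until (read_reg(offset) & mask) == value. */
static int poll_register_sketch(u32 (*read_reg)(u32 offset), u32 offset,
				u32 mask, u32 value, unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if ((read_reg(offset) & mask) == value)
			return 0;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		cpu_relax();
	}
}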
@@ -239,12 +241,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        u32 temp, flags = 0;
 
-       pipe_config->has_dp_encoder = true;
-
        temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
        if (temp & TRANS_DDI_PHSYNC)
                flags |= DRM_MODE_FLAG_PHSYNC;
@@ -336,6 +336,8 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dp_mst_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
+       .late_register = intel_connector_register,
+       .early_unregister = intel_connector_unregister,
        .destroy = intel_dp_mst_connector_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
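Wiring ->late_register/->early_unregister into drm_connector_funcs means the DRM core drives these hooks from drm_connector_register()/drm_connector_unregister(), which is what lets the driver-private ->unregister hook be deleted further down. Roughly the core-side shape, as a sketch rather than a verbatim copy of the helper:

/* Sketch of the core calling convention assumed by the hunk above. */
int drm_connector_register_sketch(struct drm_connector *connector)
{
	/* ...sysfs/debugfs registration happens first... */
	if (connector->funcs->late_register)
		return connector->funcs->late_register(connector);
	return 0;
}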
@@ -455,7 +457,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
        drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
 
-       intel_connector->unregister = intel_connector_unregister;
        intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
        intel_connector->mst_port = intel_dp;
        intel_connector->port = port;
@@ -477,9 +478,11 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_device *dev = connector->dev;
+
        drm_modeset_lock_all(dev);
        intel_connector_add_to_fbdev(intel_connector);
        drm_modeset_unlock_all(dev);
+
        drm_connector_register(&intel_connector->base);
 }
 
@@ -489,7 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_device *dev = connector->dev;
 
-       intel_connector->unregister(intel_connector);
+       drm_connector_unregister(connector);
 
        /* need to nuke the connector */
        drm_modeset_lock_all(dev);
@@ -534,7 +537,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
        intel_mst->primary = intel_dig_port;
 
        drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
-                        DRM_MODE_ENCODER_DPMST, NULL);
+                        DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
 
        intel_encoder->type = INTEL_OUTPUT_DP_MST;
        intel_encoder->crtc_mask = 0x7;
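With this change the encoder gets a human-readable per-pipe name ("DP-MST A/B/C") instead of the NULL placeholder. drm_encoder_init() takes a printf-style format plus varargs; the signature this call matches, quoted from the era's drm_crtc.h from memory, so treat as approximate:

int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder,
		     const struct drm_encoder_funcs *funcs, int encoder_type,
		     const char *name, ...);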
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
new file mode 100644 (file)
index 0000000..047f487
--- /dev/null
@@ -0,0 +1,470 @@
+/*
+ * Copyright © 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_drv.h"
+
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 deemph_reg_value, u32 margin_reg_value,
+                             bool uniq_trans_scale)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
+       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 val;
+       int i;
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       /* Clear calc init */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+       val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+               val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
+               val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
+               val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+       }
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+               val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
+               val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+       }
+
+       /* Program swing deemph */
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
+               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+               val &= ~DPIO_SWING_DEEMPH9P5_MASK;
+               val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
+               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+       }
+
+       /* Program swing margin */
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
+               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+
+               val &= ~DPIO_SWING_MARGIN000_MASK;
+               val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
+
+               /*
+                * Supposedly this value shouldn't matter when unique transition
+                * scale is disabled, but in fact it does matter. Let's just
+                * always program the same value and hope it's OK.
+                */
+               val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
+               val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
+
+               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+       }
+
+       /*
+        * The document said it needs to set bit 27 for ch0 and bit 26
+        * for ch1. Might be a typo in the doc.
+        * For now, for this unique transition scale selection, set bit
+        * 27 for ch0 and ch1.
+        */
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
+               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+               if (uniq_trans_scale)
+                       val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
+               else
+                       val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
+               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+       }
+
+       /* Start swing calculation */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+       val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+               val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+       }
+
+       mutex_unlock(&dev_priv->sb_lock);
+}
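Nearly every register access in the function above is the same pattern: read under sb_lock, clear a field mask, OR in the new value, write back. A hypothetical helper capturing that read-modify-write idiom (not part of this patch; vlv_dpio_read()/vlv_dpio_write() as used above):

/* Sketch: caller must hold dev_priv->sb_lock, as in the code above. */
static void dpio_rmw_sketch(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 reg, u32 clear, u32 set)
{
	u32 val = vlv_dpio_read(dev_priv, pipe, reg);

	val &= ~clear;	/* clear the field(s) being reprogrammed */
	val |= set;	/* program the new value */
	vlv_dpio_write(dev_priv, pipe, reg, val);
}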
+
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+                             bool reset)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       enum pipe pipe = crtc->pipe;
+       uint32_t val;
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+       if (reset)
+               val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+       else
+               val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+
+       if (crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+               if (reset)
+                       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
+               else
+                       val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+       }
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+       val |= CHV_PCS_REQ_SOFTRESET_EN;
+       if (reset)
+               val &= ~DPIO_PCS_CLK_SOFT_RESET;
+       else
+               val |= DPIO_PCS_CLK_SOFT_RESET;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+
+       if (crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+               val |= CHV_PCS_REQ_SOFTRESET_EN;
+               if (reset)
+                       val &= ~DPIO_PCS_CLK_SOFT_RESET;
+               else
+                       val |= DPIO_PCS_CLK_SOFT_RESET;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+       }
+}
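As used later in this file, the reset is asserted in chv_phy_pre_pll_enable() before the common-lane and clock registers are programmed, and deasserted again at the end of chv_phy_pre_encoder_enable() once the per-lane setup is done, both under sb_lock:

	chv_data_lane_soft_reset(encoder, true);	/* assert: hold lanes in reset */
	/* ... program clock distribution, stagger, swing ... */
	chv_data_lane_soft_reset(encoder, false);	/* deassert: release the lanes */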
+
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(encoder->base.crtc);
+       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       enum pipe pipe = intel_crtc->pipe;
+       unsigned int lane_mask =
+               intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
+       u32 val;
+
+       /*
+        * Must trick the second common lane into life.
+        * Otherwise we can't even access the PLL.
+        */
+       if (ch == DPIO_CH0 && pipe == PIPE_B)
+               dport->release_cl2_override =
+                       !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
+
+       chv_phy_powergate_lanes(encoder, true, lane_mask);
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       /* Assert data lane reset */
+       chv_data_lane_soft_reset(encoder, true);
+
+       /* program left/right clock distribution */
+       if (pipe != PIPE_B) {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+               if (ch == DPIO_CH0)
+                       val |= CHV_BUFLEFTENA1_FORCE;
+               if (ch == DPIO_CH1)
+                       val |= CHV_BUFRIGHTENA1_FORCE;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+       } else {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+               if (ch == DPIO_CH0)
+                       val |= CHV_BUFLEFTENA2_FORCE;
+               if (ch == DPIO_CH1)
+                       val |= CHV_BUFRIGHTENA2_FORCE;
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+       }
+
+       /* program clock channel usage */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+       if (pipe != PIPE_B)
+               val &= ~CHV_PCS_USEDCLKCHANNEL;
+       else
+               val |= CHV_PCS_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+               val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+               if (pipe != PIPE_B)
+                       val &= ~CHV_PCS_USEDCLKCHANNEL;
+               else
+                       val |= CHV_PCS_USEDCLKCHANNEL;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+       }
+
+       /*
+        * This is a bit weird since generally CL
+        * matches the pipe, but here we need to
+        * pick the CL based on the port.
+        */
+       val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+       if (pipe != PIPE_B)
+               val &= ~CHV_CMN_USEDCLKCHANNEL;
+       else
+               val |= CHV_CMN_USEDCLKCHANNEL;
+       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+       mutex_unlock(&dev_priv->sb_lock);
+}
+
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(encoder->base.crtc);
+       enum dpio_channel ch = vlv_dport_to_channel(dport);
+       int pipe = intel_crtc->pipe;
+       int data, i, stagger;
+       u32 val;
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       /* allow hardware to manage TX FIFO reset source */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+               val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+       }
+
+       /* Program Tx lane latency optimal setting */
+       for (i = 0; i < intel_crtc->config->lane_count; i++) {
+               /* Set the upar bit */
+               if (intel_crtc->config->lane_count == 1)
+                       data = 0x0;
+               else
+                       data = (i == 1) ? 0x0 : 0x1;
+               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+                               data << DPIO_UPAR_SHIFT);
+       }
+
+       /* Data lane stagger programming */
+       if (intel_crtc->config->port_clock > 270000)
+               stagger = 0x18;
+       else if (intel_crtc->config->port_clock > 135000)
+               stagger = 0xd;
+       else if (intel_crtc->config->port_clock > 67500)
+               stagger = 0x7;
+       else if (intel_crtc->config->port_clock > 33750)
+               stagger = 0x4;
+       else
+               stagger = 0x2;
+
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+       val |= DPIO_TX2_STAGGER_MASK(0x1f);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+       if (intel_crtc->config->lane_count > 2) {
+               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+               val |= DPIO_TX2_STAGGER_MASK(0x1f);
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+       }
+
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+                      DPIO_LANESTAGGER_STRAP(stagger) |
+                      DPIO_LANESTAGGER_STRAP_OVRD |
+                      DPIO_TX1_STAGGER_MASK(0x1f) |
+                      DPIO_TX1_STAGGER_MULT(6) |
+                      DPIO_TX2_STAGGER_MULT(0));
+
+       if (intel_crtc->config->lane_count > 2) {
+               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+                              DPIO_LANESTAGGER_STRAP(stagger) |
+                              DPIO_LANESTAGGER_STRAP_OVRD |
+                              DPIO_TX1_STAGGER_MASK(0x1f) |
+                              DPIO_TX1_STAGGER_MULT(7) |
+                              DPIO_TX2_STAGGER_MULT(5));
+       }
+
+       /* Deassert data lane reset */
+       chv_data_lane_soft_reset(encoder, false);
+
+       mutex_unlock(&dev_priv->sb_lock);
+}
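The port-clock ladder above picks a lane-stagger delay code; extracted as a standalone helper (thresholds and values copied verbatim from the code above, helper name hypothetical) it would read:

static int chv_stagger_for_port_clock_sketch(int port_clock)
{
	if (port_clock > 270000)
		return 0x18;
	else if (port_clock > 135000)
		return 0xd;
	else if (port_clock > 67500)
		return 0x7;
	else if (port_clock > 33750)
		return 0x4;
	else
		return 0x2;
}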
+
+void chv_phy_release_cl2_override(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+       if (dport->release_cl2_override) {
+               chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
+               dport->release_cl2_override = false;
+       }
+}
+
+void chv_phy_post_pll_disable(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
+       u32 val;
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       /* disable left/right clock distribution */
+       if (pipe != PIPE_B) {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+       } else {
+               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+       }
+
+       mutex_unlock(&dev_priv->sb_lock);
+
+       /*
+        * Leave the power down bit cleared for at least one
+        * lane so that chv_phy_powergate_ch() will power
+        * on something when the channel is otherwise unused.
+        * When the port is off and the override is removed
+        * the lanes power down anyway, so otherwise it doesn't
+        * really matter what the state of power down bits is
+        * after this.
+        */
+       chv_phy_powergate_lanes(encoder, false, 0x0);
+}
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+                             u32 demph_reg_value, u32 preemph_reg_value,
+                             u32 uniqtranscale_reg_value, u32 tx3_demph)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
+       int pipe = intel_crtc->pipe;
+
+       mutex_lock(&dev_priv->sb_lock);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+                        uniqtranscale_reg_value);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+
+       if (tx3_demph)
+               vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
+
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+       mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(encoder->base.crtc);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
+       int pipe = intel_crtc->pipe;
+
+       /* Program Tx lane resets to default */
+       mutex_lock(&dev_priv->sb_lock);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+                        DPIO_PCS_TX_LANE2_RESET |
+                        DPIO_PCS_TX_LANE1_RESET);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+                        DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
+                        DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
+                        (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
+                                DPIO_PCS_CLK_SOFT_RESET);
+
+       /* Fix up inter-pair skew failure */
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
+       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+       mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+       struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = encoder->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
+       int pipe = intel_crtc->pipe;
+       u32 val;
+
+       mutex_lock(&dev_priv->sb_lock);
+
+       /* Enable clock channels for this port */
+       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+       val = 0;
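+       /* the VLV_PCS01_DW8 value read above is discarded; val is rebuilt from scratch */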
+       if (pipe)
+               val |= (1<<21);
+       else
+               val &= ~(1<<21);
+       val |= 0x001000c4;
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+
+       /* Program lane clock */
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+
+       mutex_unlock(&dev_priv->sb_lock);
+}
+
+void vlv_phy_reset_lanes(struct intel_encoder *encoder)
+{
+       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(encoder->base.crtc);
+       enum dpio_channel port = vlv_dport_to_channel(dport);
+       int pipe = intel_crtc->pipe;
+
+       mutex_lock(&dev_priv->sb_lock);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
+       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+       mutex_unlock(&dev_priv->sb_lock);
+}
index 3ac705936b0436a0a528b45a3352bbcc689f619c..5c1f2d235ffa5a66baf029f116d8ba8b65a5d8ac 100644 (file)
@@ -83,7 +83,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_shared_dpll *pll = crtc->config->shared_dpll;
 
        if (WARN_ON(pll == NULL))
@@ -112,7 +112,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
 void intel_enable_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_shared_dpll *pll = crtc->config->shared_dpll;
        unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
        unsigned old_mask;
@@ -151,7 +151,7 @@ out:
 void intel_disable_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_shared_dpll *pll = crtc->config->shared_dpll;
        unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
 
@@ -191,7 +191,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
                       enum intel_dpll_id range_min,
                       enum intel_dpll_id range_max)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_shared_dpll *pll;
        struct intel_shared_dpll_config *shared_dpll;
        enum intel_dpll_id i;
@@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
                if (memcmp(&crtc_state->dpll_hw_state,
                           &shared_dpll[i].hw_state,
                           sizeof(crtc_state->dpll_hw_state)) == 0) {
-                       DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n",
-                                     crtc->base.base.id, pll->name,
+                       DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+                                     crtc->base.base.id, crtc->base.name, pll->name,
                                      shared_dpll[i].crtc_mask,
                                      pll->active_mask);
                        return pll;
@@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
        for (i = range_min; i <= range_max; i++) {
                pll = &dev_priv->shared_dplls[i];
                if (shared_dpll[i].crtc_mask == 0) {
-                       DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
-                                     crtc->base.base.id, pll->name);
+                       DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
+                                     crtc->base.base.id, crtc->base.name, pll->name);
                        return pll;
                }
        }
@@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
                                 struct intel_shared_dpll *pll)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
 
        /* Make sure no transcoder is still depending on us. */
@@ -358,14 +358,17 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                i = (enum intel_dpll_id) crtc->pipe;
                pll = &dev_priv->shared_dplls[i];
 
-               DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
-                             crtc->base.base.id, pll->name);
+               DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+                             crtc->base.base.id, crtc->base.name, pll->name);
        } else {
                pll = intel_find_shared_dpll(crtc, crtc_state,
                                             DPLL_ID_PCH_PLL_A,
                                             DPLL_ID_PCH_PLL_B);
        }
 
+       if (!pll)
+               return NULL;
+
        /* reference the pll */
        intel_reference_shared_dpll(pll, crtc_state);
 
@@ -710,7 +713,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                pll = intel_find_shared_dpll(crtc, crtc_state,
                                             DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
 
-       } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+       } else if (encoder->type == INTEL_OUTPUT_DP ||
                   encoder->type == INTEL_OUTPUT_DP_MST ||
                   encoder->type == INTEL_OUTPUT_EDP) {
                enum intel_dpll_id pll_id;
@@ -853,7 +856,11 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
        I915_WRITE(regs[pll->id].ctl,
                   I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
 
-       if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5))
+       if (intel_wait_for_register(dev_priv,
+                                   DPLL_STATUS,
+                                   DPLL_LOCK(pll->id),
+                                   DPLL_LOCK(pll->id),
+                                   5))
                DRM_ERROR("DPLL %d not locked\n", pll->id);
 }
 
@@ -1219,7 +1226,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                         DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
                         DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
                         wrpll_params.central_freq;
-       } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+       } else if (encoder->type == INTEL_OUTPUT_DP ||
                   encoder->type == INTEL_OUTPUT_DP_MST ||
                   encoder->type == INTEL_OUTPUT_EDP) {
                switch (crtc_state->port_clock / 2) {
@@ -1236,9 +1243,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                case 162000:
                        ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
                        break;
-               /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
-               results in CDCLK change. Need to handle the change of CDCLK by
-               disabling pipes and re-enabling them */
                case 108000:
                        ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
                        break;
@@ -1374,8 +1378,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
        I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
        POSTING_READ(BXT_PORT_PLL_ENABLE(port));
 
-       if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
-                       PORT_PLL_LOCK), 200))
+       if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+                       200))
                DRM_ERROR("PLL %d not locked\n", port);
 
        /*
@@ -1508,7 +1512,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        int clock = crtc_state->port_clock;
 
        if (encoder->type == INTEL_OUTPUT_HDMI) {
-               intel_clock_t best_clock;
+               struct dpll best_clock;
 
                /* Calculate HDMI div */
                /*
@@ -1530,7 +1534,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                clk_div.m2_frac_en = clk_div.m2_frac != 0;
 
                vco = best_clock.vco;
-       } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+       } else if (encoder->type == INTEL_OUTPUT_DP ||
                   encoder->type == INTEL_OUTPUT_EDP) {
                int i;
 
@@ -1613,8 +1617,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
        i = (enum intel_dpll_id) intel_dig_port->port;
        pll = intel_get_shared_dpll_by_id(dev_priv, i);
 
-       DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
-               crtc->base.base.id, pll->name);
+       DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
+                     crtc->base.base.id, crtc->base.name, pll->name);
 
        intel_reference_shared_dpll(pll, crtc_state);
 
@@ -1632,19 +1636,11 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
 
 static void intel_ddi_pll_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t val = I915_READ(LCPLL_CTL);
-
-       if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
-               int cdclk_freq;
-
-               cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
-               dev_priv->skl_boot_cdclk = cdclk_freq;
-               if (skl_sanitize_cdclk(dev_priv))
-                       DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
-               if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
-                       DRM_ERROR("LCPLL1 is disabled\n");
-       } else if (!IS_BROXTON(dev_priv)) {
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       if (INTEL_GEN(dev_priv) < 9) {
+               uint32_t val = I915_READ(LCPLL_CTL);
+
                /*
                 * The LCPLL register should be turned on by the BIOS. For now
                 * let's just check its state and print errors in case
@@ -1727,7 +1723,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
 
 void intel_shared_dpll_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct intel_dpll_mgr *dpll_mgr = NULL;
        const struct dpll_info *dpll_info;
        int i;
index a28b4aac1e02354c0193109e27ffada19cbbbfbc..3329fc6a95f488b9e74b8f7a6deea457976a92b6 100644 (file)
 })
 
 #define wait_for(COND, MS)             _wait_for((COND), (MS) * 1000, 1000)
-#define wait_for_us(COND, US)          _wait_for((COND), (US), 1)
 
 /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
 #if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic())
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
 #else
-# define _WAIT_FOR_ATOMIC_CHECK do { } while (0)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
 #endif
 
-#define _wait_for_atomic(COND, US) ({ \
-       unsigned long end__; \
-       int ret__ = 0; \
-       _WAIT_FOR_ATOMIC_CHECK; \
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+       int cpu, ret, timeout = (US) * 1000; \
+       u64 base; \
+       _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
        BUILD_BUG_ON((US) > 50000); \
-       end__ = (local_clock() >> 10) + (US) + 1; \
-       while (!(COND)) { \
-               if (time_after((unsigned long)(local_clock() >> 10), end__)) { \
-                       /* Unlike the regular wait_for(), this atomic variant \
-                        * cannot be preempted (and we'll just ignore the issue\
-                        * of irq interruptions) and so we know that no time \
-                        * has passed since the last check of COND and can \
-                        * immediately report the timeout. \
-                        */ \
-                       ret__ = -ETIMEDOUT; \
+       if (!(ATOMIC)) { \
+               preempt_disable(); \
+               cpu = smp_processor_id(); \
+       } \
+       base = local_clock(); \
+       for (;;) { \
+               u64 now = local_clock(); \
+               if (!(ATOMIC)) \
+                       preempt_enable(); \
+               if (COND) { \
+                       ret = 0; \
+                       break; \
+               } \
+               if (now - base >= timeout) { \
+                       ret = -ETIMEDOUT; \
                        break; \
                } \
                cpu_relax(); \
+               if (!(ATOMIC)) { \
+                       preempt_disable(); \
+                       if (unlikely(cpu != smp_processor_id())) { \
+                               timeout -= now - base; \
+                               cpu = smp_processor_id(); \
+                               base = local_clock(); \
+                       } \
+               } \
        } \
+       ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+       int ret__; \
+       BUILD_BUG_ON(!__builtin_constant_p(US)); \
+       if ((US) > 10) \
+               ret__ = _wait_for((COND), (US), 10); \
+       else \
+               ret__ = _wait_for_atomic((COND), (US), 0); \
        ret__; \
 })
 
-#define wait_for_atomic(COND, MS)      _wait_for_atomic((COND), (MS) * 1000)
-#define wait_for_atomic_us(COND, US)   _wait_for_atomic((COND), (US))
+#define wait_for_atomic(COND, MS)      _wait_for_atomic((COND), (MS) * 1000, 1)
+#define wait_for_atomic_us(COND, US)   _wait_for_atomic((COND), (US), 1)
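The rewrite above fixes a subtlety: local_clock() is only guaranteed monotonic on a single CPU, so the non-atomic variant samples it with preemption disabled and re-bases the remaining budget whenever the task migrates between samples. The same logic in function form, as a sketch of the macro's non-atomic path (kernel context assumed for local_clock(), preempt_disable(), cpu_relax() and friends):

#include <linux/types.h>
#include <linux/sched.h>	/* local_clock() */
#include <linux/time.h>		/* NSEC_PER_USEC */
#include <linux/errno.h>

static int busy_wait_us_sketch(bool (*cond)(void *ctx), void *ctx, u64 timeout_us)
{
	u64 timeout = timeout_us * NSEC_PER_USEC;
	u64 base;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	base = local_clock();
	for (;;) {
		u64 now = local_clock();	/* sampled with preemption off */

		preempt_enable();
		if (cond(ctx))
			return 0;
		if (now - base >= timeout)
			return -ETIMEDOUT;
		cpu_relax();
		preempt_disable();
		if (unlikely(cpu != smp_processor_id())) {
			/* migrated: the clocks may differ, so re-base the budget */
			timeout -= now - base;
			cpu = smp_processor_id();
			base = local_clock();
		}
	}
}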
 
 #define KHz(x) (1000 * (x))
 #define MHz(x) KHz(1000 * (x))
@@ -135,7 +159,7 @@ enum intel_output_type {
        INTEL_OUTPUT_LVDS = 4,
        INTEL_OUTPUT_TVOUT = 5,
        INTEL_OUTPUT_HDMI = 6,
-       INTEL_OUTPUT_DISPLAYPORT = 7,
+       INTEL_OUTPUT_DP = 7,
        INTEL_OUTPUT_EDP = 8,
        INTEL_OUTPUT_DSI = 9,
        INTEL_OUTPUT_UNKNOWN = 10,
@@ -159,6 +183,7 @@ struct intel_framebuffer {
 struct intel_fbdev {
        struct drm_fb_helper helper;
        struct intel_framebuffer *fb;
+       async_cookie_t cookie;
        int preferred_bpp;
 };
 
@@ -242,14 +267,6 @@ struct intel_connector {
         * and active (i.e. dpms ON state). */
        bool (*get_hw_state)(struct intel_connector *);
 
-       /*
-        * Removes all interfaces through which the connector is accessible
-        * - like sysfs, debugfs entries -, so that no new operations can be
-        * started on the connector. Also makes sure all currently pending
-        * operations finish before returning.
-        */
-       void (*unregister)(struct intel_connector *);
-
        /* Panel info for eDP and LVDS */
        struct intel_panel panel;
 
@@ -266,7 +283,7 @@ struct intel_connector {
        struct intel_dp *mst_port;
 };
 
-typedef struct dpll {
+struct dpll {
        /* given values */
        int n;
        int m1, m2;
@@ -276,7 +293,7 @@ typedef struct dpll {
        int     vco;
        int     m;
        int     p;
-} intel_clock_t;
+};
 
 struct intel_atomic_state {
        struct drm_atomic_state base;
@@ -291,17 +308,32 @@ struct intel_atomic_state {
 
        bool dpll_set, modeset;
 
+       /*
+        * Does this transaction change the pipes that are active?  This mask
+        * tracks which CRTCs have changed their active state at the end of
+        * the transaction (not counting the temporary disable during modesets).
+        * This mask should only be non-zero when intel_state->modeset is true,
+        * but the converse is not necessarily true; simply changing a mode may
+        * not flip the final active status of any CRTCs.
+        */
+       unsigned int active_pipe_changes;
+
        unsigned int active_crtcs;
        unsigned int min_pixclk[I915_MAX_PIPES];
 
+       /* SKL/KBL Only */
+       unsigned int cdclk_pll_vco;
+
        struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
-       struct intel_wm_config wm_config;
 
        /*
         * Current watermarks can't be trusted during hardware readout, so
         * don't bother calculating intermediate watermarks.
         */
        bool skip_intermediate_wm;
+
+       /* Gen9+ only */
+       struct skl_wm_values wm_results;
 };
 
 struct intel_plane_state {
@@ -405,6 +437,48 @@ struct skl_pipe_wm {
        uint32_t linetime;
 };
 
+struct intel_crtc_wm_state {
+       union {
+               struct {
+                       /*
+                        * Intermediate watermarks; these can be
+                        * programmed immediately since they satisfy
+                        * both the current configuration we're
+                        * switching away from and the new
+                        * configuration we're switching to.
+                        */
+                       struct intel_pipe_wm intermediate;
+
+                       /*
+                        * Optimal watermarks, programmed post-vblank
+                        * when this state is committed.
+                        */
+                       struct intel_pipe_wm optimal;
+               } ilk;
+
+               struct {
+                       /* gen9+ only needs 1-step wm programming */
+                       struct skl_pipe_wm optimal;
+
+                       /* cached plane data rate */
+                       unsigned plane_data_rate[I915_MAX_PLANES];
+                       unsigned plane_y_data_rate[I915_MAX_PLANES];
+
+                       /* minimum block allocation */
+                       uint16_t minimum_blocks[I915_MAX_PLANES];
+                       uint16_t minimum_y_blocks[I915_MAX_PLANES];
+               } skl;
+       };
+
+       /*
+        * Platforms with two-step watermark programming will need to
+        * update watermark programming post-vblank to switch from the
+        * safe intermediate watermarks to the optimal final
+        * watermarks.
+        */
+       bool need_postvbl_update;
+};
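Pulling the union into a named struct means the two programming models now address disjoint members while sharing the post-vblank flag. A sketch of the resulting access paths (the assignments are illustrative, not taken from a real call site):

static void wm_state_sketch(struct intel_crtc_state *cstate,
			    const struct intel_pipe_wm *imm,
			    const struct intel_pipe_wm *opt)
{
	/* ILK-style two-step: intermediate now, optimal after vblank */
	cstate->wm.ilk.intermediate = *imm;
	cstate->wm.ilk.optimal = *opt;
	cstate->wm.need_postvbl_update = true;
}

Gen9+ code instead fills cstate->wm.skl.optimal in a single step, so the post-vblank switch is unnecessary there.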
+
 struct intel_crtc_state {
        struct drm_crtc_state base;
 
@@ -448,12 +522,10 @@ struct intel_crtc_state {
         */
        bool limited_color_range;
 
-       /* DP has a bunch of special cases unfortunately, so mark the pipe
-        * accordingly. */
-       bool has_dp_encoder;
-
-       /* DSI has special cases */
-       bool has_dsi_encoder;
+       /* Bitmask of encoder types (enum intel_output_type)
+        * driven by the pipe.
+        */
+       unsigned int output_types;
 
        /* Whether we should send NULL infoframes. Required for audio. */
        bool has_hdmi_sink;
@@ -522,6 +594,12 @@ struct intel_crtc_state {
 
        uint8_t lane_count;
 
+       /*
+        * Used by platforms having DP/HDMI PHY with programmable lane
+        * latency optimization.
+        */
+       uint8_t lane_lat_optim_mask;
+
        /* Panel fitter controls for gen2-gen4 + VLV */
        struct {
                u32 control;
@@ -558,32 +636,7 @@ struct intel_crtc_state {
        /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
        bool disable_lp_wm;
 
-       struct {
-               /*
-                * Optimal watermarks, programmed post-vblank when this state
-                * is committed.
-                */
-               union {
-                       struct intel_pipe_wm ilk;
-                       struct skl_pipe_wm skl;
-               } optimal;
-
-               /*
-                * Intermediate watermarks; these can be programmed immediately
-                * since they satisfy both the current configuration we're
-                * switching away from and the new configuration we're switching
-                * to.
-                */
-               struct intel_pipe_wm intermediate;
-
-               /*
-                * Platforms with two-step watermark programming will need to
-                * update watermark programming post-vblank to switch from the
-                * safe intermediate watermarks to the optimal final
-                * watermarks.
-                */
-               bool need_postvbl_update;
-       } wm;
+       struct intel_crtc_wm_state wm;
 
        /* Gamma mode programmed on the pipe */
        uint32_t gamma_mode;
@@ -598,14 +651,6 @@ struct vlv_wm_state {
        bool cxsr;
 };
 
-struct intel_mmio_flip {
-       struct work_struct work;
-       struct drm_i915_private *i915;
-       struct drm_i915_gem_request *req;
-       struct intel_crtc *crtc;
-       unsigned int rotation;
-};
-
 struct intel_crtc {
        struct drm_crtc base;
        enum pipe pipe;
@@ -620,7 +665,7 @@ struct intel_crtc {
        unsigned long enabled_power_domains;
        bool lowfreq_avail;
        struct intel_overlay *overlay;
-       struct intel_unpin_work *unpin_work;
+       struct intel_flip_work *flip_work;
 
        atomic_t unpin_work_count;
 
@@ -815,6 +860,7 @@ struct intel_dp {
        uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
        uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
        uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+       uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
        /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
        uint8_t num_sink_rates;
        int sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -838,6 +884,11 @@ struct intel_dp {
         * this port. Only relevant on VLV/CHV.
         */
        enum pipe pps_pipe;
+       /*
+        * Set if the sequencer may be reset due to a power transition,
+        * requiring a reinitialization. Only relevant on BXT.
+        */
+       bool pps_reset;
        struct edp_power_seq pps_delays;
 
        bool can_mst; /* this port supports mst */
@@ -863,8 +914,6 @@ struct intel_dp {
        /* This is called before link training is started */
        void (*prepare_link_retrain)(struct intel_dp *intel_dp);
 
-       bool train_set_valid;
-
        /* Displayport compliance testing */
        unsigned long compliance_test_type;
        unsigned long compliance_test_data;
@@ -936,33 +985,32 @@ vlv_pipe_to_channel(enum pipe pipe)
 static inline struct drm_crtc *
 intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        return dev_priv->pipe_to_crtc_mapping[pipe];
 }
 
 static inline struct drm_crtc *
 intel_get_crtc_for_plane(struct drm_device *dev, int plane)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        return dev_priv->plane_to_crtc_mapping[plane];
 }
 
-struct intel_unpin_work {
-       struct work_struct work;
+struct intel_flip_work {
+       struct work_struct unpin_work;
+       struct work_struct mmio_work;
+
        struct drm_crtc *crtc;
        struct drm_framebuffer *old_fb;
        struct drm_i915_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
        atomic_t pending;
-#define INTEL_FLIP_INACTIVE    0
-#define INTEL_FLIP_PENDING     1
-#define INTEL_FLIP_COMPLETE    2
        u32 flip_count;
        u32 gtt_offset;
        struct drm_i915_gem_request *flip_queued_req;
        u32 flip_queued_vblank;
        u32 flip_ready_vblank;
-       bool enable_stall_check;
+       unsigned int rotation;
 };
 
 struct intel_load_detect_pipe {
@@ -1031,9 +1079,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen6_reset_rps_interrupts(struct drm_device *dev);
-void gen6_enable_rps_interrupts(struct drm_device *dev);
-void gen6_disable_rps_interrupts(struct drm_device *dev);
+void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
@@ -1054,7 +1102,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
 
 /* intel_crt.c */
 void intel_crt_init(struct drm_device *dev);
-
+void intel_crt_reset(struct drm_encoder *encoder);
 
 /* intel_ddi.c */
 void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -1112,14 +1160,16 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
 void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
 
 /* intel_display.c */
+void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
                      const char *name, u32 reg, int ref_freq);
 extern const struct drm_plane_funcs intel_plane_funcs;
 void intel_init_display_hooks(struct drm_i915_private *dev_priv);
 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
 bool intel_has_pending_fb_unpin(struct drm_device *dev);
-void intel_mark_busy(struct drm_device *dev);
-void intel_mark_idle(struct drm_device *dev);
+void intel_mark_busy(struct drm_i915_private *dev_priv);
+void intel_mark_idle(struct drm_i915_private *dev_priv);
 void intel_crtc_restore_mode(struct drm_crtc *crtc);
 int intel_display_suspend(struct drm_device *dev);
 void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1128,7 +1178,6 @@ struct intel_connector *intel_connector_alloc(void);
 bool intel_connector_get_hw_state(struct intel_connector *connector);
 void intel_connector_attach_encoder(struct intel_connector *connector,
                                    struct intel_encoder *encoder);
-struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
                                             struct drm_crtc *crtc);
 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
@@ -1136,7 +1185,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
                                             enum pipe pipe);
-bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
+static inline bool
+intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
+                   enum intel_output_type type)
+{
+       return crtc_state->output_types & (1 << type);
+}
+static inline bool
+intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
+{
+       return crtc_state->output_types &
+               ((1 << INTEL_OUTPUT_DP) |
+                (1 << INTEL_OUTPUT_DP_MST) |
+                (1 << INTEL_OUTPUT_EDP));
+}
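These helpers replace the has_dp_encoder/has_dsi_encoder booleans removed elsewhere in this series: the pipe state carries a bitmask of enum intel_output_type values, and checks become bit tests. Illustrative usage (how the mask gets filled is not shown in this hunk, so treat the setter as an assumption; M1_N1 is from the driver's link_m_n_set enum):

/* somewhere during state computation (assumed call site) */
pipe_config->output_types |= 1 << INTEL_OUTPUT_DP_MST;

/* consumers test bits instead of per-encoder booleans */
if (intel_crtc_has_dp_encoder(pipe_config))
	intel_dp_set_m_n(crtc, M1_N1);	/* DP-specific M/N programming */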
 static inline void
 intel_wait_for_vblank(struct drm_device *dev, int pipe)
 {
@@ -1151,6 +1213,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
        if (crtc->active)
                intel_wait_for_vblank(dev, pipe);
 }
+
+u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+
 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
                         struct intel_digital_port *dport,
@@ -1164,14 +1229,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct drm_modeset_acquire_ctx *ctx);
 int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                               unsigned int rotation);
+void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
                           struct drm_mode_fb_cmd2 *mode_cmd,
                           struct drm_i915_gem_object *obj);
-void intel_prepare_page_flip(struct drm_device *dev, int plane);
-void intel_finish_page_flip(struct drm_device *dev, int pipe);
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-void intel_check_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
+void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
 int intel_prepare_plane_fb(struct drm_plane *plane,
                           const struct drm_plane_state *new_state);
 void intel_cleanup_plane_fb(struct drm_plane *plane,
@@ -1228,23 +1293,25 @@ u32 intel_compute_tile_offset(int *x, int *y,
                              const struct drm_framebuffer *fb, int plane,
                              unsigned int pitch,
                              unsigned int rotation);
-void intel_prepare_reset(struct drm_device *dev);
-void intel_finish_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_i915_private *dev_priv);
+void intel_finish_reset(struct drm_i915_private *dev_priv);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void broxton_init_cdclk(struct drm_i915_private *dev_priv);
-void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
-bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
-void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
+void bxt_init_cdclk(struct drm_i915_private *dev_priv);
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
+void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
+bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
+                           enum dpio_phy phy);
+bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
+                             enum dpio_phy phy);
 void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
 void bxt_enable_dc9(struct drm_i915_private *dev_priv);
 void bxt_disable_dc9(struct drm_i915_private *dev_priv);
 void gen9_enable_dc5(struct drm_i915_private *dev_priv);
 void skl_init_cdclk(struct drm_i915_private *dev_priv);
-int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
 void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+unsigned int skl_cdclk_get_vco(unsigned int freq);
 void skl_enable_dc6(struct drm_i915_private *dev_priv);
 void skl_disable_dc6(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -1252,8 +1319,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
-                       intel_clock_t *best_clock);
-int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
+                       struct dpll *best_clock);
+int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
 
 bool intel_crtc_active(struct drm_crtc *crtc);
 void hsw_enable_ips(struct intel_crtc *crtc);
@@ -1284,7 +1351,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
 void intel_csr_ucode_resume(struct drm_i915_private *);
 
 /* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                             struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1312,7 +1379,7 @@ void intel_dp_mst_resume(struct drm_device *dev);
 int intel_dp_max_link_rate(struct intel_dp *intel_dp);
 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
 void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
-void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
 void intel_plane_destroy(struct drm_plane *plane);
 void intel_edp_drrs_enable(struct intel_dp *intel_dp);
@@ -1339,15 +1406,27 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
 
+static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+       return ~((1 << lane_count) - 1) & 0xf;
+}
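Worked example: for lane_count = 2, (1 << 2) - 1 = 0x3 and ~0x3 & 0xf = 0xc, i.e. lanes 2 and 3 are flagged unused; this is the lane_mask that chv_phy_pre_pll_enable() hands to chv_phy_powergate_lanes() in intel_dpio_phy.c above.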
+
+/* intel_dp_aux_backlight.c */
+int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
+
 /* intel_dp_mst.c */
 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
 /* intel_dsi.c */
 void intel_dsi_init(struct drm_device *dev);
 
+/* intel_dsi_dcs_backlight.c */
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
 
 /* intel_dvo.c */
 void intel_dvo_init(struct drm_device *dev);
+/* intel_hotplug.c */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
 
 
 /* legacy fbdev emulation in intel_fbdev.c */
@@ -1385,11 +1464,15 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
                           struct drm_atomic_state *state);
 bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc);
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+                         struct intel_crtc_state *crtc_state,
+                         struct intel_plane_state *plane_state);
 void intel_fbc_post_update(struct intel_crtc *crtc);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
 void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc);
+void intel_fbc_enable(struct intel_crtc *crtc,
+                     struct intel_crtc_state *crtc_state,
+                     struct intel_plane_state *plane_state);
 void intel_fbc_disable(struct intel_crtc *crtc);
 void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
@@ -1411,6 +1494,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
 
 /* intel_lvds.c */
 void intel_lvds_init(struct drm_device *dev);
+struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
 bool intel_is_dual_link_lvds(struct drm_device *dev);
 
 
@@ -1424,13 +1508,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);
 
 
 /* intel_overlay.c */
-void intel_setup_overlay(struct drm_device *dev);
-void intel_cleanup_overlay(struct drm_device *dev);
+void intel_setup_overlay(struct drm_i915_private *dev_priv);
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
 int intel_overlay_switch_off(struct intel_overlay *overlay);
-int intel_overlay_put_image(struct drm_device *dev, void *data,
-                           struct drm_file *file_priv);
-int intel_overlay_attrs(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv);
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv);
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
 void intel_overlay_reset(struct drm_i915_private *dev_priv);
 
 
@@ -1449,7 +1533,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
                              int fitting_mode);
 void intel_panel_set_backlight_acpi(struct intel_connector *connector,
                                    u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
+int intel_panel_setup_backlight(struct drm_connector *connector,
+                               enum pipe pipe);
 void intel_panel_enable_backlight(struct intel_connector *connector);
 void intel_panel_disable_backlight(struct intel_connector *connector);
 void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1458,8 +1543,19 @@ extern struct drm_display_mode *intel_find_panel_downclock(
                                struct drm_device *dev,
                                struct drm_display_mode *fixed_mode,
                                struct drm_connector *connector);
-void intel_backlight_register(struct drm_device *dev);
-void intel_backlight_unregister(struct drm_device *dev);
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int intel_backlight_device_register(struct intel_connector *connector);
+void intel_backlight_device_unregister(struct intel_connector *connector);
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline int intel_backlight_device_register(struct intel_connector *connector)
+{
+       return 0;
+}
+static inline void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
 
 /* intel_psr.c */
@@ -1601,21 +1697,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
 void intel_pm_setup(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
 void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_device *dev);
-void intel_cleanup_gt_powersave(struct drm_device *dev);
-void intel_enable_gt_powersave(struct drm_device *dev);
-void intel_disable_gt_powersave(struct drm_device *dev);
-void intel_suspend_gt_powersave(struct drm_device *dev);
-void intel_reset_gt_powersave(struct drm_device *dev);
-void gen6_update_ring_freq(struct drm_device *dev);
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_private *dev_priv,
                    struct intel_rps_client *rps,
                    unsigned long submitted);
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
-                                      struct drm_i915_gem_request *req);
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
 void vlv_wm_get_hw_state(struct drm_device *dev);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
@@ -1623,7 +1718,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
                          struct skl_ddb_allocation *ddb /* out */);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
 bool ilk_disable_lp_wm(struct drm_device *dev);
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
+static inline int intel_enable_rc6(void)
+{
+       return i915.enable_rc6;
+}
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev,
@@ -1635,7 +1734,7 @@ int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 void intel_pipe_update_start(struct intel_crtc *crtc);
-void intel_pipe_update_end(struct intel_crtc *crtc);
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
 
 /* intel_tv.c */
 void intel_tv_init(struct drm_device *dev);
index 366ad6c67ce4938e6f463b2fcbb36f008aafad90..de8e9fb51595b984780984ba29408c8ed0efee6b 100644 (file)
@@ -84,13 +84,15 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
 {
        struct drm_encoder *encoder = &intel_dsi->base.base;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 mask;
 
        mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
                LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
 
-       if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
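+       /*
+        * intel_wait_for_register() polls until (reg & mask) == value, with
+        * the timeout given in milliseconds (editor's note).
+        */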
+       if (intel_wait_for_register(dev_priv,
+                                   MIPI_GEN_FIFO_STAT(port), mask, mask,
+                                   100))
                DRM_ERROR("DPI FIFOs are not empty\n");
 }
 
@@ -129,7 +131,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
 {
        struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
        struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dsi_host->port;
        struct mipi_dsi_packet packet;
        ssize_t ret;
@@ -158,8 +160,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
 
        /* note: this is never true for reads */
        if (packet.payload_length) {
-
-               if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50))
+               if (intel_wait_for_register(dev_priv,
+                                           MIPI_GEN_FIFO_STAT(port),
+                                           data_mask, 0,
+                                           50))
                        DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
 
                write_data(dev_priv, data_reg, packet.payload,
@@ -170,7 +174,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
                I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
        }
 
-       if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) {
+       if (intel_wait_for_register(dev_priv,
+                                   MIPI_GEN_FIFO_STAT(port),
+                                   ctrl_mask, 0,
+                                   50)) {
                DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
        }
 
@@ -179,7 +186,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
        /* ->rx_len is set only for reads */
        if (msg->rx_len) {
                data_mask = GEN_READ_DATA_AVAIL;
-               if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50))
+               if (intel_wait_for_register(dev_priv,
+                                           MIPI_INTR_STAT(port),
+                                           data_mask, data_mask,
+                                           50))
                        DRM_ERROR("Timeout waiting for read data.\n");
 
                read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
@@ -250,7 +260,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
 {
        struct drm_encoder *encoder = &intel_dsi->base.base;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 mask;
 
        /* XXX: pipe, hs */
@@ -269,7 +279,9 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
        I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
 
        mask = SPL_PKT_SENT_INTERRUPT;
-       if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100))
+       if (intel_wait_for_register(dev_priv,
+                                   MIPI_INTR_STAT(port), mask, mask,
+                                   100))
                DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
 
        return 0;
@@ -302,7 +314,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
 static bool intel_dsi_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
                                                   base);
        struct intel_connector *intel_connector = intel_dsi->attached_connector;
@@ -313,8 +325,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 
        DRM_DEBUG_KMS("\n");
 
-       pipe_config->has_dsi_encoder = true;
-
        if (fixed_mode) {
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
@@ -348,7 +358,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 
 static void bxt_dsi_device_ready(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
        u32 val;
@@ -387,7 +397,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
 
 static void vlv_dsi_device_ready(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
        u32 val;
@@ -437,7 +447,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
 static void intel_dsi_port_enable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
@@ -478,7 +488,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
 static void intel_dsi_port_disable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
 
@@ -497,7 +507,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
 static void intel_dsi_enable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
 
@@ -528,11 +538,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
 static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum port port;
-       u32 tmp;
 
        DRM_DEBUG_KMS("\n");
 
@@ -551,11 +560,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 
        msleep(intel_dsi->panel_on_delay);
 
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+               u32 val;
+
                /* Disable DPOunit clock gating, can stall pipe */
-               tmp = I915_READ(DSPCLK_GATE_D);
-               tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
-               I915_WRITE(DSPCLK_GATE_D, tmp);
+               val = I915_READ(DSPCLK_GATE_D);
+               val |= DPOUNIT_CLOCK_GATE_DISABLE;
+               I915_WRITE(DSPCLK_GATE_D, val);
        }
 
        /* put device in ready state */
@@ -601,7 +612,7 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
 static void intel_dsi_disable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
        u32 temp;
@@ -640,7 +651,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
 static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
 
@@ -666,8 +677,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
                /* Wait till Clock lanes are in LP-00 state for MIPI Port A
                 * only. MIPI Port C has no similar bit for checking
                 */
-               if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT)
-                                               == 0x00000), 30))
+               if (intel_wait_for_register(dev_priv,
+                                           port_ctrl, AFE_LATCHOUT, 0,
+                                           30))
                        DRM_ERROR("DSI LP not going Low\n");
 
                /* Disable MIPI PHY transparent latch */
@@ -684,7 +696,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 
 static void intel_dsi_post_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 
        DRM_DEBUG_KMS("\n");
@@ -693,7 +705,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
 
        intel_dsi_clear_device_ready(encoder);
 
-       if (!IS_BROXTON(dev_priv)) {
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 val;
 
                val = I915_READ(DSPCLK_GATE_D);
@@ -719,7 +731,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        enum intel_display_power_domain power_domain;
@@ -793,7 +805,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
                                 struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *adjusted_mode =
                                        &pipe_config->base.adjusted_mode;
        struct drm_display_mode *adjusted_mode_sw;
@@ -953,8 +965,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
        u32 pclk;
        DRM_DEBUG_KMS("\n");
 
-       pipe_config->has_dsi_encoder = true;
-
        if (IS_BROXTON(dev))
                bxt_dsi_get_pipe_config(encoder, pipe_config);
 
@@ -1012,7 +1022,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
                            const struct drm_display_mode *adjusted_mode)
 {
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
        enum port port;
        unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1098,7 +1108,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
        const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1171,6 +1181,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
        if (intel_dsi->clock_stop)
                tmp |= CLOCKSTOP;
 
+       if (IS_BROXTON(dev_priv)) {
+               tmp |= BXT_DPHY_DEFEATURE_EN;
+               if (!is_cmd_mode(intel_dsi))
+                       tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
+       }
+
        for_each_dsi_port(port, intel_dsi->ports) {
                I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
 
@@ -1378,12 +1394,13 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
 static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
        .get_modes = intel_dsi_get_modes,
        .mode_valid = intel_dsi_mode_valid,
-       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_connector_funcs intel_dsi_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dsi_detect,
+       .late_register = intel_connector_register,
+       .early_unregister = intel_connector_unregister,
        .destroy = intel_dsi_connector_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_dsi_set_property,
@@ -1413,7 +1430,7 @@ void intel_dsi_init(struct drm_device *dev)
        struct intel_connector *intel_connector;
        struct drm_connector *connector;
        struct drm_display_mode *scan, *fixed_mode = NULL;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port;
        unsigned int i;
 
@@ -1449,7 +1466,7 @@ void intel_dsi_init(struct drm_device *dev)
        connector = &intel_connector->base;
 
        drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
-                        NULL);
+                        "DSI %c", port_name(port));
 
        intel_encoder->compute_config = intel_dsi_compute_config;
        intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1460,7 +1477,6 @@ void intel_dsi_init(struct drm_device *dev)
        intel_encoder->get_config = intel_dsi_get_config;
 
        intel_connector->get_hw_state = intel_connector_get_hw_state;
-       intel_connector->unregister = intel_connector_unregister;
 
        /*
         * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
@@ -1473,10 +1489,42 @@ void intel_dsi_init(struct drm_device *dev)
        else
                intel_encoder->crtc_mask = BIT(PIPE_B);
 
-       if (dev_priv->vbt.dsi.config->dual_link)
+       if (dev_priv->vbt.dsi.config->dual_link) {
                intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
-       else
+
+               switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
+               case DL_DCS_PORT_A:
+                       intel_dsi->dcs_backlight_ports = BIT(PORT_A);
+                       break;
+               case DL_DCS_PORT_C:
+                       intel_dsi->dcs_backlight_ports = BIT(PORT_C);
+                       break;
+               default:
+               case DL_DCS_PORT_A_AND_C:
+                       intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
+                       break;
+               }
+
+               switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
+               case DL_DCS_PORT_A:
+                       intel_dsi->dcs_cabc_ports = BIT(PORT_A);
+                       break;
+               case DL_DCS_PORT_C:
+                       intel_dsi->dcs_cabc_ports = BIT(PORT_C);
+                       break;
+               default:
+               case DL_DCS_PORT_A_AND_C:
+                       intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
+                       break;
+               }
+       } else {
                intel_dsi->ports = BIT(port);
+               intel_dsi->dcs_backlight_ports = BIT(port);
+               intel_dsi->dcs_cabc_ports = BIT(port);
+       }
+
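+       /* CABC: Content Adaptive Backlight Control; honour the VBT capability bit (editor's note). */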
+       if (!dev_priv->vbt.dsi.config->cabc_supported)
+               intel_dsi->dcs_cabc_ports = 0;
 
        /* Create a DSI host (and a device) for each port. */
        for_each_dsi_port(port, intel_dsi->ports) {
@@ -1545,14 +1593,14 @@ void intel_dsi_init(struct drm_device *dev)
                goto err;
        }
 
+       connector->display_info.width_mm = fixed_mode->width_mm;
+       connector->display_info.height_mm = fixed_mode->height_mm;
+
        intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+       intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        intel_dsi_add_properties(intel_connector);
 
-       drm_connector_register(connector);
-
-       intel_panel_setup_backlight(connector, INVALID_PIPE);
-
        return;
 
 err:
index 61a6957fc6c2ce2e2192e04d0c504be864972ec8..5967ea6d60450b55e83de1fd1bfafe183d8b3660 100644 (file)
@@ -78,6 +78,10 @@ struct intel_dsi {
 
        u8 escape_clk_div;
        u8 dual_link;
+
+       u16 dcs_backlight_ports;
+       u16 dcs_cabc_ports;
+
        u8 pixel_overlap;
        u32 port_bits;
        u32 bw_timer;
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
new file mode 100644 (file)
index 0000000..ac7c602
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Deepak M <m.deepak at intel.com>
+ */
+
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "i915_drv.h"
+#include <video/mipi_display.h>
+#include <drm/drm_mipi_dsi.h>
+
+#define CONTROL_DISPLAY_BCTRL          (1 << 5)
+#define CONTROL_DISPLAY_DD             (1 << 3)
+#define CONTROL_DISPLAY_BL             (1 << 2)
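+
+/*
+ * Editor's note: per the common MIPI DCS "write control display" layout,
+ * BCTRL gates the brightness-control block, DD enables display dimming and
+ * BL switches the backlight itself.
+ */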
+
+#define POWER_SAVE_OFF                 (0 << 0)
+#define POWER_SAVE_LOW                 (1 << 0)
+#define POWER_SAVE_MEDIUM              (2 << 0)
+#define POWER_SAVE_HIGH                        (3 << 0)
+#define POWER_SAVE_OUTDOOR_MODE                (4 << 0)
+
+#define PANEL_PWM_MAX_VALUE            0xFF
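+
+/*
+ * Editor's note: the DCS brightness value sent below is a single byte, hence
+ * the 0xFF ceiling; 16-bit capable panels would need a two-byte payload (see
+ * the FIXMEs in the get/set helpers).
+ */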
+
+static u32 dcs_get_backlight(struct intel_connector *connector)
+{
+       struct intel_encoder *encoder = connector->encoder;
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       struct mipi_dsi_device *dsi_device;
+       u8 data = 0; /* avoid returning an uninitialized value if the read fails */
+       enum port port;
+
+       /* FIXME: Need to take care of 16 bit brightness level */
+       for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+               dsi_device = intel_dsi->dsi_hosts[port]->device;
+               mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+                                 &data, sizeof(data));
+               break;
+       }
+
+       return data;
+}
+
+static void dcs_set_backlight(struct intel_connector *connector, u32 level)
+{
+       struct intel_encoder *encoder = connector->encoder;
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       struct mipi_dsi_device *dsi_device;
+       u8 data = level;
+       enum port port;
+
+       /* FIXME: Need to take care of 16 bit brightness level */
+       for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+               dsi_device = intel_dsi->dsi_hosts[port]->device;
+               mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+                                  &data, sizeof(data));
+       }
+}
+
+static void dcs_disable_backlight(struct intel_connector *connector)
+{
+       struct intel_encoder *encoder = connector->encoder;
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       struct mipi_dsi_device *dsi_device;
+       enum port port;
+
+       dcs_set_backlight(connector, 0);
+
+       for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+               u8 cabc = POWER_SAVE_OFF;
+
+               dsi_device = intel_dsi->dsi_hosts[port]->device;
+               mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+                                  &cabc, sizeof(cabc));
+       }
+
+       for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+               u8 ctrl = 0;
+
+               dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+               mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+                                 &ctrl, sizeof(ctrl));
+
+               ctrl &= ~CONTROL_DISPLAY_BL;
+               ctrl &= ~CONTROL_DISPLAY_DD;
+               ctrl &= ~CONTROL_DISPLAY_BCTRL;
+
+               mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+                                  &ctrl, sizeof(ctrl));
+       }
+}
+
+static void dcs_enable_backlight(struct intel_connector *connector)
+{
+       struct intel_encoder *encoder = connector->encoder;
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       struct intel_panel *panel = &connector->panel;
+       struct mipi_dsi_device *dsi_device;
+       enum port port;
+
+       for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
+               u8 ctrl = 0;
+
+               dsi_device = intel_dsi->dsi_hosts[port]->device;
+
+               mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
+                                 &ctrl, sizeof(ctrl));
+
+               ctrl |= CONTROL_DISPLAY_BL;
+               ctrl |= CONTROL_DISPLAY_DD;
+               ctrl |= CONTROL_DISPLAY_BCTRL;
+
+               mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+                                  &ctrl, sizeof(ctrl));
+       }
+
+       for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
+               u8 cabc = POWER_SAVE_MEDIUM;
+
+               dsi_device = intel_dsi->dsi_hosts[port]->device;
+               mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
+                                  &cabc, sizeof(cabc));
+       }
+
+       dcs_set_backlight(connector, panel->backlight.level);
+}
+
+static int dcs_setup_backlight(struct intel_connector *connector,
+                              enum pipe unused)
+{
+       struct intel_panel *panel = &connector->panel;
+
+       panel->backlight.max = PANEL_PWM_MAX_VALUE;
+       panel->backlight.level = PANEL_PWM_MAX_VALUE;
+
+       return 0;
+}
+
+int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
+{
+       struct drm_device *dev = intel_connector->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_encoder *encoder = intel_connector->encoder;
+       struct intel_panel *panel = &intel_connector->panel;
+
+       if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
+               return -ENODEV;
+
+       if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
+               return -EINVAL;
+
+       panel->backlight.setup = dcs_setup_backlight;
+       panel->backlight.enable = dcs_enable_backlight;
+       panel->backlight.disable = dcs_disable_backlight;
+       panel->backlight.set = dcs_set_backlight;
+       panel->backlight.get = dcs_get_backlight;
+
+       return 0;
+}
index e498f1c3221e368a5d2d53b20c609b7633baa744..cd154ce6b6c1ed8edad276dec2477532bbf52415 100644 (file)
@@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = {
        { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
 };
 
+#define CHV_GPIO_IDX_START_N           0
+#define CHV_GPIO_IDX_START_E           73
+#define CHV_GPIO_IDX_START_SW          100
+#define CHV_GPIO_IDX_START_SE          198
+
+#define CHV_VBT_MAX_PINS_PER_FMLY      15
+
+#define CHV_GPIO_PAD_CFG0(f, i)                (0x4400 + (f) * 0x400 + (i) * 8)
+#define  CHV_GPIO_GPIOEN               (1 << 15)
+#define  CHV_GPIO_GPIOCFG_GPIO         (0 << 8)
+#define  CHV_GPIO_GPIOCFG_GPO          (1 << 8)
+#define  CHV_GPIO_GPIOCFG_GPI          (2 << 8)
+#define  CHV_GPIO_GPIOCFG_HIZ          (3 << 8)
+#define  CHV_GPIO_GPIOTXSTATE(state)   ((!!(state)) << 1)
+
+#define CHV_GPIO_PAD_CFG1(f, i)                (0x4400 + (f) * 0x400 + (i) * 8 + 4)
+#define  CHV_GPIO_CFGLOCK              (1 << 31)
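+
+/*
+ * Layout implied by the macros above (editor's note): each GPIO family sits
+ * at a 0x400-byte offset from base 0x4400, and each pad within a family owns
+ * an 8-byte register pair -- CFG0 at (i * 8), CFG1 at (i * 8 + 4).
+ */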
+
 static inline enum port intel_dsi_seq_port_to_port(u8 port)
 {
        return port ? PORT_C : PORT_A;
@@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
        map = &vlv_gpio_table[gpio_index];
 
        if (dev_priv->vbt.dsi.seq_version >= 3) {
-               DRM_DEBUG_KMS("GPIO element v3 not supported\n");
-               return;
+               /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+               port = IOSF_PORT_GPIO_NC;
        } else {
                if (gpio_source == 0) {
                        port = IOSF_PORT_GPIO_NC;
                } else if (gpio_source == 1) {
-                       port = IOSF_PORT_GPIO_SC;
+                       DRM_DEBUG_KMS("SC gpio not supported\n");
+                       return;
                } else {
                        DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
                        return;
@@ -231,10 +250,60 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
        mutex_unlock(&dev_priv->sb_lock);
 }
 
+static void chv_exec_gpio(struct drm_i915_private *dev_priv,
+                         u8 gpio_source, u8 gpio_index, bool value)
+{
+       u16 cfg0, cfg1;
+       u16 family_num;
+       u8 port;
+
+       if (dev_priv->vbt.dsi.seq_version >= 3) {
+               if (gpio_index >= CHV_GPIO_IDX_START_SE) {
+                       /* XXX: it's unclear whether 255->57 is part of SE. */
+                       gpio_index -= CHV_GPIO_IDX_START_SE;
+                       port = CHV_IOSF_PORT_GPIO_SE;
+               } else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
+                       gpio_index -= CHV_GPIO_IDX_START_SW;
+                       port = CHV_IOSF_PORT_GPIO_SW;
+               } else if (gpio_index >= CHV_GPIO_IDX_START_E) {
+                       gpio_index -= CHV_GPIO_IDX_START_E;
+                       port = CHV_IOSF_PORT_GPIO_E;
+               } else {
+                       port = CHV_IOSF_PORT_GPIO_N;
+               }
+       } else {
+               /* XXX: The spec is unclear about CHV GPIO on seq v2 */
+               if (gpio_source != 0) {
+                       DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+                       return;
+               }
+
+               if (gpio_index >= CHV_GPIO_IDX_START_E) {
+                       DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
+                                     gpio_index);
+                       return;
+               }
+
+               port = CHV_IOSF_PORT_GPIO_N;
+       }
+
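+       /*
+        * The VBT gives a linear pad index within the community; split it
+        * into a family number and a pad index within that family
+        * (CHV_VBT_MAX_PINS_PER_FMLY pads per family).
+        */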
+       family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
+       gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
+
+       cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
+       cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
+
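+       /*
+        * Zero CFG1 first (clearing CHV_GPIO_CFGLOCK), then configure the
+        * pad as a GPO driving the requested level.
+        */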
+       mutex_lock(&dev_priv->sb_lock);
+       vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
+       vlv_iosf_sb_write(dev_priv, port, cfg0,
+                         CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+       mutex_unlock(&dev_priv->sb_lock);
+}
+
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
        struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u8 gpio_source, gpio_index;
        bool value;
 
@@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 
        if (IS_VALLEYVIEW(dev_priv))
                vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+       else if (IS_CHERRYVIEW(dev_priv))
+               chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
        else
                DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
 
@@ -398,7 +469,7 @@ static int vbt_panel_get_modes(struct drm_panel *panel)
        struct vbt_panel *vbt_panel = to_vbt_panel(panel);
        struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
        struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_display_mode *mode;
 
        if (!panel->connector)
@@ -426,7 +497,7 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
 struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
 {
        struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
        struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
        struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
@@ -578,14 +649,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
                                );
 
        /*
-        * Exit zero  is unified val ths_zero and ths_exit
+        * Exit zero is the unified value of ths_zero and ths_exit
         * minimum value for ths_exit = 110ns
         * min (exit_zero_cnt * 2) = 110/UI
         * exit_zero_cnt = 55/UI
         */
-        if (exit_zero_cnt < (55 * ui_den / ui_num))
-               if ((55 * ui_den) % ui_num)
-                       exit_zero_cnt += 1;
+       if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
+               exit_zero_cnt += 1;
 
        /* clk zero count */
        clk_zero_cnt = DIV_ROUND_UP(
index 1765e6e18f2c6e8aa2dfb2ac4d8f08fb9983dc40..6ab58a01b18e2ee0cb20345aac39208c020d7e5b 100644 (file)
@@ -55,12 +55,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
                        struct intel_crtc_state *config,
                        int target_dsi_clk)
 {
-       unsigned int calc_m = 0, calc_p = 0;
        unsigned int m_min, m_max, p_min = 2, p_max = 6;
        unsigned int m, n, p;
-       int ref_clk;
-       int delta = target_dsi_clk;
-       u32 m_seed;
+       unsigned int calc_m, calc_p;
+       int delta, ref_clk;
 
        /* target_dsi_clk is expected in kHz */
        if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
@@ -80,6 +78,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
                m_max = 92;
        }
 
+       calc_p = p_min;
+       calc_m = m_min;
+       delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n));
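+       /*
+        * (Editor's note) The seeding above guarantees calc_m/calc_p are
+        * initialized and gives the loops below -- which stop as soon as
+        * delta reaches 0 -- a baseline to improve on.
+        */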
+
        for (m = m_min; m <= m_max && delta; m++) {
                for (p = p_min; p <= p_max && delta; p++) {
                        /*
@@ -97,11 +99,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
        }
 
        /* register has log2(N1), this works fine for powers of two */
-       n = ffs(n) - 1;
-       m_seed = lfsr_converts[calc_m - 62];
        config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
-       config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT |
-               m_seed << DSI_PLL_M1_DIV_SHIFT;
+       config->dsi_pll.div =
+               (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT |
+               (u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT;
 
        return 0;
 }
@@ -113,7 +114,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
 static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
                               struct intel_crtc_state *config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        int ret;
        u32 dsi_clk;
@@ -234,8 +235,11 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
         * PLL lock should deassert within 200us.
         * Wait up to 1ms before timing out.
         */
-       if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE)
-                                       & BXT_DSI_PLL_LOCKED) == 0, 1))
+       if (intel_wait_for_register(dev_priv,
+                                   BXT_DSI_PLL_ENABLE,
+                                   BXT_DSI_PLL_LOCKED,
+                                   0,
+                                   1))
                DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
 }
 
@@ -321,7 +325,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
        u32 dsi_clk;
        u32 dsi_ratio;
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
        /* Divide by zero */
        if (!pipe_bpp) {
@@ -356,7 +360,7 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
 static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
        u32 temp;
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 
        temp = I915_READ(MIPI_CTRL(port));
@@ -370,7 +374,7 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
                                   const struct intel_crtc_state *config)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp;
        u32 dsi_rate = 0;
        u32 pll_ratio = 0;
@@ -465,7 +469,7 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
 static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
                               const struct intel_crtc_state *config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
        enum port port;
        u32 val;
@@ -486,7 +490,11 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
        I915_WRITE(BXT_DSI_PLL_ENABLE, val);
 
        /* Timeout and fail if PLL not locked */
-       if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) {
+       if (intel_wait_for_register(dev_priv,
+                                   BXT_DSI_PLL_ENABLE,
+                                   BXT_DSI_PLL_LOCKED,
+                                   BXT_DSI_PLL_LOCKED,
+                                   1)) {
                DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
                return;
        }
@@ -542,7 +550,7 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
        u32 tmp;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Clear old configurations */
        tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
index 286baec979c84098321cf9039729f96480400fa0..47bdf9dad0d37c093a1b22099c4db0a5099be552 100644 (file)
@@ -122,7 +122,7 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
 static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
 {
        struct drm_device *dev = connector->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
        u32 tmp;
 
@@ -138,7 +138,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
                                   enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        u32 tmp;
 
@@ -155,7 +155,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
 static void intel_dvo_get_config(struct intel_encoder *encoder,
                                 struct intel_crtc_state *pipe_config)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        u32 tmp, flags = 0;
 
@@ -176,7 +176,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
 
 static void intel_disable_dvo(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
        u32 temp = I915_READ(dvo_reg);
@@ -188,7 +188,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
 
 static void intel_enable_dvo(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
@@ -256,7 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
 static void intel_dvo_pre_enable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -305,7 +305,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
 
 static int intel_dvo_get_modes(struct drm_connector *connector)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        const struct drm_display_mode *fixed_mode =
                to_intel_connector(connector)->panel.fixed_mode;
 
@@ -341,6 +341,8 @@ static void intel_dvo_destroy(struct drm_connector *connector)
 static const struct drm_connector_funcs intel_dvo_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dvo_detect,
+       .late_register = intel_connector_register,
+       .early_unregister = intel_connector_unregister,
        .destroy = intel_dvo_destroy,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .atomic_get_property = intel_connector_atomic_get_property,
@@ -351,7 +353,6 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
        .mode_valid = intel_dvo_mode_valid,
        .get_modes = intel_dvo_get_modes,
-       .best_encoder = intel_best_encoder,
 };
 
 static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -378,7 +379,7 @@ static struct drm_display_mode *
 intel_dvo_get_current_mode(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
        uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
        struct drm_display_mode *mode = NULL;
@@ -406,9 +407,21 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
        return mode;
 }
 
+static char intel_dvo_port_name(i915_reg_t dvo_reg)
+{
+       if (i915_mmio_reg_equal(dvo_reg, DVOA))
+               return 'A';
+       else if (i915_mmio_reg_equal(dvo_reg, DVOB))
+               return 'B';
+       else if (i915_mmio_reg_equal(dvo_reg, DVOC))
+               return 'C';
+       else
+               return '?';
+}
+
 void intel_dvo_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *intel_encoder;
        struct intel_dvo *intel_dvo;
        struct intel_connector *intel_connector;
@@ -428,8 +441,6 @@ void intel_dvo_init(struct drm_device *dev)
        intel_dvo->attached_connector = intel_connector;
 
        intel_encoder = &intel_dvo->base;
-       drm_encoder_init(dev, &intel_encoder->base,
-                        &intel_dvo_enc_funcs, encoder_type, NULL);
 
        intel_encoder->disable = intel_disable_dvo;
        intel_encoder->enable = intel_enable_dvo;
@@ -438,7 +449,6 @@ void intel_dvo_init(struct drm_device *dev)
        intel_encoder->compute_config = intel_dvo_compute_config;
        intel_encoder->pre_enable = intel_dvo_pre_enable;
        intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
-       intel_connector->unregister = intel_connector_unregister;
 
        /* Now, try to find a controller */
        for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
@@ -496,6 +506,10 @@ void intel_dvo_init(struct drm_device *dev)
                if (!dvoinit)
                        continue;
 
+               drm_encoder_init(dev, &intel_encoder->base,
+                                &intel_dvo_enc_funcs, encoder_type,
+                                "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
+
                intel_encoder->type = INTEL_OUTPUT_DVO;
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
                switch (dvo->type) {
@@ -537,7 +551,6 @@ void intel_dvo_init(struct drm_device *dev)
                        intel_dvo->panel_wants_dither = true;
                }
 
-               drm_connector_register(connector);
                return;
        }
 
index d5a7cfec589b6f0c2e5b30f744eb7809ea2604a8..6a7ad3ed1463206c7b1b88e658e3103e7c58526e 100644 (file)
@@ -124,7 +124,9 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 
        /* Wait for compressing bit to clear */
-       if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
+       if (intel_wait_for_register(dev_priv,
+                                   FBC_STATUS, FBC_STAT_COMPRESSING, 0,
+                                   10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }
@@ -374,8 +376,9 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
  * @dev_priv: i915 device instance
  *
  * This function is used to verify the current state of FBC.
+ *
  * FIXME: This should be tracked in the plane config eventually
- *        instead of queried at runtime for most callers.
+ * instead of queried at runtime for most callers.
  */
 bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
 {
@@ -389,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_work *work = &fbc->work;
        struct intel_crtc *crtc = fbc->crtc;
-       struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
+       struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
 
        if (drm_crtc_vblank_get(&crtc->base)) {
                DRM_ERROR("vblank not available for FBC on pipe %c\n",
@@ -442,7 +445,7 @@ out:
 
 static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_work *work = &fbc->work;
 
@@ -480,10 +483,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
                intel_fbc_hw_deactivate(dev_priv);
 }
 
-static bool multiple_pipes_ok(struct intel_crtc *crtc)
+static bool multiple_pipes_ok(struct intel_crtc *crtc,
+                             struct intel_plane_state *plane_state)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-       struct drm_plane *primary = crtc->base.primary;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        enum pipe pipe = crtc->pipe;
 
@@ -491,9 +494,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc)
        if (!no_fbc_on_multiple_pipes(dev_priv))
                return true;
 
-       WARN_ON(!drm_modeset_is_locked(&primary->mutex));
-
-       if (to_intel_plane_state(primary->state)->visible)
+       if (plane_state->visible)
                fbc->visible_pipes_mask |= (1 << pipe);
        else
                fbc->visible_pipes_mask &= ~(1 << pipe);
@@ -554,7 +555,7 @@ again:
 
 static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct drm_mm_node *uninitialized_var(compressed_llb);
        int size, fb_cpp, ret;
@@ -685,7 +686,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
  */
 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        unsigned int effective_w, effective_h, max_w, max_h;
 
@@ -708,21 +709,16 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
        return effective_w <= max_w && effective_h <= max_h;
 }
 
-static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
+static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
+                                        struct intel_crtc_state *crtc_state,
+                                        struct intel_plane_state *plane_state)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
-       struct intel_crtc_state *crtc_state =
-               to_intel_crtc_state(crtc->base.state);
-       struct intel_plane_state *plane_state =
-               to_intel_plane_state(crtc->base.primary->state);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj;
 
-       WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
-       WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
-
        cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                cache->crtc.hsw_bdw_pixel_rate =
@@ -740,7 +736,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
 
        /* FIXME: We lack the proper locking here, so only run this on the
         * platforms that need. */
-       if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
+       if (IS_GEN(dev_priv, 5, 6))
                cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
        cache->fb.pixel_format = fb->pixel_format;
        cache->fb.stride = fb->pitches[0];
@@ -750,7 +746,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
 
@@ -822,23 +818,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
 
 static bool intel_fbc_can_choose(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
-       bool enable_by_default = IS_HASWELL(dev_priv) ||
-                                IS_BROADWELL(dev_priv);
 
-       if (intel_vgpu_active(dev_priv->dev)) {
+       if (intel_vgpu_active(dev_priv)) {
                fbc->no_fbc_reason = "VGPU is active";
                return false;
        }
 
-       if (i915.enable_fbc < 0 && !enable_by_default) {
-               fbc->no_fbc_reason = "disabled per chip default";
-               return false;
-       }
-
        if (!i915.enable_fbc) {
-               fbc->no_fbc_reason = "disabled per module param";
+               fbc->no_fbc_reason = "disabled per module param or by default";
                return false;
        }
 
@@ -858,7 +847,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
 static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
                                     struct intel_fbc_reg_params *params)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
 
@@ -887,9 +876,11 @@ static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
        return memcmp(params1, params2, sizeof(*params1)) == 0;
 }
 
-void intel_fbc_pre_update(struct intel_crtc *crtc)
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+                         struct intel_crtc_state *crtc_state,
+                         struct intel_plane_state *plane_state)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
@@ -897,7 +888,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc)
 
        mutex_lock(&fbc->lock);
 
-       if (!multiple_pipes_ok(crtc)) {
+       if (!multiple_pipes_ok(crtc, plane_state)) {
                fbc->no_fbc_reason = "more than one pipe active";
                goto deactivate;
        }
@@ -905,7 +896,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc)
        if (!fbc->enabled || fbc->crtc != crtc)
                goto unlock;
 
-       intel_fbc_update_state_cache(crtc);
+       intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
 
 deactivate:
        intel_fbc_deactivate(dev_priv);
@@ -915,7 +906,7 @@ unlock:
 
 static void __intel_fbc_post_update(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_reg_params old_params;
 
@@ -948,7 +939,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
 
 void intel_fbc_post_update(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
@@ -997,13 +988,13 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
        if (!fbc_supported(dev_priv))
                return;
 
-       if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
-               return;
-
        mutex_lock(&fbc->lock);
 
        fbc->busy_bits &= ~frontbuffer_bits;
 
+       if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
+               goto out;
+
        if (!fbc->busy_bits && fbc->enabled &&
            (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
                if (fbc->active)
@@ -1012,6 +1003,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
                        __intel_fbc_post_update(fbc->crtc);
        }
 
+out:
        mutex_unlock(&fbc->lock);
 }
 
@@ -1089,9 +1081,11 @@ out:
  * intel_fbc_enable multiple times for the same pipe without an
  * intel_fbc_disable in the middle, as long as it is deactivated.
  */
-void intel_fbc_enable(struct intel_crtc *crtc)
+void intel_fbc_enable(struct intel_crtc *crtc,
+                     struct intel_crtc_state *crtc_state,
+                     struct intel_plane_state *plane_state)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
@@ -1102,19 +1096,19 @@ void intel_fbc_enable(struct intel_crtc *crtc)
        if (fbc->enabled) {
                WARN_ON(fbc->crtc == NULL);
                if (fbc->crtc == crtc) {
-                       WARN_ON(!crtc->config->enable_fbc);
+                       WARN_ON(!crtc_state->enable_fbc);
                        WARN_ON(fbc->active);
                }
                goto out;
        }
 
-       if (!crtc->config->enable_fbc)
+       if (!crtc_state->enable_fbc)
                goto out;
 
        WARN_ON(fbc->active);
        WARN_ON(fbc->crtc != NULL);
 
-       intel_fbc_update_state_cache(crtc);
+       intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
        if (intel_fbc_alloc_cfb(crtc)) {
                fbc->no_fbc_reason = "not enough stolen memory";
                goto out;
@@ -1162,7 +1156,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
  */
 void intel_fbc_disable(struct intel_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
@@ -1216,12 +1210,32 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
        if (!no_fbc_on_multiple_pipes(dev_priv))
                return;
 
-       for_each_intel_crtc(dev_priv->dev, crtc)
+       for_each_intel_crtc(&dev_priv->drm, crtc)
                if (intel_crtc_active(&crtc->base) &&
                    to_intel_plane_state(crtc->base.primary->state)->visible)
                        dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
 }
 
+/*
+ * The DDX driver changes its behavior depending on the value it reads from
+ * i915.enable_fbc, so sanitize it by translating the default (negative) value
+ * into either 0 or 1 so that the DDX can tell what's going on.
+ *
+ * Notice that this is done at driver initialization and we still allow user
+ * space to change the value during runtime without sanitizing it again. IGT
+ * relies on being able to change i915.enable_fbc at runtime.
+ */
+static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
+{
+       if (i915.enable_fbc >= 0)
+               return !!i915.enable_fbc;
+
+       if (IS_BROADWELL(dev_priv))
+               return 1;
+
+       return 0;
+}
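
For readers skimming the diff, a compact restatement of the mapping this helper performs (a reading aid, not part of the patch):

    /*
     * i915.enable_fbc < 0   -> platform default: 1 on Broadwell, 0 elsewhere
     * i915.enable_fbc >= 0  -> clamped to 0 or 1 via !!
     */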
+
 /**
  * intel_fbc_init - Initialize FBC
  * @dev_priv: the i915 device
@@ -1239,6 +1253,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
        fbc->active = false;
        fbc->work.scheduled = false;
 
+       i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+       DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
+
        if (!HAS_FBC(dev_priv)) {
                fbc->no_fbc_reason = "unsupported by this chipset";
                return;
index ab8d09a81f14130028c1c454ec5a9ccb15851851..86b00c6db1a6d694ad1beb97fcb37fc2cb899873 100644 (file)
@@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
        if (size * 2 < ggtt->stolen_usable_size)
                obj = i915_gem_object_create_stolen(dev, size);
        if (obj == NULL)
-               obj = i915_gem_alloc_object(dev, size);
-       if (!obj) {
+               obj = i915_gem_object_create(dev, size);
+       if (IS_ERR(obj)) {
                DRM_ERROR("failed to allocate framebuffer\n");
-               ret = -ENOMEM;
+               ret = PTR_ERR(obj);
                goto out;
        }
 
@@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct fb_info *info;
        struct drm_framebuffer *fb;
+       struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
-       int size, ret;
        bool prealloc = false;
+       void *vaddr;
+       int ret;
 
        if (intel_fb &&
            (sizes->fb_width > intel_fb->base.width ||
@@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
        }
 
        obj = intel_fb->obj;
-       size = obj->base.size;
 
        mutex_lock(&dev->struct_mutex);
 
@@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
        info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
        info->fbops = &intelfb_ops;
 
+       vma = i915_gem_obj_to_ggtt(obj);
+
        /* setup aperture base/size for vesafb takeover */
        info->apertures->ranges[0].base = dev->mode_config.fb_base;
        info->apertures->ranges[0].size = ggtt->mappable_end;
 
-       info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
-       info->fix.smem_len = size;
+       info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
+       info->fix.smem_len = vma->node.size;
 
-       info->screen_base =
-               ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
-                          size);
-       if (!info->screen_base) {
+       vaddr = i915_vma_pin_iomap(vma);
+       if (IS_ERR(vaddr)) {
                DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
-               ret = -ENOSPC;
+               ret = PTR_ERR(vaddr);
                goto out_destroy_fbi;
        }
-       info->screen_size = size;
+       info->screen_base = vaddr;
+       info->screen_size = vma->node.size;
 
        /* This driver doesn't need a VT switch to restore the mode on resume */
        info->skip_vt_switch = true;
@@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
        drm_fb_helper_release_fbi(helper);
 out_unpin:
-       i915_gem_object_ggtt_unpin(obj);
+       intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -360,23 +362,24 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
                                    bool *enabled, int width, int height)
 {
        struct drm_device *dev = fb_helper->dev;
+       unsigned long conn_configured, mask;
+       unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
        int i, j;
        bool *save_enabled;
        bool fallback = true;
        int num_connectors_enabled = 0;
        int num_connectors_detected = 0;
-       uint64_t conn_configured = 0, mask;
        int pass = 0;
 
-       save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
-                              GFP_KERNEL);
+       save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
        if (!save_enabled)
                return false;
 
-       memcpy(save_enabled, enabled, fb_helper->connector_count);
-       mask = (1 << fb_helper->connector_count) - 1;
+       memcpy(save_enabled, enabled, count);
+       mask = BIT(count) - 1;
+       conn_configured = 0;
 retry:
-       for (i = 0; i < fb_helper->connector_count; i++) {
+       for (i = 0; i < count; i++) {
                struct drm_fb_helper_connector *fb_conn;
                struct drm_connector *connector;
                struct drm_encoder *encoder;
@@ -386,7 +389,7 @@ retry:
                fb_conn = fb_helper->connector_info[i];
                connector = fb_conn->connector;
 
-               if (conn_configured & (1 << i))
+               if (conn_configured & BIT(i))
                        continue;
 
                if (pass == 0 && !connector->has_tile)
@@ -398,7 +401,7 @@ retry:
                if (!enabled[i]) {
                        DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
                                      connector->name);
-                       conn_configured |= (1 << i);
+                       conn_configured |= BIT(i);
                        continue;
                }
 
@@ -417,7 +420,7 @@ retry:
                        DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
                                      connector->name);
                        enabled[i] = false;
-                       conn_configured |= (1 << i);
+                       conn_configured |= BIT(i);
                        continue;
                }
 
@@ -430,14 +433,15 @@ retry:
                        intel_crtc->lut_b[j] = j;
                }
 
-               new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
+               new_crtc = intel_fb_helper_crtc(fb_helper,
+                                               connector->state->crtc);
 
                /*
                 * Make sure we're not trying to drive multiple connectors
                 * with a single CRTC, since our cloning support may not
                 * match the BIOS.
                 */
-               for (j = 0; j < fb_helper->connector_count; j++) {
+               for (j = 0; j < count; j++) {
                        if (crtcs[j] == new_crtc) {
                                DRM_DEBUG_KMS("fallback: cloned configuration\n");
                                goto bail;
@@ -488,15 +492,15 @@ retry:
                }
                crtcs[i] = new_crtc;
 
-               DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
+               DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
                              connector->name,
-                             pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
                              connector->state->crtc->base.id,
+                             connector->state->crtc->name,
                              modes[i]->hdisplay, modes[i]->vdisplay,
                              modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
 
                fallback = false;
-               conn_configured |= (1 << i);
+               conn_configured |= BIT(i);
        }
 
        if ((conn_configured & mask) != mask) {
@@ -520,7 +524,7 @@ retry:
        if (fallback) {
 bail:
                DRM_DEBUG_KMS("Not using firmware configuration\n");
-               memcpy(enabled, save_enabled, fb_helper->connector_count);
+               memcpy(enabled, save_enabled, count);
                kfree(save_enabled);
                return false;
        }
@@ -536,8 +540,7 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
        .fb_probe = intelfb_create,
 };
 
-static void intel_fbdev_destroy(struct drm_device *dev,
-                               struct intel_fbdev *ifbdev)
+static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 {
        /* We rely on the object-free to release the VMA pinning for
         * the info->screen_base mmapping. Leaking the VMA is simpler than
@@ -550,9 +553,14 @@ static void intel_fbdev_destroy(struct drm_device *dev,
        drm_fb_helper_fini(&ifbdev->helper);
 
        if (ifbdev->fb) {
-               drm_framebuffer_unregister_private(&ifbdev->fb->base);
+               mutex_lock(&ifbdev->helper.dev->struct_mutex);
+               intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
+               mutex_unlock(&ifbdev->helper.dev->struct_mutex);
+
                drm_framebuffer_remove(&ifbdev->fb->base);
        }
+
+       kfree(ifbdev);
 }
 
 /*
@@ -685,9 +693,9 @@ out:
 
 static void intel_fbdev_suspend_worker(struct work_struct *work)
 {
-       intel_fbdev_set_suspend(container_of(work,
-                                            struct drm_i915_private,
-                                            fbdev_suspend_work)->dev,
+       intel_fbdev_set_suspend(&container_of(work,
+                                             struct drm_i915_private,
+                                             fbdev_suspend_work)->drm,
                                FBINFO_STATE_RUNNING,
                                true);
 }
@@ -695,7 +703,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
 int intel_fbdev_init(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
@@ -717,8 +725,6 @@ int intel_fbdev_init(struct drm_device *dev)
                return ret;
        }
 
-       ifbdev->helper.atomic = true;
-
        dev_priv->fbdev = ifbdev;
        INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
 
@@ -729,38 +735,50 @@ int intel_fbdev_init(struct drm_device *dev)
 
 static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
 {
-       struct drm_i915_private *dev_priv = data;
-       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+       struct intel_fbdev *ifbdev = data;
 
        /* Due to peculiar init order wrt to hpd handling this is separate. */
        if (drm_fb_helper_initial_config(&ifbdev->helper,
                                         ifbdev->preferred_bpp))
-               intel_fbdev_fini(dev_priv->dev);
+               intel_fbdev_fini(ifbdev->helper.dev);
 }
 
 void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
-       async_schedule(intel_fbdev_initial_config, to_i915(dev));
+       struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+
+       ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
+}
+
+static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
+{
+       if (!ifbdev->cookie)
+               return;
+
+       /* Only serialises with all preceding async calls, hence +1 */
+       async_synchronize_cookie(ifbdev->cookie + 1);
+       ifbdev->cookie = 0;
 }
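
For context, a minimal sketch of the <linux/async.h> cookie pairing relied on above; my_async_func and schedule_and_wait are hypothetical, but async_schedule() and async_synchronize_cookie() are the real primitives:

    #include <linux/async.h>

    static void my_async_func(void *data, async_cookie_t cookie)
    {
            /* deferred work runs here */
    }

    static void schedule_and_wait(void)
    {
            async_cookie_t cookie = async_schedule(my_async_func, NULL);

            /*
             * async_synchronize_cookie(c) waits for every async call whose
             * cookie is strictly lower than c, so passing cookie + 1 also
             * waits for the call scheduled just above.
             */
            async_synchronize_cookie(cookie + 1);
    }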
 
 void intel_fbdev_fini(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       if (!dev_priv->fbdev)
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_fbdev *ifbdev = dev_priv->fbdev;
+
+       if (!ifbdev)
                return;
 
        flush_work(&dev_priv->fbdev_suspend_work);
-
        if (!current_is_async())
-               async_synchronize_full();
-       intel_fbdev_destroy(dev, dev_priv->fbdev);
-       kfree(dev_priv->fbdev);
+               intel_fbdev_sync(ifbdev);
+
+       intel_fbdev_destroy(ifbdev);
        dev_priv->fbdev = NULL;
 }
 
 void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_fbdev *ifbdev = dev_priv->fbdev;
        struct fb_info *info;
 
@@ -809,7 +827,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        if (dev_priv->fbdev)
                drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
 }
@@ -817,13 +835,15 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
 void intel_fbdev_restore_mode(struct drm_device *dev)
 {
        int ret;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_fbdev *ifbdev = dev_priv->fbdev;
        struct drm_fb_helper *fb_helper;
 
        if (!ifbdev)
                return;
 
+       intel_fbdev_sync(ifbdev);
+
        fb_helper = &ifbdev->helper;
 
        ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
index 9be839a242f942752ea44c1278a036c0f94f9443..2aa744081f090248bdfc721703f2d3da3692c3cc 100644 (file)
@@ -50,7 +50,7 @@
 
 static bool ivb_can_enable_err_int(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc;
        enum pipe pipe;
 
@@ -68,7 +68,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
 
 static bool cpt_can_enable_serr_int(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
        struct intel_crtc *crtc;
 
@@ -105,7 +105,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
                                             enum pipe pipe,
                                             bool enable, bool old)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0xffff0000;
 
@@ -123,7 +123,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
                                          DE_PIPEB_FIFO_UNDERRUN;
 
@@ -154,7 +154,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  enum pipe pipe,
                                                  bool enable, bool old)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        if (enable) {
                I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
 
@@ -176,7 +176,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
 static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  enum pipe pipe, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (enable)
                bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
@@ -188,7 +188,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
                       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
 
@@ -220,7 +220,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable, bool old)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (enable) {
                I915_WRITE(SERR_INT,
@@ -244,7 +244,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                                    enum pipe pipe, bool enable)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        bool old;
@@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
        bool ret;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
+       ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
                                                      enable);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
        intel_crtc->pch_fifo_underrun_disabled = !enable;
 
        if (HAS_PCH_IBX(dev_priv))
-               ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+               ibx_set_fifo_underrun_reporting(&dev_priv->drm,
+                                               pch_transcoder,
                                                enable);
        else
-               cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
+               cpt_set_fifo_underrun_reporting(&dev_priv->drm,
+                                               pch_transcoder,
                                                enable, old);
 
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
 
        spin_lock_irq(&dev_priv->irq_lock);
 
-       for_each_intel_crtc(dev_priv->dev, crtc) {
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
                if (crtc->cpu_fifo_underrun_disabled)
                        continue;
 
@@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
 
        spin_lock_irq(&dev_priv->irq_lock);
 
-       for_each_intel_crtc(dev_priv->dev, crtc) {
+       for_each_intel_crtc(&dev_priv->drm, crtc) {
                if (crtc->pch_fifo_underrun_disabled)
                        continue;
 
index 9d79c4c3e256d21fbecb579bd73ef86cf3f332d8..3e3e743740c01a2651850836f4fee970ea294b42 100644 (file)
@@ -26,6 +26,7 @@
 
 #include "intel_guc_fwif.h"
 #include "i915_guc_reg.h"
+#include "intel_ringbuffer.h"
 
 struct drm_i915_gem_request;
 
@@ -48,14 +49,23 @@ struct drm_i915_gem_request;
  * queue (a circular array of work items), again described in the process
  * descriptor. Work queue pages are mapped momentarily as required.
  *
- * Finally, we also keep a few statistics here, including the number of
- * submissions to each engine, and a record of the last submission failure
- * (if any).
+ * We also keep a few statistics on failures. Ideally, these should all
+ * be zero!
+ *   no_wq_space: times that the submission pre-check found no space was
+ *                available in the work queue (note, the queue is shared,
+ *                not per-engine). It is OK for this to be nonzero, but
+ *                it should not be huge!
+ *   q_fail: failed to enqueue a work item. This should never happen,
+ *           because we check for space beforehand.
+ *   b_fail: failed to ring the doorbell. This should never happen, unless
+ *           somehow the hardware misbehaves, or maybe if the GuC firmware
+ *           crashes? We probably need to reset the GPU to recover.
+ *   retcode: errno from last guc_submit()
  */
 struct i915_guc_client {
        struct drm_i915_gem_object *client_obj;
        void *client_base;              /* first page (only) of above   */
-       struct intel_context *owner;
+       struct i915_gem_context *owner;
        struct intel_guc *guc;
        uint32_t priority;
        uint32_t ctx_index;
@@ -71,12 +81,13 @@ struct i915_guc_client {
        uint32_t wq_tail;
        uint32_t unused;                /* Was 'wq_head'                */
 
-       /* GuC submission statistics & status */
-       uint64_t submissions[GUC_MAX_ENGINES_NUM];
-       uint32_t q_fail;
+       uint32_t no_wq_space;
+       uint32_t q_fail;                /* No longer used               */
        uint32_t b_fail;
        int retcode;
-       int spare;                      /* pad to 32 DWords             */
+
+       /* Per-engine counts of GuC submissions */
+       uint64_t submissions[I915_NUM_ENGINES];
 };
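
As a hypothetical sketch of how the new per-engine array would be indexed (the helper name is invented; engine->id is the usual intel_engine_cs index, bounded by I915_NUM_ENGINES):

    static void guc_client_count_submission(struct i915_guc_client *client,
                                            struct intel_engine_cs *engine)
    {
            /* one counter per engine, replacing the GUC_MAX_ENGINES_NUM sizing */
            client->submissions[engine->id]++;
    }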
 
 enum intel_guc_fw_status {
@@ -133,25 +144,24 @@ struct intel_guc {
        uint32_t action_fail;           /* Total number of failures     */
        int32_t action_err;             /* Last error code              */
 
-       uint64_t submissions[GUC_MAX_ENGINES_NUM];
-       uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
+       uint64_t submissions[I915_NUM_ENGINES];
+       uint32_t last_seqno[I915_NUM_ENGINES];
 };
 
 /* intel_guc_loader.c */
-extern void intel_guc_ucode_init(struct drm_device *dev);
-extern int intel_guc_ucode_load(struct drm_device *dev);
-extern void intel_guc_ucode_fini(struct drm_device *dev);
+extern void intel_guc_init(struct drm_device *dev);
+extern int intel_guc_setup(struct drm_device *dev);
+extern void intel_guc_fini(struct drm_device *dev);
 extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
 extern int intel_guc_suspend(struct drm_device *dev);
 extern int intel_guc_resume(struct drm_device *dev);
 
 /* i915_guc_submission.c */
-int i915_guc_submission_init(struct drm_device *dev);
-int i915_guc_submission_enable(struct drm_device *dev);
-int i915_guc_submit(struct i915_guc_client *client,
-                   struct drm_i915_gem_request *rq);
-void i915_guc_submission_disable(struct drm_device *dev);
-void i915_guc_submission_fini(struct drm_device *dev);
-int i915_guc_wq_check_space(struct i915_guc_client *client);
+int i915_guc_submission_init(struct drm_i915_private *dev_priv);
+int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
+int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
+int i915_guc_submit(struct drm_i915_gem_request *rq);
+void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
 
 #endif
index 2de57ffe5e18b8a20538ff33e70b8cc679c46ca0..944786d7075b7c73631093c3240f091dda065c1d 100644 (file)
@@ -71,7 +71,8 @@
 #define   WQ_WORKLOAD_TOUCH            (2 << WQ_WORKLOAD_SHIFT)
 
 #define WQ_RING_TAIL_SHIFT             20
-#define WQ_RING_TAIL_MASK              (0x7FF << WQ_RING_TAIL_SHIFT)
+#define WQ_RING_TAIL_MAX               0x7FF   /* 2^11 QWords */
+#define WQ_RING_TAIL_MASK              (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
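
A small illustrative sketch (the helper names are hypothetical) of how this shift/mask pair packs and unpacks the ring tail in a work-queue item header:

    static u32 wqi_pack_tail(u32 ring_tail)
    {
            u32 tail = ring_tail & WQ_RING_TAIL_MAX;   /* clamp to 11 bits */

            return tail << WQ_RING_TAIL_SHIFT;         /* encode into header */
    }

    static u32 wqi_unpack_tail(u32 wqi_header)
    {
            return (wqi_header & WQ_RING_TAIL_MASK) >> WQ_RING_TAIL_SHIFT;
    }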
 
 #define GUC_DOORBELL_ENABLED           1
 #define GUC_DOORBELL_DISABLED          0
index 876e5da44c4e01ec1cfe225f1b3bc8fc844a939e..605c69658d2c1c55b2408b0bc36dd6c41d26d667 100644 (file)
  *
  */
 
-#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin"
+#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
 MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
 
+#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
+MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
+
+#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin"
+MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
+
 /* User-friendly representation of an enum */
 const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 {
@@ -84,7 +90,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
        struct intel_engine_cs *engine;
        int irqs;
 
-       /* tell all command streamers NOT to forward interrupts and vblank to GuC */
+       /* tell all command streamers NOT to forward interrupts or vblank to GuC */
        irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
        irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
        for_each_engine(engine, dev_priv)
@@ -100,10 +106,10 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
        int irqs;
+       u32 tmp;
 
-       /* tell all command streamers to forward interrupts and vblank to GuC */
-       irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
-       irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
+       /* tell all command streamers to forward interrupts (but not vblank) to GuC */
+       irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
        for_each_engine(engine, dev_priv)
                I915_WRITE(RING_MODE_GEN7(engine), irqs);
 
@@ -114,6 +120,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
        I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
        I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
        I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+
+       /*
+        * If the GuC has routed PM interrupts to itself, don't keep the
+        * redirect bit itself, but do keep the other interrupts that the
+        * GuC has unmasked.
+        */
+       tmp = I915_READ(GEN6_PMINTRMSK);
+       if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
+               dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
+               dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+       }
 }
 
 static u32 get_gttype(struct drm_i915_private *dev_priv)
@@ -281,13 +297,24 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
        return ret;
 }
 
+static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
+{
+       u32 wopcm_size = GUC_WOPCM_TOP;
+
+       /* On BXT, the top of WOPCM is reserved for RC6 context */
+       if (IS_BROXTON(dev_priv))
+               wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
+
+       return wopcm_size;
+}
+
 /*
  * Load the GuC firmware blob into the MinuteIA.
  */
 static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 {
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        int ret;
 
        ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -308,7 +335,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        /* init WOPCM */
-       I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE);
+       I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
        I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
 
        /* Enable MIA caching. GuC clock gating is disabled. */
@@ -372,66 +399,63 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
 }
 
 /**
- * intel_guc_ucode_load() - load GuC uCode into the device
+ * intel_guc_setup() - finish preparing the GuC for activity
  * @dev:       drm device
  *
  * Called from gem_init_hw() during driver loading and also after a GPU reset.
  *
+ * The main action required here is to load the GuC uCode into the device.
  * The firmware image should have already been fetched into memory by the
- * earlier call to intel_guc_ucode_init(), so here we need only check that
- * is succeeded, and then transfer the image to the h/w.
+ * earlier call to intel_guc_init(), so here we need only check that worked,
+ * and then transfer the image to the h/w.
  *
  * Return:     non-zero code on error
  */
-int intel_guc_ucode_load(struct drm_device *dev)
+int intel_guc_setup(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-       int retries, err = 0;
+       const char *fw_path = guc_fw->guc_fw_path;
+       int retries, ret, err;
 
-       if (!i915.enable_guc_submission)
-               return 0;
-
-       DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+       DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
+               fw_path,
                intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
                intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
 
-       direct_interrupts_to_host(dev_priv);
-
-       if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
-               return 0;
-
-       if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
-           guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
-               return -ENOEXEC;
-
-       guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
-
-       DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
-               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
-
-       switch (guc_fw->guc_fw_fetch_status) {
-       case GUC_FIRMWARE_FAIL:
-               /* something went wrong :( */
-               err = -EIO;
+       /* Loading forbidden, or no firmware to load? */
+       if (!i915.enable_guc_loading) {
+               err = 0;
                goto fail;
-
-       case GUC_FIRMWARE_NONE:
-       case GUC_FIRMWARE_PENDING:
-       default:
-               /* "can't happen" */
-               WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
-                       guc_fw->guc_fw_path,
-                       intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
-                       guc_fw->guc_fw_fetch_status);
+       } else if (fw_path == NULL) {
+               /* Device is known to have no uCode (e.g. no GuC) */
                err = -ENXIO;
                goto fail;
+       } else if (*fw_path == '\0') {
+               /* Device has a GuC but we don't know what f/w to load? */
+               DRM_INFO("No GuC firmware known for this platform\n");
+               err = -ENODEV;
+               goto fail;
+       }
 
-       case GUC_FIRMWARE_SUCCESS:
-               break;
+       /* Fetch failed, or already fetched but failed to load? */
+       if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
+               err = -EIO;
+               goto fail;
+       } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
+               err = -ENOEXEC;
+               goto fail;
        }
 
-       err = i915_guc_submission_init(dev);
+       direct_interrupts_to_host(dev_priv);
+
+       guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+
+       DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
+               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
+               intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+
+       err = i915_guc_submission_init(dev_priv);
        if (err)
                goto fail;
 
@@ -448,7 +472,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
                 */
                err = i915_reset_guc(dev_priv);
                if (err) {
-                       DRM_ERROR("GuC reset failed, err %d\n", err);
+                       DRM_ERROR("GuC reset failed: %d\n", err);
                        goto fail;
                }
 
@@ -459,8 +483,8 @@ int intel_guc_ucode_load(struct drm_device *dev)
                if (--retries == 0)
                        goto fail;
 
-               DRM_INFO("GuC fw load failed, err %d; will reset and "
-                       "retry %d more time(s)\n", err, retries);
+               DRM_INFO("GuC fw load failed: %d; will reset and "
+                        "retry %d more time(s)\n", err, retries);
        }
 
        guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
@@ -470,10 +494,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
                intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
 
        if (i915.enable_guc_submission) {
-               /* The execbuf_client will be recreated. Release it first. */
-               i915_guc_submission_disable(dev);
-
-               err = i915_guc_submission_enable(dev);
+               err = i915_guc_submission_enable(dev_priv);
                if (err)
                        goto fail;
                direct_interrupts_to_guc(dev_priv);
@@ -482,15 +503,50 @@ int intel_guc_ucode_load(struct drm_device *dev)
        return 0;
 
 fail:
-       DRM_ERROR("GuC firmware load failed, err %d\n", err);
        if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
                guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
 
        direct_interrupts_to_host(dev_priv);
-       i915_guc_submission_disable(dev);
-       i915_guc_submission_fini(dev);
+       i915_guc_submission_disable(dev_priv);
+       i915_guc_submission_fini(dev_priv);
 
-       return err;
+       /*
+        * We've failed to load the firmware :(
+        *
+        * Decide whether to disable GuC submission and fall back to
+        * execlist mode, and whether to hide the error by returning
+        * zero or to return -EIO, which the caller will treat as a
+        * nonfatal error (i.e. it doesn't prevent driver load, but
+        * marks the GPU as wedged until reset).
+        */
+       if (i915.enable_guc_loading > 1) {
+               ret = -EIO;
+       } else if (i915.enable_guc_submission > 1) {
+               ret = -EIO;
+       } else {
+               ret = 0;
+       }
+
+       if (err == 0 && !HAS_GUC_UCODE(dev))
+               ;       /* Don't mention the GuC! */
+       else if (err == 0)
+               DRM_INFO("GuC firmware load skipped\n");
+       else if (ret != -EIO)
+               DRM_INFO("GuC firmware load failed: %d\n", err);
+       else
+               DRM_ERROR("GuC firmware load failed: %d\n", err);
+
+       if (i915.enable_guc_submission) {
+               if (fw_path == NULL)
+                       DRM_INFO("GuC submission without firmware not supported\n");
+               if (ret == 0)
+                       DRM_INFO("Falling back from GuC submission to execlist mode\n");
+               else
+                       DRM_ERROR("GuC init failed: %d\n", ret);
+       }
+       i915.enable_guc_submission = 0;
+
+       return ret;
 }
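
Read together with the defaulting in intel_guc_init() below, the implied module-parameter semantics are (a reading aid, not normative):

    /*
     * i915.enable_guc_loading:  < 0 -> platform default (HAS_GUC_UCODE)
     *                             0 -> never load GuC firmware
     *                             1 -> load if possible, fall back on failure
     *                           > 1 -> mandatory: failure returns -EIO and
     *                                  leaves the GPU wedged until reset
     * i915.enable_guc_submission defaults analogously via HAS_GUC_SCHED and
     * is forced to 0 whenever the firmware could not be loaded.
     */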
 
 static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
@@ -552,9 +608,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
 
        /* Header and uCode will be loaded to WOPCM. Size of the two. */
        size = guc_fw->header_size + guc_fw->ucode_size;
-
-       /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
-       if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
+       if (size > guc_wopcm_size(to_i915(dev))) {
                DRM_ERROR("Firmware is too large to fit in WOPCM\n");
                goto fail;
        }
@@ -617,22 +671,25 @@ fail:
 }
 
 /**
- * intel_guc_ucode_init() - define parameters and fetch firmware
+ * intel_guc_init() - define parameters and fetch firmware
  * @dev:       drm device
  *
  * Called early during driver load, but after GEM is initialised.
  *
  * The firmware will be transferred to the GuC's memory later,
- * when intel_guc_ucode_load() is called.
+ * when intel_guc_setup() is called.
  */
-void intel_guc_ucode_init(struct drm_device *dev)
+void intel_guc_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
        const char *fw_path;
 
-       if (!HAS_GUC_SCHED(dev))
-               i915.enable_guc_submission = false;
+       /* A negative value means "use platform default" */
+       if (i915.enable_guc_loading < 0)
+               i915.enable_guc_loading = HAS_GUC_UCODE(dev);
+       if (i915.enable_guc_submission < 0)
+               i915.enable_guc_submission = HAS_GUC_SCHED(dev);
 
        if (!HAS_GUC_UCODE(dev)) {
                fw_path = NULL;
@@ -640,27 +697,30 @@ void intel_guc_ucode_init(struct drm_device *dev)
                fw_path = I915_SKL_GUC_UCODE;
                guc_fw->guc_fw_major_wanted = 6;
                guc_fw->guc_fw_minor_wanted = 1;
+       } else if (IS_BROXTON(dev)) {
+               fw_path = I915_BXT_GUC_UCODE;
+               guc_fw->guc_fw_major_wanted = 8;
+               guc_fw->guc_fw_minor_wanted = 7;
+       } else if (IS_KABYLAKE(dev)) {
+               fw_path = I915_KBL_GUC_UCODE;
+               guc_fw->guc_fw_major_wanted = 9;
+               guc_fw->guc_fw_minor_wanted = 14;
        } else {
-               i915.enable_guc_submission = false;
                fw_path = "";   /* unknown device */
        }
 
-       if (!i915.enable_guc_submission)
-               return;
-
        guc_fw->guc_dev = dev;
        guc_fw->guc_fw_path = fw_path;
        guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
        guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
 
+       /* Early (and silent) return if GuC loading is disabled */
+       if (!i915.enable_guc_loading)
+               return;
        if (fw_path == NULL)
                return;
-
-       if (*fw_path == '\0') {
-               DRM_ERROR("No GuC firmware known for this platform\n");
-               guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+       if (*fw_path == '\0')
                return;
-       }
 
        guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
        DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
@@ -669,18 +729,18 @@ void intel_guc_ucode_init(struct drm_device *dev)
 }
 
 /**
- * intel_guc_ucode_fini() - clean up all allocated resources
+ * intel_guc_fini() - clean up all allocated resources
  * @dev:       drm device
  */
-void intel_guc_ucode_fini(struct drm_device *dev)
+void intel_guc_fini(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
        mutex_lock(&dev->struct_mutex);
        direct_interrupts_to_host(dev_priv);
-       i915_guc_submission_disable(dev);
-       i915_guc_submission_fini(dev);
+       i915_guc_submission_disable(dev_priv);
+       i915_guc_submission_fini(dev_priv);
 
        if (guc_fw->guc_fw_obj)
                drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
new file mode 100644 (file)
index 0000000..434f4d5
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_gvt.h"
+
+/**
+ * DOC: Intel GVT-g host support
+ *
+ * Intel GVT-g is a graphics virtualization technology which shares the
+ * GPU among multiple virtual machines on a time-sharing basis. Each
+ * virtual machine is presented with a virtual GPU (vGPU) that has features
+ * equivalent to the underlying physical GPU (pGPU), so the i915 driver can
+ * run seamlessly in a virtual machine. This file provides the enlightenments
+ * of GVT and the necessary components used by GVT in the i915 driver.
+ */
+
+static bool is_supported_device(struct drm_i915_private *dev_priv)
+{
+       if (IS_BROADWELL(dev_priv))
+               return true;
+       return false;
+}
+
+/**
+ * intel_gvt_init - initialize GVT components
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the initialization stage to create a GVT device.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ *
+ */
+int intel_gvt_init(struct drm_i915_private *dev_priv)
+{
+       int ret;
+
+       if (!i915.enable_gvt) {
+               DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
+               return 0;
+       }
+
+       if (!is_supported_device(dev_priv)) {
+               DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
+               goto bail;
+       }
+
+       /*
+        * If we're not running in the host, or we fail to find an MPT
+        * module, disable GVT-g.
+        */
+       ret = intel_gvt_init_host();
+       if (ret) {
+               DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
+               goto bail;
+       }
+
+       ret = intel_gvt_init_device(dev_priv);
+       if (ret) {
+               DRM_DEBUG_DRIVER("Failed to init GVT device\n");
+               goto bail;
+       }
+
+       return 0;
+
+bail:
+       i915.enable_gvt = 0;
+       return 0;
+}
+
+/**
+ * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading
+ * @dev_priv: drm i915 private data
+ *
+ * This function is called at the i915 driver unloading stage, to shut down
+ * GVT components and release the related resources.
+ */
+void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+{
+       if (!intel_gvt_active(dev_priv))
+               return;
+
+       intel_gvt_clean_device(dev_priv);
+}
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
new file mode 100644 (file)
index 0000000..960211d
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _INTEL_GVT_H_
+#define _INTEL_GVT_H_
+
+#include "gvt/gvt.h"
+
+#ifdef CONFIG_DRM_I915_GVT
+int intel_gvt_init(struct drm_i915_private *dev_priv);
+void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
+int intel_gvt_init_device(struct drm_i915_private *dev_priv);
+void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
+int intel_gvt_init_host(void);
+#else
+static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
+{
+       return 0;
+}
+static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+{
+}
+#endif
+
+#endif /* _INTEL_GVT_H_ */
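
Because the header provides no-op stubs when CONFIG_DRM_I915_GVT is disabled, call sites in the core driver need no ifdefs; a hedged usage sketch follows (the function names and placement are illustrative, not lifted from this patch):

    static int example_driver_load(struct drm_i915_private *dev_priv)
    {
            int ret;

            ret = intel_gvt_init(dev_priv);  /* no-op stub when GVT is off */
            if (ret)
                    return ret;

            /* ... rest of driver load ... */
            return 0;
    }

    static void example_driver_unload(struct drm_i915_private *dev_priv)
    {
            intel_gvt_cleanup(dev_priv);     /* safe to call unconditionally */
    }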
index 2c3bd9c2573e1be42b532965d76aa25987db409f..4df9f384910ccd8050119873a2ae9418db57d15e 100644 (file)
@@ -47,7 +47,7 @@ static void
 assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
 {
        struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t enabled_bits;
 
        enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
@@ -138,7 +138,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
 {
        const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 val = I915_READ(VIDEO_DIP_CTL);
        int i;
 
@@ -192,7 +192,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
 {
        const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
@@ -251,7 +251,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
 {
        const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
@@ -308,7 +308,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
 {
        const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
        u32 val = I915_READ(reg);
@@ -366,7 +366,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
 {
        const uint32_t *data = frame;
        struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -508,7 +508,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
        i915_reg_t reg = VIDEO_DIP_CTL;
@@ -629,7 +629,7 @@ static bool gcp_default_phase_possible(int pipe_bpp,
 
 static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
        i915_reg_t reg;
        u32 val = 0;
@@ -661,7 +661,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
@@ -713,7 +713,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -755,7 +755,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -807,7 +807,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
                               bool enable,
                               const struct drm_display_mode *adjusted_mode)
 {
-       struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
        i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
@@ -855,7 +855,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
 static void intel_hdmi_prepare(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -894,7 +894,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        enum intel_display_power_domain power_domain;
        u32 tmp;
@@ -931,7 +931,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp, flags = 0;
        int dotclock;
 
@@ -988,7 +988,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
 static void g4x_enable_hdmi(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        u32 temp;
@@ -1009,7 +1009,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
 static void ibx_enable_hdmi(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        u32 temp;
@@ -1058,7 +1058,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
 static void cpt_enable_hdmi(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        enum pipe pipe = crtc->pipe;
@@ -1115,7 +1115,7 @@ static void vlv_enable_hdmi(struct intel_encoder *encoder)
 static void intel_disable_hdmi(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        u32 temp;
@@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
                I915_WRITE(intel_hdmi->hdmi_reg, temp);
                POSTING_READ(intel_hdmi->hdmi_reg);
 
-               intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+               intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
                intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
        }
@@ -1273,33 +1273,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
 {
        struct drm_device *dev = crtc_state->base.crtc->dev;
-       struct drm_atomic_state *state;
-       struct intel_encoder *encoder;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
-       int count = 0, count_hdmi = 0;
-       int i;
 
        if (HAS_GMCH_DISPLAY(dev))
                return false;
 
-       state = crtc_state->base.state;
-
-       for_each_connector_in_state(state, connector, connector_state, i) {
-               if (connector_state->crtc != crtc_state->base.crtc)
-                       continue;
-
-               encoder = to_intel_encoder(connector_state->best_encoder);
-
-               count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
-               count++;
-       }
-
        /*
         * HDMI 12bpc affects the clocks, so it's only possible
         * when not cloning with other encoder types.
         */
-       return count_hdmi > 0 && count_hdmi == count;
+       return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
 }
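
The connector-state walk becomes a single comparison because crtc_state->output_types carries one bit per encoder type driven by this CRTC, so "HDMI and nothing else" is plain equality with 1 << INTEL_OUTPUT_HDMI. A minimal userspace sketch of the same test, with illustrative enum values rather than the driver's:

#include <assert.h>
#include <stdbool.h>

/* Illustrative encoder types; the real ones live in the driver headers. */
enum output_type { OUTPUT_DP, OUTPUT_HDMI, OUTPUT_LVDS };

/* True only when HDMI is driven and no other encoder type is cloned. */
static bool only_hdmi(unsigned int output_types)
{
	return output_types == 1u << OUTPUT_HDMI;
}

int main(void)
{
	assert(only_hdmi(1u << OUTPUT_HDMI));                        /* HDMI alone */
	assert(!only_hdmi((1u << OUTPUT_HDMI) | (1u << OUTPUT_DP))); /* cloned */
	assert(!only_hdmi(0));                                       /* no outputs */
	return 0;
}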
 
 bool intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -1575,7 +1557,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
        struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
        struct intel_digital_port *intel_dig_port =
                hdmi_to_dig_port(intel_hdmi);
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        int ret;
 
        ret = drm_object_property_set_value(&connector->base, property, val);
@@ -1674,39 +1656,16 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dport->hdmi;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
-       enum dpio_channel port = vlv_dport_to_channel(dport);
-       int pipe = intel_crtc->pipe;
-       u32 val;
 
-       /* Enable clock channels for this port */
-       mutex_lock(&dev_priv->sb_lock);
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
-       val = 0;
-       if (pipe)
-               val |= (1<<21);
-       else
-               val &= ~(1<<21);
-       val |= 0x001000c4;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+       vlv_phy_pre_encoder_enable(encoder);
 
        /* HDMI 1.0V-2dB */
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
-
-       /* Program lane clock */
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
-       mutex_unlock(&dev_priv->sb_lock);
+       vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
+                                0x2b247878);
 
        intel_hdmi->set_infoframes(&encoder->base,
                                   intel_crtc->config->has_hdmi_sink,
@@ -1719,213 +1678,33 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 
 static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
-       int pipe = intel_crtc->pipe;
-
        intel_hdmi_prepare(encoder);
 
-       /* Program Tx lane resets to default */
-       mutex_lock(&dev_priv->sb_lock);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
-                        DPIO_PCS_TX_LANE2_RESET |
-                        DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
-                        DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
-                        DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
-                        (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
-                        DPIO_PCS_CLK_SOFT_RESET);
-
-       /* Fix up inter-pair skew failure */
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
-
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
-       vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
-       mutex_unlock(&dev_priv->sb_lock);
-}
-
-static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
-                                    bool reset)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
-       enum pipe pipe = crtc->pipe;
-       uint32_t val;
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
-       if (reset)
-               val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-       else
-               val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
-
-       if (crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
-               if (reset)
-                       val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
-               else
-                       val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
-       }
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
-       val |= CHV_PCS_REQ_SOFTRESET_EN;
-       if (reset)
-               val &= ~DPIO_PCS_CLK_SOFT_RESET;
-       else
-               val |= DPIO_PCS_CLK_SOFT_RESET;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
-
-       if (crtc->config->lane_count > 2) {
-               val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
-               val |= CHV_PCS_REQ_SOFTRESET_EN;
-               if (reset)
-                       val &= ~DPIO_PCS_CLK_SOFT_RESET;
-               else
-                       val |= DPIO_PCS_CLK_SOFT_RESET;
-               vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
-       }
+       vlv_phy_pre_pll_enable(encoder);
 }
 
 static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
-       enum pipe pipe = intel_crtc->pipe;
-       u32 val;
-
        intel_hdmi_prepare(encoder);
 
-       /*
-        * Must trick the second common lane into life.
-        * Otherwise we can't even access the PLL.
-        */
-       if (ch == DPIO_CH0 && pipe == PIPE_B)
-               dport->release_cl2_override =
-                       !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
-
-       chv_phy_powergate_lanes(encoder, true, 0x0);
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       /* Assert data lane reset */
-       chv_data_lane_soft_reset(encoder, true);
-
-       /* program left/right clock distribution */
-       if (pipe != PIPE_B) {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
-               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
-               if (ch == DPIO_CH0)
-                       val |= CHV_BUFLEFTENA1_FORCE;
-               if (ch == DPIO_CH1)
-                       val |= CHV_BUFRIGHTENA1_FORCE;
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
-       } else {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
-               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
-               if (ch == DPIO_CH0)
-                       val |= CHV_BUFLEFTENA2_FORCE;
-               if (ch == DPIO_CH1)
-                       val |= CHV_BUFRIGHTENA2_FORCE;
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
-       }
-
-       /* program clock channel usage */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
-       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
-       if (pipe != PIPE_B)
-               val &= ~CHV_PCS_USEDCLKCHANNEL;
-       else
-               val |= CHV_PCS_USEDCLKCHANNEL;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
-       val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
-       if (pipe != PIPE_B)
-               val &= ~CHV_PCS_USEDCLKCHANNEL;
-       else
-               val |= CHV_PCS_USEDCLKCHANNEL;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
-
-       /*
-        * This a a bit weird since generally CL
-        * matches the pipe, but here we need to
-        * pick the CL based on the port.
-        */
-       val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
-       if (pipe != PIPE_B)
-               val &= ~CHV_CMN_USEDCLKCHANNEL;
-       else
-               val |= CHV_CMN_USEDCLKCHANNEL;
-       vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
-
-       mutex_unlock(&dev_priv->sb_lock);
+       chv_phy_pre_pll_enable(encoder);
 }
 
 static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
-       u32 val;
-
-       mutex_lock(&dev_priv->sb_lock);
-
-       /* disable left/right clock distribution */
-       if (pipe != PIPE_B) {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
-               val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
-       } else {
-               val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
-               val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
-               vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
-       }
-
-       mutex_unlock(&dev_priv->sb_lock);
-
-       /*
-        * Leave the power down bit cleared for at least one
-        * lane so that chv_powergate_phy_ch() will power
-        * on something when the channel is otherwise unused.
-        * When the port is off and the override is removed
-        * the lanes power down anyway, so otherwise it doesn't
-        * really matter what the state of power down bits is
-        * after this.
-        */
-       chv_phy_powergate_lanes(encoder, false, 0x0);
+       chv_phy_post_pll_disable(encoder);
 }
 
 static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 {
-       struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
-       struct intel_crtc *intel_crtc =
-               to_intel_crtc(encoder->base.crtc);
-       enum dpio_channel port = vlv_dport_to_channel(dport);
-       int pipe = intel_crtc->pipe;
-
        /* Reset lanes to avoid HDMI flicker (VLV w/a) */
-       mutex_lock(&dev_priv->sb_lock);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
-       mutex_unlock(&dev_priv->sb_lock);
+       vlv_phy_reset_lanes(encoder);
 }
 
 static void chv_hdmi_post_disable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        mutex_lock(&dev_priv->sb_lock);
 
@@ -1940,142 +1719,16 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct intel_hdmi *intel_hdmi = &dport->hdmi;
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
-       enum dpio_channel ch = vlv_dport_to_channel(dport);
-       int pipe = intel_crtc->pipe;
-       int data, i, stagger;
-       u32 val;
-
-       mutex_lock(&dev_priv->sb_lock);
 
-       /* allow hardware to manage TX FIFO reset source */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
-       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
-       val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-
-       /* Program Tx latency optimal setting */
-       for (i = 0; i < 4; i++) {
-               /* Set the upar bit */
-               data = (i == 1) ? 0x0 : 0x1;
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
-                               data << DPIO_UPAR_SHIFT);
-       }
-
-       /* Data lane stagger programming */
-       if (intel_crtc->config->port_clock > 270000)
-               stagger = 0x18;
-       else if (intel_crtc->config->port_clock > 135000)
-               stagger = 0xd;
-       else if (intel_crtc->config->port_clock > 67500)
-               stagger = 0x7;
-       else if (intel_crtc->config->port_clock > 33750)
-               stagger = 0x4;
-       else
-               stagger = 0x2;
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
-       val |= DPIO_TX2_STAGGER_MASK(0x1f);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
-       val |= DPIO_TX2_STAGGER_MASK(0x1f);
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
-
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
-                      DPIO_LANESTAGGER_STRAP(stagger) |
-                      DPIO_LANESTAGGER_STRAP_OVRD |
-                      DPIO_TX1_STAGGER_MASK(0x1f) |
-                      DPIO_TX1_STAGGER_MULT(6) |
-                      DPIO_TX2_STAGGER_MULT(0));
-
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
-                      DPIO_LANESTAGGER_STRAP(stagger) |
-                      DPIO_LANESTAGGER_STRAP_OVRD |
-                      DPIO_TX1_STAGGER_MASK(0x1f) |
-                      DPIO_TX1_STAGGER_MULT(7) |
-                      DPIO_TX2_STAGGER_MULT(5));
-
-       /* Deassert data lane reset */
-       chv_data_lane_soft_reset(encoder, false);
-
-       /* Clear calc init */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
-       val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
-       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
-       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
-       val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
-       val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
-       val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
-       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
-       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
-       val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
-       val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+       chv_phy_pre_encoder_enable(encoder);
 
        /* FIXME: Program the support xxx V-dB */
        /* Use 800mV-0dB */
-       for (i = 0; i < 4; i++) {
-               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
-               val &= ~DPIO_SWING_DEEMPH9P5_MASK;
-               val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
-       }
-
-       for (i = 0; i < 4; i++) {
-               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
-
-               val &= ~DPIO_SWING_MARGIN000_MASK;
-               val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
-
-               /*
-                * Supposedly this value shouldn't matter when unique transition
-                * scale is disabled, but in fact it does matter. Let's just
-                * always program the same value and hope it's OK.
-                */
-               val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
-               val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
-
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
-       }
-
-       /*
-        * The document said it needs to set bit 27 for ch0 and bit 26
-        * for ch1. Might be a typo in the doc.
-        * For now, for this unique transition scale selection, set bit
-        * 27 for ch0 and ch1.
-        */
-       for (i = 0; i < 4; i++) {
-               val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
-               val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
-               vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
-       }
-
-       /* Start swing calculation */
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
-       val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
-
-       val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
-       val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
-       vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
-
-       mutex_unlock(&dev_priv->sb_lock);
+       chv_set_phy_signal_level(encoder, 128, 102, false);
 
        intel_hdmi->set_infoframes(&encoder->base,
                                   intel_crtc->config->has_hdmi_sink,
@@ -2086,10 +1739,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
        vlv_wait_port_ready(dev_priv, dport, 0x0);
 
        /* Second common lane will stay alive on its own now */
-       if (dport->release_cl2_override) {
-               chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
-               dport->release_cl2_override = false;
-       }
+       chv_phy_release_cl2_override(encoder);
 }
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -2106,6 +1756,8 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_hdmi_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
+       .late_register = intel_connector_register,
+       .early_unregister = intel_connector_unregister,
        .destroy = intel_hdmi_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -2114,7 +1766,6 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
        .get_modes = intel_hdmi_get_modes,
        .mode_valid = intel_hdmi_mode_valid,
-       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -2138,10 +1789,13 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port = intel_dig_port->port;
        uint8_t alternate_ddc_pin;
 
+       DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+                     port_name(port));
+
        if (WARN(intel_dig_port->max_lanes < 4,
                 "Not enough lanes (%d) for HDMI on port %c\n",
                 intel_dig_port->max_lanes, port_name(port)))
@@ -2239,12 +1893,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
-       intel_connector->unregister = intel_connector_unregister;
 
        intel_hdmi_add_properties(intel_hdmi, connector);
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
-       drm_connector_register(connector);
        intel_hdmi->attached_connector = intel_connector;
 
        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -2277,7 +1929,7 @@ void intel_hdmi_init(struct drm_device *dev,
        intel_encoder = &intel_dig_port->base;
 
        drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
-                        DRM_MODE_ENCODER_TMDS, NULL);
+                        DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
 
        intel_encoder->compute_config = intel_hdmi_compute_config;
        if (HAS_PCH_SPLIT(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index bee673005d48a6f3cb2e97856adfde114ae519a2..f48957ea100d9283b53d02f7aef82ae5cacbb2e9 100644
@@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
 
 static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
@@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv),
                             hotplug.reenable_work.work);
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_mode_config *mode_config = &dev->mode_config;
        int i;
 
@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
                }
        }
        if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev);
+               dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);
 
        intel_runtime_pm_put(dev_priv);
@@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private, hotplug.hotplug_work);
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
 
 /**
  * intel_hpd_irq_handler - main hotplug irq handler
- * @dev: drm device
+ * @dev_priv: i915 device private
  * @pin_mask: a mask of hpd pins that have triggered the irq
  * @long_mask: a mask of hpd pins that may be long hpd pulses
  *
@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
  * Here, we do hotplug irq storm detection and mitigation, and pass further
  * processing to appropriate bottom halves.
  */
-void intel_hpd_irq_handler(struct drm_device *dev,
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                           u32 pin_mask, u32 long_mask)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
        enum port port;
        bool storm_detected = false;
@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
                         * hotplug bits itself. So only WARN about unexpected
                         * interrupts on saner platforms.
                         */
-                       WARN_ONCE(!HAS_GMCH_DISPLAY(dev),
+                       WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
                                  "Received HPD interrupt on pin %d although disabled\n", i);
                        continue;
                }
@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
        }
 
        if (storm_detected)
-               dev_priv->display.hpd_irq_setup(dev);
+               dev_priv->display.hpd_irq_setup(dev_priv);
        spin_unlock(&dev_priv->irq_lock);
 
        /*
@@ -453,20 +452,47 @@ void intel_hpd_irq_handler(struct drm_device *dev,
  *
  * This is a separate step from interrupt enabling to simplify the locking rules
  * in the driver load and resume code.
+ *
+ * Also see: intel_hpd_poll_init(), which enables connector polling
  */
 void intel_hpd_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
-       struct drm_mode_config *mode_config = &dev->mode_config;
-       struct drm_connector *connector;
        int i;
 
        for_each_hpd_pin(i) {
                dev_priv->hotplug.stats[i].count = 0;
                dev_priv->hotplug.stats[i].state = HPD_ENABLED;
        }
+
+       WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
+       schedule_work(&dev_priv->hotplug.poll_init_work);
+
+       /*
+        * Interrupt setup is already guaranteed to be single-threaded; this is
+        * just to make the assert_spin_locked checks happy.
+        */
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->display.hpd_irq_setup)
+               dev_priv->display.hpd_irq_setup(dev_priv);
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
+
+void i915_hpd_poll_init_work(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, struct drm_i915_private,
+                            hotplug.poll_init_work);
+       struct drm_device *dev = &dev_priv->drm;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector;
+       bool enabled;
+
+       mutex_lock(&dev->mode_config.mutex);
+
+       enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+
        list_for_each_entry(connector, &mode_config->connector_list, head) {
-               struct intel_connector *intel_connector = to_intel_connector(connector);
+               struct intel_connector *intel_connector =
+                       to_intel_connector(connector);
                connector->polled = intel_connector->polled;
 
                /* MST has a dynamic intel_connector->encoder and it's reprobing
@@ -475,24 +501,62 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
                        continue;
 
                if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
-                   intel_connector->encoder->hpd_pin > HPD_NONE)
-                       connector->polled = DRM_CONNECTOR_POLL_HPD;
+                   intel_connector->encoder->hpd_pin > HPD_NONE) {
+                       connector->polled = enabled ?
+                               DRM_CONNECTOR_POLL_CONNECT |
+                               DRM_CONNECTOR_POLL_DISCONNECT :
+                               DRM_CONNECTOR_POLL_HPD;
+               }
        }
 
+       if (enabled)
+               drm_kms_helper_poll_enable_locked(dev);
+
+       mutex_unlock(&dev->mode_config.mutex);
+
        /*
-        * Interrupt setup is already guaranteed to be single-threaded, this is
-        * just to make the assert_spin_locked checks happy.
+        * We might have missed hotplug events that happened while we were
+        * in the middle of disabling polling.
         */
-       spin_lock_irq(&dev_priv->irq_lock);
-       if (dev_priv->display.hpd_irq_setup)
-               dev_priv->display.hpd_irq_setup(dev);
-       spin_unlock_irq(&dev_priv->irq_lock);
+       if (!enabled)
+               drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * intel_hpd_poll_init - enables polling for connectors with hpd
+ * @dev_priv: i915 device instance
+ *
+ * This function enables polling for all connectors, regardless of whether or
+ * not they support hotplug detection. Under certain conditions HPD may not be
+ * functional. On most Intel GPUs, this happens when we enter runtime suspend.
+ * On Valleyview and Cherryview systems, this also happens when we shut off all
+ * of the powerwells.
+ *
+ * Since this function can get called in contexts where we're already holding
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
+ * worker.
+ *
+ * Also see: intel_hpd_init(), which restores hpd handling.
+ */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
+{
+       WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+
+       /*
+        * We might already be holding dev->mode_config.mutex, so do this in a
+        * separate worker.
+        * There's also no issue if we race here, since we always reschedule
+        * this worker anyway.
+        */
+       schedule_work(&dev_priv->hotplug.poll_init_work);
 }
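
Both intel_hpd_init() and intel_hpd_poll_init() use the same handoff: flip hotplug.poll_enabled with WRITE_ONCE() and let poll_init_work take the locking-sensitive path. A rough userspace analogue of that flag-plus-worker pattern, with schedule_work() stood in by a direct call:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool poll_enabled;

/* The worker reads the latest flag value and reconfigures accordingly. */
static void poll_init_work(void)
{
	bool enabled = atomic_load(&poll_enabled);

	printf("connectors now use %s\n",
	       enabled ? "CONNECT/DISCONNECT polling" : "HPD interrupts");
}

/* Callers may hold locks the worker needs, so they only set the flag;
 * races are harmless because the work is always (re)scheduled after. */
static void hpd_poll_init(bool enable)
{
	atomic_store(&poll_enabled, enable);
	poll_init_work(); /* schedule_work() in the driver */
}

int main(void)
{
	hpd_poll_init(true);  /* e.g. entering runtime suspend */
	hpd_poll_init(false); /* e.g. intel_hpd_init() on resume */
	return 0;
}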
 
 void intel_hpd_init_work(struct drm_i915_private *dev_priv)
 {
        INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+       INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
        INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
                          intel_hpd_irq_storm_reenable_work);
 }
@@ -509,5 +573,33 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
 
        cancel_work_sync(&dev_priv->hotplug.dig_port_work);
        cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+       cancel_work_sync(&dev_priv->hotplug.poll_init_work);
        cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
 }
+
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+       bool ret = false;
+
+       if (pin == HPD_NONE)
+               return false;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
+               dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+               ret = true;
+       }
+       spin_unlock_irq(&dev_priv->irq_lock);
+
+       return ret;
+}
+
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+       if (pin == HPD_NONE)
+               return;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+       spin_unlock_irq(&dev_priv->irq_lock);
+}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 81de23098be726adf14515720e40e0eee6ecb973..1f266d7df2ec076afdffefef40ec870f9115291e 100644
@@ -113,7 +113,7 @@ to_intel_gmbus(struct i2c_adapter *i2c)
 void
 intel_i2c_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(GMBUS0, 0);
        I915_WRITE(GMBUS4, 0);
@@ -138,7 +138,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
 static u32 get_reserved(struct intel_gmbus *bus)
 {
        struct drm_i915_private *dev_priv = bus->dev_priv;
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        u32 reserved = 0;
 
        /* On most chips, these bits must be preserved in software. */
@@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
                                               adapter);
        struct drm_i915_private *dev_priv = bus->dev_priv;
 
-       intel_i2c_reset(dev_priv->dev);
+       intel_i2c_reset(&dev_priv->drm);
        intel_i2c_quirk_set(dev_priv, true);
        set_data(bus, 1);
        set_clock(bus, 1);
@@ -298,15 +298,16 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
 {
        int ret;
 
-#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
-
        if (!HAS_GMBUS_IRQ(dev_priv))
-               return wait_for(C, 10);
+               return intel_wait_for_register(dev_priv,
+                                              GMBUS2, GMBUS_ACTIVE, 0,
+                                              10);
 
        /* Important: The hw handles only the first bit, so set only one! */
        I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
 
-       ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+       ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
+                                (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
                                 msecs_to_jiffies_timeout(10));
 
        I915_WRITE(GMBUS4, 0);
@@ -315,7 +316,6 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
                return 0;
        else
                return -ETIMEDOUT;
-#undef C
 }
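
intel_wait_for_register() wraps the poll that the removed C macro open-coded: read until (value & mask) matches, or give up. A self-contained sketch of that contract against a fake register; the driver's version sleeps and uses a real timeout rather than a poll count, and the bit position here is illustrative:

#include <stdint.h>
#include <stdio.h>

#define GMBUS_ACTIVE (1u << 9) /* bit position illustrative */

/* Fake GMBUS2: reports "active" for the first few reads, then idle. */
static int busy_reads = 3;
static uint32_t read_reg(void)
{
	return busy_reads-- > 0 ? GMBUS_ACTIVE : 0;
}

/* Poll until (reg & mask) == value, or fail after max_polls tries. */
static int wait_for_register(uint32_t mask, uint32_t value, int max_polls)
{
	while (max_polls--)
		if ((read_reg() & mask) == value)
			return 0;
	return -1; /* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("idle wait: %d\n", wait_for_register(GMBUS_ACTIVE, 0, 10));
	return 0;
}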
 
 static int
@@ -632,7 +632,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
  */
 int intel_setup_gmbus(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_gmbus *bus;
        unsigned int pin;
        int ret;
@@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev)
                        goto err;
        }
 
-       intel_i2c_reset(dev_priv->dev);
+       intel_i2c_reset(&dev_priv->drm);
 
        return 0;
 
@@ -736,7 +736,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
 
 void intel_teardown_gmbus(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_gmbus *bus;
        unsigned int pin;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 42eac37de047b31b1b496a86ac8bf717569e3cfe..414ddda4392255b1d5479c66b4d75f86235ea2e8 100644
        reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
 } while (0)
 
-enum {
-       ADVANCED_CONTEXT = 0,
-       LEGACY_32B_CONTEXT,
-       ADVANCED_AD_CONTEXT,
-       LEGACY_64B_CONTEXT
-};
-#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
-#define GEN8_CTX_ADDRESSING_MODE(dev)  (USES_FULL_48BIT_PPGTT(dev) ?\
-               LEGACY_64B_CONTEXT :\
-               LEGACY_32B_CONTEXT)
 enum {
        FAULT_AND_HANG = 0,
        FAULT_AND_HALT, /* Debug only */
@@ -224,15 +214,21 @@ enum {
        FAULT_AND_CONTINUE /* Unsupported */
 };
 #define GEN8_CTX_ID_SHIFT 32
+#define GEN8_CTX_ID_WIDTH 21
 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT       0x17
 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT       0x26
 
-static int intel_lr_context_pin(struct intel_context *ctx,
+/* Typical size of the average request (2 pipecontrols and a MI_BB) */
+#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
+
+static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+                                           struct intel_engine_cs *engine);
+static int intel_lr_context_pin(struct i915_gem_context *ctx,
                                struct intel_engine_cs *engine);
 
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
- * @dev: DRM device.
+ * @dev_priv: i915 device private
  * @enable_execlists: value of i915.enable_execlists module parameter.
  *
  * Only certain platforms support Execlists (the prerequisites being
@@ -240,23 +236,22 @@ static int intel_lr_context_pin(struct intel_context *ctx,
  *
  * Return: 1 if Execlists is supported and has to be enabled.
  */
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
 {
-       WARN_ON(i915.enable_ppgtt == -1);
-
        /* On platforms with execlist available, vGPU will only
         * support execlist mode, no ring buffer mode.
         */
-       if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev))
+       if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
                return 1;
 
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                return 1;
 
        if (enable_execlists == 0)
                return 0;
 
-       if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
+       if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
+           USES_PPGTT(dev_priv) &&
            i915.use_mmio_flip >= 0)
                return 1;
 
@@ -266,19 +261,17 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
 static void
 logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       if (IS_GEN8(dev) || IS_GEN9(dev))
+       if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
                engine->idle_lite_restore_wa = ~0;
 
-       engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-                                       IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+       engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+                                       IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
                                        (engine->id == VCS || engine->id == VCS2);
 
        engine->ctx_desc_template = GEN8_CTX_VALID;
-       engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
-                                  GEN8_CTX_ADDRESSING_MODE_SHIFT;
-       if (IS_GEN8(dev))
+       if (IS_GEN8(dev_priv))
                engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
        engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
 
@@ -297,7 +290,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
  *                                       descriptor for a pinned context
  *
  * @ctx: Context to work on
- * @ring: Engine the descriptor will be used with
+ * @engine: Engine the descriptor will be used with
  *
  * The context descriptor encodes various attributes of a context,
  * including its GTT address and some flags. Because it's fairly
@@ -305,62 +298,42 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
  * which remains valid until the context is unpinned.
  *
  * This is what a descriptor looks like, from LSB to MSB:
- *    bits 0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
+ *    bits  0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
  *    bits 12-31:    LRCA, GTT address of (the HWSP of) this context
- *    bits 32-51:    ctx ID, a globally unique tag (the LRCA again!)
- *    bits 52-63:    reserved, may encode the engine ID (for GuC)
+ *    bits 32-52:    ctx ID, a globally unique tag
+ *    bits 53-54:    mbz, reserved for use by hardware
+ *    bits 55-63:    group ID, currently unused and set to 0
  */
 static void
-intel_lr_context_descriptor_update(struct intel_context *ctx,
+intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
                                   struct intel_engine_cs *engine)
 {
-       uint64_t lrca, desc;
+       struct intel_context *ce = &ctx->engine[engine->id];
+       u64 desc;
 
-       lrca = ctx->engine[engine->id].lrc_vma->node.start +
-              LRC_PPHWSP_PN * PAGE_SIZE;
+       BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
 
-       desc = engine->ctx_desc_template;                          /* bits  0-11 */
-       desc |= lrca;                                      /* bits 12-31 */
-       desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
+       desc = ctx->desc_template;                              /* bits  3-4  */
+       desc |= engine->ctx_desc_template;                      /* bits  0-11 */
+       desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
+                                                               /* bits 12-31 */
+       desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;           /* bits 32-52 */
 
-       ctx->engine[engine->id].lrc_desc = desc;
+       ce->lrc_desc = desc;
 }
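
Given the layout documented above, building a descriptor is OR-ing the template flags, the page-aligned LRCA and the hw_id into their bit ranges. A worked sketch with made-up values; the template bit and page constants are illustrative, not the driver's:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define GEN8_CTX_ID_SHIFT 32
#define LRC_PPHWSP_PN     1        /* pretend the PPHWSP is page 1 */
#define PAGE_SZ           4096ull

int main(void)
{
	uint64_t template = 0x1;                   /* e.g. a "valid" flag, bits 0-11 */
	uint64_t vma_start = 0xa0000ull * PAGE_SZ; /* made-up GGTT offset */
	uint64_t hw_id = 42;                       /* globally unique, < 2^21 */
	uint64_t desc;

	desc  = template;                            /* bits  0-11 */
	desc |= vma_start + LRC_PPHWSP_PN * PAGE_SZ; /* bits 12-31 */
	desc |= hw_id << GEN8_CTX_ID_SHIFT;          /* bits 32-52 */

	printf("desc = %#018" PRIx64 "\n", desc);    /* 0x0000002aa0001001 */
	return 0;
}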
 
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
                                     struct intel_engine_cs *engine)
 {
        return ctx->engine[engine->id].lrc_desc;
 }
 
-/**
- * intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx: Context to get the ID for
- * @ring: Engine to get the ID for
- *
- * Do not confuse with ctx->id! Unfortunately we have a name overload
- * here: the old context ID we pass to userspace as a handler so that
- * they can refer to a context, and the new context ID we pass to the
- * ELSP so that the GPU can inform us of the context status via
- * interrupts.
- *
- * The context ID is a portion of the context descriptor, so we can
- * just extract the required part from the cached descriptor.
- *
- * Return: 20-bits globally unique context ID.
- */
-u32 intel_execlists_ctx_id(struct intel_context *ctx,
-                          struct intel_engine_cs *engine)
-{
-       return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
-}
-
 static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
                                 struct drm_i915_gem_request *rq1)
 {
 
        struct intel_engine_cs *engine = rq0->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = rq0->i915;
        uint64_t desc[2];
 
        if (rq1) {
@@ -431,6 +404,20 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
        spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
+static inline void execlists_context_status_change(
+               struct drm_i915_gem_request *rq,
+               unsigned long status)
+{
+       /*
+        * This is only used when GVT-g is enabled now. When GVT-g is
+        * disabled, the compiler should eliminate this function as dead code.
+        */
+       if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
+               return;
+
+       atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+}
+
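
The early return above is the standard IS_ENABLED() idiom: with CONFIG_DRM_I915_GVT off, the condition is a compile-time constant, the body below it is dead code, and the optimizer drops the notifier call entirely. A userspace sketch of the same pattern, with IS_ENABLED_GVT standing in for the kernel macro:

#include <stdio.h>

/* Compile-time 0/1, like the kernel's IS_ENABLED(CONFIG_DRM_I915_GVT). */
#define IS_ENABLED_GVT 0

static void notify_status_change(int status)
{
	printf("status -> %d\n", status);
}

static inline void context_status_change(int status)
{
	/* Constant-false guard: everything below is eliminated at -O1+. */
	if (!IS_ENABLED_GVT)
		return;

	notify_status_change(status);
}

int main(void)
{
	context_status_change(1); /* prints nothing while the config is off */
	return 0;
}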
 static void execlists_context_unqueue(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
@@ -442,7 +429,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
         * If irqs are not active generate a warning as batches that finish
         * without the irqs may get lost and a GPU Hang may occur.
         */
-       WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
+       WARN_ON(!intel_irqs_enabled(engine->i915));
 
        /* Try to read in pairs */
        list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -453,10 +440,24 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
                        /* Same ctx: ignore first request, as second request
                         * will update tail past first request's workload */
                        cursor->elsp_submitted = req0->elsp_submitted;
-                       list_move_tail(&req0->execlist_link,
-                                      &engine->execlist_retired_req_list);
+                       list_del(&req0->execlist_link);
+                       i915_gem_request_unreference(req0);
                        req0 = cursor;
                } else {
+                       if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
+                               /*
+                                * req0 (after merged) ctx requires single
+                                * submission, stop picking
+                                */
+                               if (req0->ctx->execlists_force_single_submission)
+                                       break;
+                               /*
+                                * req0 ctx doesn't require single submission,
+                                * but next req ctx requires, stop picking
+                                */
+                               if (cursor->ctx->execlists_force_single_submission)
+                                       break;
+                       }
                        req1 = cursor;
                        WARN_ON(req1->elsp_submitted);
                        break;
@@ -466,6 +467,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
        if (unlikely(!req0))
                return;
 
+       execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
+
+       if (req1)
+               execlists_context_status_change(req1,
+                                               INTEL_CONTEXT_SCHEDULE_IN);
+
        if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
                /*
                 * WaIdleLiteRestore: make sure we never cause a lite restore
@@ -486,7 +493,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
 }
 
 static unsigned int
-execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
+execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
 {
        struct drm_i915_gem_request *head_req;
 
@@ -496,19 +503,18 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
                                            struct drm_i915_gem_request,
                                            execlist_link);
 
-       if (!head_req)
-               return 0;
-
-       if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
-               return 0;
+       if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
+               return 0;
 
        WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
 
        if (--head_req->elsp_submitted > 0)
                return 0;
 
-       list_move_tail(&head_req->execlist_link,
-                      &engine->execlist_retired_req_list);
+       execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
+
+       list_del(&head_req->execlist_link);
+       i915_gem_request_unreference(head_req);
 
        return 1;
 }
@@ -517,7 +523,7 @@ static u32
 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
                   u32 *context_id)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u32 status;
 
        read_pointer %= GEN8_CSB_ENTRIES;
@@ -535,7 +541,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
 
 /**
  * intel_lrc_irq_handler() - handle Context Switch interrupts
- * @engine: Engine Command Streamer to handle.
+ * @data: the engine, passed to the tasklet as an unsigned long
  *
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
@@ -543,7 +549,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
 static void intel_lrc_irq_handler(unsigned long data)
 {
        struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u32 status_pointer;
        unsigned int read_pointer, write_pointer;
        u32 csb[GEN8_CSB_ENTRIES][2];
@@ -612,11 +618,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
        struct drm_i915_gem_request *cursor;
        int num_elements = 0;
 
-       if (request->ctx != request->i915->kernel_context)
-               intel_lr_context_pin(request->ctx, engine);
-
-       i915_gem_request_reference(request);
-
        spin_lock_bh(&engine->execlist_lock);
 
        list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
@@ -633,12 +634,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
                if (request->ctx == tail_req->ctx) {
                        WARN(tail_req->elsp_submitted != 0,
                                "More than 2 already-submitted reqs queued\n");
-                       list_move_tail(&tail_req->execlist_link,
-                                      &engine->execlist_retired_req_list);
+                       list_del(&tail_req->execlist_link);
+                       i915_gem_request_unreference(tail_req);
                }
        }
 
+       i915_gem_request_reference(request);
        list_add_tail(&request->execlist_link, &engine->execlist_queue);
+       request->ctx_hw_id = request->ctx->hw_id;
        if (num_elements == 0)
                execlists_context_unqueue(engine);
 
@@ -698,9 +701,23 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-       int ret = 0;
+       struct intel_engine_cs *engine = request->engine;
+       struct intel_context *ce = &request->ctx->engine[engine->id];
+       int ret;
+
+       /* Flush enough space to reduce the likelihood of waiting after
+        * we start building the request - in which case we will just
+        * have to repeat work.
+        */
+       request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+       if (!ce->state) {
+               ret = execlists_context_deferred_alloc(request->ctx, engine);
+               if (ret)
+                       return ret;
+       }
 
-       request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
+       request->ringbuf = ce->ringbuf;
 
        if (i915.enable_guc_submission) {
                /*
@@ -708,16 +725,39 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
                 * going any further, as the i915_add_request() call
                 * later on mustn't fail ...
                 */
-               struct intel_guc *guc = &request->i915->guc;
-
-               ret = i915_guc_wq_check_space(guc->execbuf_client);
+               ret = i915_guc_wq_check_space(request);
                if (ret)
                        return ret;
        }
 
-       if (request->ctx != request->i915->kernel_context)
-               ret = intel_lr_context_pin(request->ctx, request->engine);
+       ret = intel_lr_context_pin(request->ctx, engine);
+       if (ret)
+               return ret;
+
+       ret = intel_ring_begin(request, 0);
+       if (ret)
+               goto err_unpin;
+
+       if (!ce->initialised) {
+               ret = engine->init_context(request);
+               if (ret)
+                       goto err_unpin;
+
+               ce->initialised = true;
+       }
+
+       /* Note that after this point, we have committed to using
+        * this request as it is being used to both track the
+        * state of engine initialisation and liveness of the
+        * golden renderstate above. Think twice before you try
+        * to cancel/unwind this request now.
+        */
+
+       request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+       return 0;
 
+err_unpin:
+       intel_lr_context_unpin(request->ctx, engine);
        return ret;
 }
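
The reserved_space accounting above makes the final breadcrumb emission infallible: the reserve is charged before any ring space is claimed, so every intel_ring_begin() during construction must leave it untouched, and it is released only once the request is committed. A toy model of that accounting, with hypothetical names:

#include <stdio.h>

#define REQUEST_SIZE 64 /* bytes held back for the closing commands */

struct ring {
	int space;          /* bytes still free in the ringbuffer */
	int reserved_space; /* bytes promised to the request tail */
};

/* Succeeds only if the payload fits without touching the reserve. */
static int ring_begin(struct ring *r, int bytes)
{
	if (r->space < bytes + r->reserved_space)
		return -1; /* the driver would wait for space here */
	r->space -= bytes;
	return 0;
}

int main(void)
{
	struct ring r = { .space = 256, .reserved_space = 0 };

	r.reserved_space += REQUEST_SIZE;         /* start of request setup */
	if (ring_begin(&r, 128) == 0)
		r.reserved_space -= REQUEST_SIZE; /* committed: the reserve is
						   * now free for the tail */

	printf("space=%d reserved=%d\n", r.space, r.reserved_space);
	return 0;
}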
 
@@ -734,7 +774,6 @@ static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
-       struct drm_i915_private *dev_priv = request->i915;
        struct intel_engine_cs *engine = request->engine;
 
        intel_logical_ring_advance(ringbuf);
@@ -750,54 +789,28 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance(ringbuf);
 
-       if (intel_engine_stopped(engine))
-               return 0;
-
-       if (engine->last_context != request->ctx) {
-               if (engine->last_context)
-                       intel_lr_context_unpin(engine->last_context, engine);
-               if (request->ctx != request->i915->kernel_context) {
-                       intel_lr_context_pin(request->ctx, engine);
-                       engine->last_context = request->ctx;
-               } else {
-                       engine->last_context = NULL;
-               }
-       }
+       /* We keep the previous context alive until we retire the following
+        * request. This ensures that the context object is still pinned
+        * for any residual writes the HW makes into it on the context switch
+        * into the next object following the breadcrumb. Otherwise, we may
+        * retire the context too early.
+        */
+       request->previous_context = engine->last_context;
+       engine->last_context = request->ctx;
 
-       if (dev_priv->guc.execbuf_client)
-               i915_guc_submit(dev_priv->guc.execbuf_client, request);
+       if (i915.enable_guc_submission)
+               i915_guc_submit(request);
        else
                execlists_context_queue(request);
 
        return 0;
 }
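
The previous_context bookkeeping above is a deferred-release pattern: the outgoing context must stay referenced until the next request retires, because the hardware can still write into its image during the switch. A small refcounting sketch of the idea (all names illustrative):

#include <stdlib.h>

struct ctx { int refcount; };

static struct ctx *ctx_get(struct ctx *c)
{
        if (c)
                c->refcount++;
        return c;
}

static void ctx_put(struct ctx *c)
{
        if (c && --c->refcount == 0)
                free(c);
}

struct request {
        struct ctx *ctx;
        struct ctx *previous;    /* context live before this request */
};

struct engine { struct ctx *last; };

/* On submit, pin the outgoing context to the request... */
static void submit(struct engine *e, struct request *rq)
{
        rq->previous = ctx_get(e->last);
        e->last = rq->ctx;
}

/* ...and only release it once this request has retired. */
static void retire(struct request *rq)
{
        ctx_put(rq->previous);
        rq->previous = NULL;
}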
 
-int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
-{
-       /*
-        * The first call merely notes the reserve request and is common for
-        * all back ends. The subsequent localised _begin() call actually
-        * ensures that the reservation is available. Without the begin, if
-        * the request creator immediately submitted the request without
-        * adding any commands to it then there might not actually be
-        * sufficient room for the submission commands.
-        */
-       intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
-       return intel_ring_begin(request, 0);
-}
-
 /**
  * execlists_submission() - submit a batchbuffer for execution, Execlists style
- * @dev: DRM device.
- * @file: DRM file.
- * @ring: Engine Command Streamer to submit to.
- * @ctx: Context to employ for this submission.
+ * @params: execbuffer call parameters.
  * @args: execbuffer call arguments.
  * @vmas: list of vmas.
- * @batch_obj: the batchbuffer to submit.
- * @exec_start: batchbuffer start virtual address pointer.
- * @dispatch_flags: translated execbuffer call flags.
  *
  * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
  * away the submission details of the execbuffer ioctl call.
@@ -810,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 {
        struct drm_device       *dev = params->dev;
        struct intel_engine_cs *engine = params->engine;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
        u64 exec_start;
        int instp_mode;
@@ -881,28 +894,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
        return 0;
 }
 
-void intel_execlists_retire_requests(struct intel_engine_cs *engine)
+void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *req, *tmp;
-       struct list_head retired_list;
+       LIST_HEAD(cancel_list);
 
-       WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
-       if (list_empty(&engine->execlist_retired_req_list))
-               return;
+       WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
 
-       INIT_LIST_HEAD(&retired_list);
        spin_lock_bh(&engine->execlist_lock);
-       list_replace_init(&engine->execlist_retired_req_list, &retired_list);
+       list_replace_init(&engine->execlist_queue, &cancel_list);
        spin_unlock_bh(&engine->execlist_lock);
 
-       list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
-               struct intel_context *ctx = req->ctx;
-               struct drm_i915_gem_object *ctx_obj =
-                               ctx->engine[engine->id].state;
-
-               if (ctx_obj && (ctx != req->i915->kernel_context))
-                       intel_lr_context_unpin(ctx, engine);
-
+       list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
                list_del(&req->execlist_link);
                i915_gem_request_unreference(req);
        }
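
Cancelling above uses the classic splice-under-lock idiom: steal the whole list inside the critical section, then walk and release the entries outside it, so the lock hold time stays constant no matter how deep the queue is. A pthread-based sketch (the list layout and names are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

struct engine {
        pthread_spinlock_t lock;
        struct node *queue;      /* pending requests */
};

static void cancel_requests(struct engine *e)
{
        struct node *list, *n;

        pthread_spin_lock(&e->lock);
        list = e->queue;         /* the list_replace_init() step */
        e->queue = NULL;
        pthread_spin_unlock(&e->lock);

        /* Release outside the lock; nobody else can see the list now. */
        while ((n = list)) {
                list = n->next;
                free(n);         /* i.e. drop the queue's reference */
        }
}
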
@@ -910,7 +913,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
 
 void intel_logical_ring_stop(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
 
        if (!intel_engine_initialized(engine))
@@ -923,7 +926,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
 
        /* TODO: Is this correct with Execlists enabled? */
        I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-       if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+       if (intel_wait_for_register(dev_priv,
+                                   RING_MI_MODE(engine->mmio_base),
+                                   MODE_IDLE, MODE_IDLE,
+                                   1000)) {
                DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
                return;
        }
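
intel_wait_for_register() replaces the open-coded wait_for() with a shared poll-until-masked-value-or-timeout helper. A portable approximation of that shape (CLOCK_MONOTONIC polling; the register is modelled as a plain pointer):

#include <errno.h>
#include <stdint.h>
#include <time.h>

/* Poll (reg & mask) == value until success or timeout_ms expires. */
static int wait_for_register(const volatile uint32_t *reg, uint32_t mask,
                             uint32_t value, unsigned int timeout_ms)
{
        struct timespec now, end;

        clock_gettime(CLOCK_MONOTONIC, &end);
        end.tv_sec += timeout_ms / 1000;
        end.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
        if (end.tv_nsec >= 1000000000L) {
                end.tv_sec++;
                end.tv_nsec -= 1000000000L;
        }

        for (;;) {
                if ((*reg & mask) == value)
                        return 0;
                clock_gettime(CLOCK_MONOTONIC, &now);
                if (now.tv_sec > end.tv_sec ||
                    (now.tv_sec == end.tv_sec && now.tv_nsec >= end.tv_nsec))
                        return -ETIMEDOUT;
        }
}

The stop-ring path above then reduces to one such call plus an error message.
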
@@ -946,25 +952,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
        return 0;
 }
 
-static int intel_lr_context_do_pin(struct intel_context *ctx,
-                                  struct intel_engine_cs *engine)
+static int intel_lr_context_pin(struct i915_gem_context *ctx,
+                               struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
-       struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+       struct drm_i915_private *dev_priv = ctx->i915;
+       struct intel_context *ce = &ctx->engine[engine->id];
        void *vaddr;
        u32 *lrc_reg_state;
        int ret;
 
-       WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+       if (ce->pin_count++)
+               return 0;
 
-       ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
-                       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+       ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
+                                   PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
        if (ret)
-               return ret;
+               goto err;
 
-       vaddr = i915_gem_object_pin_map(ctx_obj);
+       vaddr = i915_gem_object_pin_map(ce->state);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                goto unpin_ctx_obj;
@@ -972,65 +979,54 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
 
        lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-       ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
+       ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
        if (ret)
                goto unpin_map;
 
-       ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+       i915_gem_context_reference(ctx);
+       ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
        intel_lr_context_descriptor_update(ctx, engine);
-       lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
-       ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
-       ctx_obj->dirty = true;
+
+       lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
+       ce->lrc_reg_state = lrc_reg_state;
+       ce->state->dirty = true;
 
        /* Invalidate GuC TLB. */
        if (i915.enable_guc_submission)
                I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 
-       return ret;
+       return 0;
 
 unpin_map:
-       i915_gem_object_unpin_map(ctx_obj);
+       i915_gem_object_unpin_map(ce->state);
 unpin_ctx_obj:
-       i915_gem_object_ggtt_unpin(ctx_obj);
-
+       i915_gem_object_ggtt_unpin(ce->state);
+err:
+       ce->pin_count = 0;
        return ret;
 }
 
-static int intel_lr_context_pin(struct intel_context *ctx,
-                               struct intel_engine_cs *engine)
+void intel_lr_context_unpin(struct i915_gem_context *ctx,
+                           struct intel_engine_cs *engine)
 {
-       int ret = 0;
+       struct intel_context *ce = &ctx->engine[engine->id];
 
-       if (ctx->engine[engine->id].pin_count++ == 0) {
-               ret = intel_lr_context_do_pin(ctx, engine);
-               if (ret)
-                       goto reset_pin_count;
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+       GEM_BUG_ON(ce->pin_count == 0);
 
-               i915_gem_context_reference(ctx);
-       }
-       return ret;
+       if (--ce->pin_count)
+               return;
 
-reset_pin_count:
-       ctx->engine[engine->id].pin_count = 0;
-       return ret;
-}
+       intel_unpin_ringbuffer_obj(ce->ringbuf);
 
-void intel_lr_context_unpin(struct intel_context *ctx,
-                           struct intel_engine_cs *engine)
-{
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+       i915_gem_object_unpin_map(ce->state);
+       i915_gem_object_ggtt_unpin(ce->state);
 
-       WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
-       if (--ctx->engine[engine->id].pin_count == 0) {
-               i915_gem_object_unpin_map(ctx_obj);
-               intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
-               i915_gem_object_ggtt_unpin(ctx_obj);
-               ctx->engine[engine->id].lrc_vma = NULL;
-               ctx->engine[engine->id].lrc_desc = 0;
-               ctx->engine[engine->id].lrc_reg_state = NULL;
+       ce->lrc_vma = NULL;
+       ce->lrc_desc = 0;
+       ce->lrc_reg_state = NULL;
 
-               i915_gem_context_unreference(ctx);
-       }
+       i915_gem_context_unreference(ctx);
 }
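
The pin/unpin pair above is refcounting with goto-style unwinding: only the first pin does real work, a failure unwinds every completed step in reverse order and resets the count, and only the last unpin tears things down. A self-contained sketch of the shape (the map/unmap helpers stand in for the GGTT pinning and vmapping):

#include <errno.h>
#include <stdlib.h>

struct engine_ctx {
        unsigned int pin_count;
        void *state;             /* stand-in for the mapped context image */
        void *ring;              /* stand-in for the mapped ringbuffer */
};

static void *map_obj(void)     { return malloc(1); }
static void unmap_obj(void *p) { free(p); }

static int ctx_pin(struct engine_ctx *ce)
{
        int ret;

        if (ce->pin_count++)     /* nested pin: just count it */
                return 0;

        ce->state = map_obj();
        if (!ce->state) {
                ret = -ENOMEM;
                goto err;
        }

        ce->ring = map_obj();
        if (!ce->ring) {
                ret = -ENOMEM;
                goto err_state;
        }
        return 0;

err_state:
        unmap_obj(ce->state);
        ce->state = NULL;
err:
        ce->pin_count = 0;       /* let a later attempt start over */
        return ret;
}

static void ctx_unpin(struct engine_ctx *ce)
{
        if (--ce->pin_count)     /* still pinned elsewhere */
                return;
        unmap_obj(ce->ring);
        unmap_obj(ce->state);
        ce->ring = ce->state = NULL;
}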
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1038,9 +1034,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
        int ret, i;
        struct intel_engine_cs *engine = req->engine;
        struct intel_ringbuffer *ringbuf = req->ringbuf;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_workarounds *w = &dev_priv->workarounds;
+       struct i915_workarounds *w = &req->i915->workarounds;
 
        if (w->count == 0)
                return 0;
@@ -1103,15 +1097,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                uint32_t *const batch,
                                                uint32_t index)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
        /*
-        * WaDisableLSQCROPERFforOCL:skl
+        * WaDisableLSQCROPERFforOCL:skl,kbl
         * This WA is implemented in skl_init_clock_gating() but since
         * this batch updates GEN8_L3SQCREG4 with default value we need to
         * set this bit here to retain the WA during flush.
         */
-       if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
+           IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
                l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1163,7 +1159,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
 /**
  * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
  *
- * @ring: only applicable for RCS
+ * @engine: only applicable for RCS
  * @wa_ctx: structure representing wa_ctx
  *  offset: specifies start of the batch, should be cache-aligned. This is updated
  *    with the offset value received as input.
@@ -1200,7 +1196,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
        wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
-       if (IS_BROADWELL(engine->dev)) {
+       if (IS_BROADWELL(engine->i915)) {
                int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
                if (rc < 0)
                        return rc;
@@ -1237,7 +1233,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
 /**
  * gen8_init_perctx_bb() - initialize per ctx batch with WA
  *
- * @ring: only applicable for RCS
+ * @engine: only applicable for RCS
  * @wa_ctx: structure representing wa_ctx
  *  offset: specifies start of the batch, should be cache-aligned.
  *  size: size of the batch in DWORDS but HW expects in terms of cachelines
@@ -1272,12 +1268,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
                                    uint32_t *offset)
 {
        int ret;
-       struct drm_device *dev = engine->dev;
+       struct drm_i915_private *dev_priv = engine->i915;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
        /* WaDisableCtxRestoreArbitration:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1286,6 +1282,47 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
                return ret;
        index = ret;
 
+       /* WaClearSlmSpaceAtContextSwitch:kbl */
+       /* Actual scratch location is at 128 bytes offset */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
+               uint32_t scratch_addr
+                       = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+               wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+               wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+                                          PIPE_CONTROL_GLOBAL_GTT_IVB |
+                                          PIPE_CONTROL_CS_STALL |
+                                          PIPE_CONTROL_QW_WRITE));
+               wa_ctx_emit(batch, index, scratch_addr);
+               wa_ctx_emit(batch, index, 0);
+               wa_ctx_emit(batch, index, 0);
+               wa_ctx_emit(batch, index, 0);
+       }
+
+       /* WaMediaPoolStateCmdInWABB:bxt */
+       if (HAS_POOLED_EU(engine->i915)) {
+               /*
+                * EU pool configuration is set up along with the golden
+                * context during context initialization. The value depends
+                * on the device type (2x6 or 3x6) and needs updating based
+                * on which subslice is disabled, especially for 2x6
+                * devices. However, it is safe to load the default 3x6
+                * configuration instead of masking off the corresponding
+                * bits, because the HW ignores the bits of a disabled
+                * subslice and drops down to the appropriate config. See
+                * render_state_setup() in i915_gem_render_state.c for the
+                * possible configurations; to avoid duplication they are
+                * not shown here again.
+                */
+               u32 eu_pool_config = 0x00777000;
+               wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
+               wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
+               wa_ctx_emit(batch, index, eu_pool_config);
+               wa_ctx_emit(batch, index, 0);
+               wa_ctx_emit(batch, index, 0);
+               wa_ctx_emit(batch, index, 0);
+       }
+
        /* Pad to end of cacheline */
        while (index % CACHELINE_DWORDS)
                wa_ctx_emit(batch, index, MI_NOOP);
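
The padding loop matters because the hardware consumes these batch sizes in cachelines, not dwords: the batch is NOOP-filled to the next cacheline boundary before its size is reported. The arithmetic in isolation (assuming 64-byte cachelines):

#include <stdint.h>

#define MI_NOOP           0u
#define CACHELINE_BYTES   64u
#define CACHELINE_DWORDS  (CACHELINE_BYTES / sizeof(uint32_t))

/* Fill with NOOPs up to the next cacheline boundary. */
static uint32_t pad_to_cacheline(uint32_t *batch, uint32_t index)
{
        while (index % CACHELINE_DWORDS)
                batch[index++] = MI_NOOP;
        return index;
}

/* The hardware is then told the size in cachelines. */
static uint32_t size_in_cachelines(uint32_t index)
{
        return index / CACHELINE_DWORDS;
}
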
@@ -1298,12 +1335,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
                               uint32_t *const batch,
                               uint32_t *offset)
 {
-       struct drm_device *dev = engine->dev;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
+           IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
                wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
                wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
                wa_ctx_emit(batch, index,
@@ -1312,7 +1348,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
        }
 
        /* WaClearTdlStateAckDirtyBits:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+       if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
                wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
 
                wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
@@ -1331,8 +1367,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
        }
 
        /* WaDisableCtxRestoreArbitration:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
+           IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
                wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 
        wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
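
These workaround batches are gated on platform stepping ranges via the IS_*_REVID() checks. Generalized, that is just a platform-plus-revision window test, roughly as follows (the platform ids, revision bounds, and workaround name are illustrative):

#include <stdbool.h>
#include <stdint.h>

enum platform { SKYLAKE, BROXTON, KABYLAKE };

struct device_info {
        enum platform platform;
        uint8_t revision;        /* stepping from PCI config space */
};

/* True when this device is the given platform within [since, until]. */
static bool is_revid(const struct device_info *d, enum platform p,
                     uint8_t since, uint8_t until)
{
        return d->platform == p &&
               d->revision >= since && d->revision <= until;
}

/* e.g. apply a workaround only on early steppings of two platforms */
static bool needs_arb_disable_wa(const struct device_info *d)
{
        return is_revid(d, SKYLAKE, 0, 4) || is_revid(d, BROXTON, 0, 1);
}
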
@@ -1344,11 +1380,13 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 {
        int ret;
 
-       engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
-                                                  PAGE_ALIGN(size));
-       if (!engine->wa_ctx.obj) {
+       engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
+                                                   PAGE_ALIGN(size));
+       if (IS_ERR(engine->wa_ctx.obj)) {
                DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
-               return -ENOMEM;
+               ret = PTR_ERR(engine->wa_ctx.obj);
+               engine->wa_ctx.obj = NULL;
+               return ret;
        }
 
        ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
@@ -1382,9 +1420,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
        WARN_ON(engine->id != RCS);
 
        /* update this when WA for higher Gen are added */
-       if (INTEL_INFO(engine->dev)->gen > 9) {
+       if (INTEL_GEN(engine->i915) > 9) {
                DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
-                         INTEL_INFO(engine->dev)->gen);
+                         INTEL_GEN(engine->i915));
                return 0;
        }
 
@@ -1404,7 +1442,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
        batch = kmap_atomic(page);
        offset = 0;
 
-       if (INTEL_INFO(engine->dev)->gen == 8) {
+       if (IS_GEN8(engine->i915)) {
                ret = gen8_init_indirectctx_bb(engine,
                                               &wa_ctx->indirect_ctx,
                                               batch,
@@ -1418,7 +1456,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
                                          &offset);
                if (ret)
                        goto out;
-       } else if (INTEL_INFO(engine->dev)->gen == 9) {
+       } else if (IS_GEN9(engine->i915)) {
                ret = gen9_init_indirectctx_bb(engine,
                                               &wa_ctx->indirect_ctx,
                                               batch,
@@ -1444,7 +1482,7 @@ out:
 
 static void lrc_init_hws(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        I915_WRITE(RING_HWS_PGA(engine->mmio_base),
                   (u32)engine->status_page.gfx_addr);
@@ -1453,8 +1491,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
 
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        unsigned int next_context_status_buffer_hw;
 
        lrc_init_hws(engine);
@@ -1501,8 +1538,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
 static int gen8_init_render_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
 
        ret = gen8_init_common_ring(engine);
@@ -1579,7 +1615,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
        if (req->ctx->ppgtt &&
            (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
                if (!USES_FULL_48BIT_PPGTT(req->i915) &&
-                   !intel_vgpu_active(req->i915->dev)) {
+                   !intel_vgpu_active(req->i915)) {
                        ret = intel_logical_ring_emit_pdps(req);
                        if (ret)
                                return ret;
@@ -1605,38 +1641,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
        return 0;
 }
 
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               I915_WRITE_IMR(engine,
-                              ~(engine->irq_enable_mask | engine->irq_keep_mask));
-               POSTING_READ(RING_IMR(engine->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       struct drm_i915_private *dev_priv = engine->i915;
+       I915_WRITE_IMR(engine,
+                      ~(engine->irq_enable_mask | engine->irq_keep_mask));
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
-               POSTING_READ(RING_IMR(engine->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       struct drm_i915_private *dev_priv = engine->i915;
+       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1645,8 +1661,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct intel_engine_cs *engine = ringbuf->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = request->i915;
        uint32_t cmd;
        int ret;
 
@@ -1687,9 +1702,10 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct intel_engine_cs *engine = ringbuf->engine;
        u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
-       bool vf_flush_wa = false;
+       bool vf_flush_wa = false, dc_flush_wa = false;
        u32 flags = 0;
        int ret;
+       int len;
 
        flags |= PIPE_CONTROL_CS_STALL;
 
@@ -1714,11 +1730,23 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
                 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
                 * pipe control.
                 */
-               if (IS_GEN9(engine->dev))
+               if (IS_GEN9(request->i915))
                        vf_flush_wa = true;
+
+               /* WaForGAMHang:kbl */
+               if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
+                       dc_flush_wa = true;
        }
 
-       ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
+       len = 6;
+
+       if (vf_flush_wa)
+               len += 6;
+
+       if (dc_flush_wa)
+               len += 12;
+
+       ret = intel_ring_begin(request, len);
        if (ret)
                return ret;
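
Because every dword must be claimed with a single intel_ring_begin() before any are written, the flush length is summed up front from whichever workarounds are active. The sizing logic in isolation (dword counts taken from the hunk above; the ring/space handling is illustrative):

#include <errno.h>
#include <stdbool.h>

static int flush_len(bool vf_flush_wa, bool dc_flush_wa)
{
        int len = 6;             /* the main PIPE_CONTROL */

        if (vf_flush_wa)
                len += 6;        /* preceding NULL pipe control */
        if (dc_flush_wa)
                len += 12;       /* DC flush before + CS stall after */

        return len;
}

static int emit_flush(unsigned int space_dw, bool vf, bool dc)
{
        int len = flush_len(vf, dc);

        if ((unsigned int)len > space_dw)
                return -ENOSPC;  /* i.e. intel_ring_begin() failing */
        /* ... now write exactly len dwords ... */
        return 0;
}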
 
@@ -1731,25 +1759,34 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
                intel_logical_ring_emit(ringbuf, 0);
        }
 
+       if (dc_flush_wa) {
+               intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+               intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+       }
+
        intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf, flags);
        intel_logical_ring_emit(ringbuf, scratch_addr);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_advance(ringbuf);
 
-       return 0;
-}
+       if (dc_flush_wa) {
+               intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+               intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+       }
 
-static u32 gen8_get_seqno(struct intel_engine_cs *engine)
-{
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
+       intel_logical_ring_advance(ringbuf);
 
-static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+       return 0;
 }
 
 static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
@@ -1767,14 +1804,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
        intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
 }
 
-static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-
-       /* See bxt_a_get_seqno() explaining the reason for the clflush. */
-       intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
 /*
  * Reserve space for 2 NOOPs at the end of each request to be
  * used as a workaround for not being allowed to do lite
@@ -1782,11 +1811,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
  */
 #define WA_TAIL_DWORDS 2
 
-static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
-{
-       return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
-}
-
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1802,10 +1826,10 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
        intel_logical_ring_emit(ringbuf,
                                (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
        intel_logical_ring_emit(ringbuf,
-                               hws_seqno_address(request->engine) |
+                               intel_hws_seqno_address(request->engine) |
                                MI_FLUSH_DW_USE_GTT);
        intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       intel_logical_ring_emit(ringbuf, request->seqno);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
@@ -1832,7 +1856,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
                                (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_CS_STALL |
                                 PIPE_CONTROL_QW_WRITE));
-       intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
+       intel_logical_ring_emit(ringbuf,
+                               intel_hws_seqno_address(request->engine));
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
        /* We're thrashing one dword of HWS. */
@@ -1894,7 +1919,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 /**
  * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
  *
- * @ring: Engine Command Streamer.
+ * @engine: Engine Command Streamer.
  *
  */
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
@@ -1911,7 +1936,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
        if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
                tasklet_kill(&engine->irq_tasklet);
 
-       dev_priv = engine->dev->dev_private;
+       dev_priv = engine->i915;
 
        if (engine->buffer) {
                intel_logical_ring_stop(engine);
@@ -1924,36 +1949,34 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
        i915_cmd_parser_fini_ring(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);
 
+       intel_engine_fini_breadcrumbs(engine);
+
        if (engine->status_page.obj) {
                i915_gem_object_unpin_map(engine->status_page.obj);
                engine->status_page.obj = NULL;
        }
+       intel_lr_context_unpin(dev_priv->kernel_context, engine);
 
        engine->idle_lite_restore_wa = 0;
        engine->disable_lite_restore_wa = false;
        engine->ctx_desc_template = 0;
 
        lrc_destroy_wa_ctx_obj(engine);
-       engine->dev = NULL;
+       engine->i915 = NULL;
 }
 
 static void
-logical_ring_default_vfuncs(struct drm_device *dev,
-                           struct intel_engine_cs *engine)
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
        /* Default vfuncs which can be overridden by each engine. */
        engine->init_hw = gen8_init_common_ring;
        engine->emit_request = gen8_emit_request;
        engine->emit_flush = gen8_emit_flush;
-       engine->irq_get = gen8_logical_ring_get_irq;
-       engine->irq_put = gen8_logical_ring_put_irq;
+       engine->irq_enable = gen8_logical_ring_enable_irq;
+       engine->irq_disable = gen8_logical_ring_disable_irq;
        engine->emit_bb_start = gen8_emit_bb_start;
-       engine->get_seqno = gen8_get_seqno;
-       engine->set_seqno = gen8_set_seqno;
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
                engine->irq_seqno_barrier = bxt_a_seqno_barrier;
-               engine->set_seqno = bxt_a_set_seqno;
-       }
 }
 
 static inline void
@@ -1982,60 +2005,28 @@ lrc_setup_hws(struct intel_engine_cs *engine,
 }
 
 static int
-logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
+logical_ring_init(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_context *dctx = dev_priv->kernel_context;
-       enum forcewake_domains fw_domains;
+       struct i915_gem_context *dctx = engine->i915->kernel_context;
        int ret;
 
-       /* Intentionally left blank. */
-       engine->buffer = NULL;
-
-       engine->dev = dev;
-       INIT_LIST_HEAD(&engine->active_list);
-       INIT_LIST_HEAD(&engine->request_list);
-       i915_gem_batch_pool_init(dev, &engine->batch_pool);
-       init_waitqueue_head(&engine->irq_queue);
-
-       INIT_LIST_HEAD(&engine->buffers);
-       INIT_LIST_HEAD(&engine->execlist_queue);
-       INIT_LIST_HEAD(&engine->execlist_retired_req_list);
-       spin_lock_init(&engine->execlist_lock);
-
-       tasklet_init(&engine->irq_tasklet,
-                    intel_lrc_irq_handler, (unsigned long)engine);
-
-       logical_ring_init_platform_invariants(engine);
-
-       fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
-                                                   RING_ELSP(engine),
-                                                   FW_REG_WRITE);
-
-       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
-                                                    RING_CONTEXT_STATUS_PTR(engine),
-                                                    FW_REG_READ | FW_REG_WRITE);
-
-       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
-                                                    RING_CONTEXT_STATUS_BUF_BASE(engine),
-                                                    FW_REG_READ);
-
-       engine->fw_domains = fw_domains;
+       ret = intel_engine_init_breadcrumbs(engine);
+       if (ret)
+               goto error;
 
        ret = i915_cmd_parser_init_ring(engine);
        if (ret)
                goto error;
 
-       ret = intel_lr_context_deferred_alloc(dctx, engine);
+       ret = execlists_context_deferred_alloc(dctx, engine);
        if (ret)
                goto error;
 
        /* As this is the default context, always pin it */
-       ret = intel_lr_context_do_pin(dctx, engine);
+       ret = intel_lr_context_pin(dctx, engine);
        if (ret) {
-               DRM_ERROR(
-                       "Failed to pin and map ringbuffer %s: %d\n",
-                       engine->name, ret);
+               DRM_ERROR("Failed to pin context for %s: %d\n",
+                         engine->name, ret);
                goto error;
        }
 
@@ -2053,26 +2044,16 @@ error:
        return ret;
 }
 
-static int logical_render_ring_init(struct drm_device *dev)
+static int logical_render_ring_init(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
 
-       engine->name = "render ring";
-       engine->id = RCS;
-       engine->exec_id = I915_EXEC_RENDER;
-       engine->guc_id = GUC_RENDER_ENGINE;
-       engine->mmio_base = RENDER_RING_BASE;
-
-       logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
-       if (HAS_L3_DPF(dev))
+       if (HAS_L3_DPF(dev_priv))
                engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
-       logical_ring_default_vfuncs(dev, engine);
-
        /* Override some for render ring. */
-       if (INTEL_INFO(dev)->gen >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                engine->init_hw = gen9_init_render_ring;
        else
                engine->init_hw = gen8_init_render_ring;
@@ -2081,9 +2062,7 @@ static int logical_render_ring_init(struct drm_device *dev)
        engine->emit_flush = gen8_emit_flush_render;
        engine->emit_request = gen8_emit_request_render;
 
-       engine->dev = dev;
-
-       ret = intel_init_pipe_control(engine);
+       ret = intel_init_pipe_control(engine, 4096);
        if (ret)
                return ret;
 
@@ -2098,7 +2077,7 @@ static int logical_render_ring_init(struct drm_device *dev)
                          ret);
        }
 
-       ret = logical_ring_init(dev, engine);
+       ret = logical_ring_init(engine);
        if (ret) {
                lrc_destroy_wa_ctx_obj(engine);
        }
@@ -2106,133 +2085,164 @@ static int logical_render_ring_init(struct drm_device *dev)
        return ret;
 }
 
-static int logical_bsd_ring_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *engine = &dev_priv->engine[VCS];
-
-       engine->name = "bsd ring";
-       engine->id = VCS;
-       engine->exec_id = I915_EXEC_BSD;
-       engine->guc_id = GUC_VIDEO_ENGINE;
-       engine->mmio_base = GEN6_BSD_RING_BASE;
-
-       logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
-       logical_ring_default_vfuncs(dev, engine);
-
-       return logical_ring_init(dev, engine);
-}
+static const struct logical_ring_info {
+       const char *name;
+       unsigned exec_id;
+       unsigned guc_id;
+       u32 mmio_base;
+       unsigned irq_shift;
+       int (*init)(struct intel_engine_cs *engine);
+} logical_rings[] = {
+       [RCS] = {
+               .name = "render ring",
+               .exec_id = I915_EXEC_RENDER,
+               .guc_id = GUC_RENDER_ENGINE,
+               .mmio_base = RENDER_RING_BASE,
+               .irq_shift = GEN8_RCS_IRQ_SHIFT,
+               .init = logical_render_ring_init,
+       },
+       [BCS] = {
+               .name = "blitter ring",
+               .exec_id = I915_EXEC_BLT,
+               .guc_id = GUC_BLITTER_ENGINE,
+               .mmio_base = BLT_RING_BASE,
+               .irq_shift = GEN8_BCS_IRQ_SHIFT,
+               .init = logical_ring_init,
+       },
+       [VCS] = {
+               .name = "bsd ring",
+               .exec_id = I915_EXEC_BSD,
+               .guc_id = GUC_VIDEO_ENGINE,
+               .mmio_base = GEN6_BSD_RING_BASE,
+               .irq_shift = GEN8_VCS1_IRQ_SHIFT,
+               .init = logical_ring_init,
+       },
+       [VCS2] = {
+               .name = "bsd2 ring",
+               .exec_id = I915_EXEC_BSD,
+               .guc_id = GUC_VIDEO_ENGINE2,
+               .mmio_base = GEN8_BSD2_RING_BASE,
+               .irq_shift = GEN8_VCS2_IRQ_SHIFT,
+               .init = logical_ring_init,
+       },
+       [VECS] = {
+               .name = "video enhancement ring",
+               .exec_id = I915_EXEC_VEBOX,
+               .guc_id = GUC_VIDEOENHANCE_ENGINE,
+               .mmio_base = VEBOX_RING_BASE,
+               .irq_shift = GEN8_VECS_IRQ_SHIFT,
+               .init = logical_ring_init,
+       },
+};
 
-static int logical_bsd2_ring_init(struct drm_device *dev)
+static struct intel_engine_cs *
+logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
+       const struct logical_ring_info *info = &logical_rings[id];
+       struct intel_engine_cs *engine = &dev_priv->engine[id];
+       enum forcewake_domains fw_domains;
 
-       engine->name = "bsd2 ring";
-       engine->id = VCS2;
-       engine->exec_id = I915_EXEC_BSD;
-       engine->guc_id = GUC_VIDEO_ENGINE2;
-       engine->mmio_base = GEN8_BSD2_RING_BASE;
+       engine->id = id;
+       engine->name = info->name;
+       engine->exec_id = info->exec_id;
+       engine->guc_id = info->guc_id;
+       engine->mmio_base = info->mmio_base;
 
-       logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
-       logical_ring_default_vfuncs(dev, engine);
+       engine->i915 = dev_priv;
 
-       return logical_ring_init(dev, engine);
-}
+       /* Intentionally left blank. */
+       engine->buffer = NULL;
 
-static int logical_blt_ring_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *engine = &dev_priv->engine[BCS];
+       fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+                                                   RING_ELSP(engine),
+                                                   FW_REG_WRITE);
+
+       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    RING_CONTEXT_STATUS_PTR(engine),
+                                                    FW_REG_READ | FW_REG_WRITE);
 
-       engine->name = "blitter ring";
-       engine->id = BCS;
-       engine->exec_id = I915_EXEC_BLT;
-       engine->guc_id = GUC_BLITTER_ENGINE;
-       engine->mmio_base = BLT_RING_BASE;
+       fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+                                                    RING_CONTEXT_STATUS_BUF_BASE(engine),
+                                                    FW_REG_READ);
 
-       logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
-       logical_ring_default_vfuncs(dev, engine);
+       engine->fw_domains = fw_domains;
 
-       return logical_ring_init(dev, engine);
-}
+       INIT_LIST_HEAD(&engine->active_list);
+       INIT_LIST_HEAD(&engine->request_list);
+       INIT_LIST_HEAD(&engine->buffers);
+       INIT_LIST_HEAD(&engine->execlist_queue);
+       spin_lock_init(&engine->execlist_lock);
 
-static int logical_vebox_ring_init(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *engine = &dev_priv->engine[VECS];
+       tasklet_init(&engine->irq_tasklet,
+                    intel_lrc_irq_handler, (unsigned long)engine);
 
-       engine->name = "video enhancement ring";
-       engine->id = VECS;
-       engine->exec_id = I915_EXEC_VEBOX;
-       engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
-       engine->mmio_base = VEBOX_RING_BASE;
+       logical_ring_init_platform_invariants(engine);
+       logical_ring_default_vfuncs(engine);
+       logical_ring_default_irqs(engine, info->irq_shift);
 
-       logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
-       logical_ring_default_vfuncs(dev, engine);
+       intel_engine_init_hangcheck(engine);
+       i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
 
-       return logical_ring_init(dev, engine);
+       return engine;
 }
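
The per-engine init functions collapse into one descriptor table plus logical_ring_setup(); adding an engine becomes adding a table entry. The design in miniature (the mmio offsets and names here are illustrative):

#include <errno.h>

enum engine_id { RCS, BCS, VCS, VCS2, VECS, NUM_ENGINES };

struct engine { const char *name; unsigned int mmio_base; };

typedef int (*engine_init_fn)(struct engine *e);

static int common_init(struct engine *e) { (void)e; return 0; }
static int render_init(struct engine *e) { return common_init(e); /* + RCS-only setup */ }

static const struct {
        const char *name;
        unsigned int mmio_base;
        engine_init_fn init;
} table[NUM_ENGINES] = {
        [RCS]  = { "render ring",  0x02000, render_init },
        [BCS]  = { "blitter ring", 0x22000, common_init },
        [VCS]  = { "bsd ring",     0x12000, common_init },
        [VCS2] = { "bsd2 ring",    0x1c000, common_init },
        [VECS] = { "veb ring",     0x1a000, common_init },
};

/* Init only the engines the hardware mask says exist. */
static int init_engines(unsigned int hw_mask, struct engine *engines)
{
        unsigned int i, done = 0;
        int ret;

        for (i = 0; i < NUM_ENGINES; i++) {
                if (!(hw_mask & (1u << i)))
                        continue;
                engines[i].name = table[i].name;
                engines[i].mmio_base = table[i].mmio_base;
                ret = table[i].init(&engines[i]);
                if (ret)
                        return ret;      /* caller cleans up everything */
                done |= 1u << i;
        }
        /* a stale table shows up here; the driver warns and trims its mask */
        return done == hw_mask ? 0 : -ENODEV;
}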
 
 /**
  * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
  * @dev: DRM device.
  *
- * This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
- * those engines that are present in the hardware.
+ * This function inits the engines for an Execlists submission style (the
+ * equivalent in the legacy ringbuffer submission world would be
+ * i915_gem_init_engines). It does it only for those engines that are present in
+ * the hardware.
  *
  * Return: non-zero if the initialization failed.
  */
 int intel_logical_rings_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       unsigned int mask = 0;
+       unsigned int i;
        int ret;
 
-       ret = logical_render_ring_init(dev);
-       if (ret)
-               return ret;
+       WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
+               GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
 
-       if (HAS_BSD(dev)) {
-               ret = logical_bsd_ring_init(dev);
-               if (ret)
-                       goto cleanup_render_ring;
-       }
+       for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
+               if (!HAS_ENGINE(dev_priv, i))
+                       continue;
 
-       if (HAS_BLT(dev)) {
-               ret = logical_blt_ring_init(dev);
-               if (ret)
-                       goto cleanup_bsd_ring;
-       }
+               if (!logical_rings[i].init)
+                       continue;
 
-       if (HAS_VEBOX(dev)) {
-               ret = logical_vebox_ring_init(dev);
+               ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
                if (ret)
-                       goto cleanup_blt_ring;
+                       goto cleanup;
+
+               mask |= ENGINE_MASK(i);
        }
 
-       if (HAS_BSD2(dev)) {
-               ret = logical_bsd2_ring_init(dev);
-               if (ret)
-                       goto cleanup_vebox_ring;
+       /*
+        * Catch failures to update the logical_rings table when new
+        * engines are added to the driver: warn and disable the
+        * forgotten engines.
+        */
+       if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
+               struct intel_device_info *info =
+                       (struct intel_device_info *)&dev_priv->info;
+               info->ring_mask = mask;
        }
 
        return 0;
 
-cleanup_vebox_ring:
-       intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
-cleanup_blt_ring:
-       intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
-cleanup_bsd_ring:
-       intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
-cleanup_render_ring:
-       intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
+cleanup:
+       for (i = 0; i < I915_NUM_ENGINES; i++)
+               intel_logical_ring_cleanup(&dev_priv->engine[i]);
 
        return ret;
 }
 
 static u32
-make_rpcs(struct drm_device *dev)
+make_rpcs(struct drm_i915_private *dev_priv)
 {
        u32 rpcs = 0;
 
@@ -2240,7 +2250,7 @@ make_rpcs(struct drm_device *dev)
         * No explicit RPCS request is needed to ensure full
         * slice/subslice/EU enablement prior to Gen9.
         */
-       if (INTEL_INFO(dev)->gen < 9)
+       if (INTEL_GEN(dev_priv) < 9)
                return 0;
 
        /*
@@ -2249,24 +2259,24 @@ make_rpcs(struct drm_device *dev)
         * must make an explicit request through RPCS for full
         * enablement.
         */
-       if (INTEL_INFO(dev)->has_slice_pg) {
+       if (INTEL_INFO(dev_priv)->has_slice_pg) {
                rpcs |= GEN8_RPCS_S_CNT_ENABLE;
-               rpcs |= INTEL_INFO(dev)->slice_total <<
+               rpcs |= INTEL_INFO(dev_priv)->slice_total <<
                        GEN8_RPCS_S_CNT_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
 
-       if (INTEL_INFO(dev)->has_subslice_pg) {
+       if (INTEL_INFO(dev_priv)->has_subslice_pg) {
                rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
-               rpcs |= INTEL_INFO(dev)->subslice_per_slice <<
+               rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
                        GEN8_RPCS_SS_CNT_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
 
-       if (INTEL_INFO(dev)->has_eu_pg) {
-               rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+       if (INTEL_INFO(dev_priv)->has_eu_pg) {
+               rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
                        GEN8_RPCS_EU_MIN_SHIFT;
-               rpcs |= INTEL_INFO(dev)->eu_per_subslice <<
+               rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
                        GEN8_RPCS_EU_MAX_SHIFT;
                rpcs |= GEN8_RPCS_ENABLE;
        }
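
make_rpcs() is plain bitfield packing: each slice/subslice/EU field is programmed only when the hardware can power-gate that unit, and any programmed field also sets the global enable bit. Schematically (the shift and bit positions below are made up, not the GEN8_RPCS_* layout):

#include <stdbool.h>
#include <stdint.h>

#define RPCS_ENABLE       (1u << 31)
#define RPCS_S_ENABLE     (1u << 18)
#define RPCS_S_SHIFT      15
#define RPCS_SS_ENABLE    (1u << 11)
#define RPCS_SS_SHIFT     8
#define RPCS_EU_MAX_SHIFT 4
#define RPCS_EU_MIN_SHIFT 0

static uint32_t pack_rpcs(unsigned int slices, unsigned int subslices,
                          unsigned int eus, bool slice_pg,
                          bool subslice_pg, bool eu_pg)
{
        uint32_t rpcs = 0;

        if (slice_pg)
                rpcs |= RPCS_ENABLE | RPCS_S_ENABLE |
                        slices << RPCS_S_SHIFT;
        if (subslice_pg)
                rpcs |= RPCS_ENABLE | RPCS_SS_ENABLE |
                        subslices << RPCS_SS_SHIFT;
        if (eu_pg)
                rpcs |= RPCS_ENABLE |
                        eus << RPCS_EU_MIN_SHIFT |
                        eus << RPCS_EU_MAX_SHIFT;

        return rpcs;
}
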
@@ -2278,9 +2288,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 {
        u32 indirect_ctx_offset;
 
-       switch (INTEL_INFO(engine->dev)->gen) {
+       switch (INTEL_GEN(engine->i915)) {
        default:
-               MISSING_CASE(INTEL_INFO(engine->dev)->gen);
+               MISSING_CASE(INTEL_GEN(engine->i915));
                /* fall through */
        case 9:
                indirect_ctx_offset =
@@ -2296,13 +2306,12 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 }
 
 static int
-populate_lr_context(struct intel_context *ctx,
+populate_lr_context(struct i915_gem_context *ctx,
                    struct drm_i915_gem_object *ctx_obj,
                    struct intel_engine_cs *engine,
                    struct intel_ringbuffer *ringbuf)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = ctx->i915;
        struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
        void *vaddr;
        u32 *reg_state;
@@ -2340,7 +2349,7 @@ populate_lr_context(struct intel_context *ctx,
                       RING_CONTEXT_CONTROL(engine),
                       _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
                                          CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
-                                         (HAS_RESOURCE_STREAMER(dev) ?
+                                         (HAS_RESOURCE_STREAMER(dev_priv) ?
                                            CTX_CTRL_RS_CTX_ENABLE : 0)));
        ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
                       0);
@@ -2429,7 +2438,7 @@ populate_lr_context(struct intel_context *ctx,
        if (engine->id == RCS) {
                reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
                ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-                              make_rpcs(dev));
+                              make_rpcs(dev_priv));
        }
 
        i915_gem_object_unpin_map(ctx_obj);
@@ -2437,40 +2446,9 @@ populate_lr_context(struct intel_context *ctx,
        return 0;
 }
 
-/**
- * intel_lr_context_free() - free the LRC specific bits of a context
- * @ctx: the LR context to free.
- *
- * The real context freeing is done in i915_gem_context_free: this only
- * takes care of the bits that are LRC related: the per-engine backing
- * objects and the logical ringbuffer.
- */
-void intel_lr_context_free(struct intel_context *ctx)
-{
-       int i;
-
-       for (i = I915_NUM_ENGINES; --i >= 0; ) {
-               struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
-               struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
-
-               if (!ctx_obj)
-                       continue;
-
-               if (ctx == ctx->i915->kernel_context) {
-                       intel_unpin_ringbuffer_obj(ringbuf);
-                       i915_gem_object_ggtt_unpin(ctx_obj);
-                       i915_gem_object_unpin_map(ctx_obj);
-               }
-
-               WARN_ON(ctx->engine[i].pin_count);
-               intel_ringbuffer_free(ringbuf);
-               drm_gem_object_unreference(&ctx_obj->base);
-       }
-}
-
 /**
  * intel_lr_context_size() - return the size of the context for an engine
- * @ring: which engine to find the context size for
+ * @engine: which engine to find the context size for
  *
  * Each engine may require a different amount of space for a context image,
  * so when allocating (or copying) an image, this function can be used to
@@ -2486,11 +2464,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 {
        int ret = 0;
 
-       WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
+       WARN_ON(INTEL_GEN(engine->i915) < 8);
 
        switch (engine->id) {
        case RCS:
-               if (INTEL_INFO(engine->dev)->gen >= 9)
+               if (INTEL_GEN(engine->i915) >= 9)
                        ret = GEN9_LR_CONTEXT_RENDER_SIZE;
                else
                        ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2507,9 +2485,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
 }
 
 /**
- * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
+ * execlists_context_deferred_alloc() - create the LRC specific bits of a context
  * @ctx: LR context to create.
- * @ring: engine to be used with the context.
+ * @engine: engine to be used with the context.
  *
  * This function can be called more than once, with different engines, if we plan
  * to use the context with them. The context backing objects and the ringbuffers
@@ -2519,31 +2497,29 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
  *
  * Return: non-zero on error.
  */
-
-int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-                                   struct intel_engine_cs *engine)
+static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+                                           struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
        struct drm_i915_gem_object *ctx_obj;
+       struct intel_context *ce = &ctx->engine[engine->id];
        uint32_t context_size;
        struct intel_ringbuffer *ringbuf;
        int ret;
 
-       WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
-       WARN_ON(ctx->engine[engine->id].state);
+       WARN_ON(ce->state);
 
        context_size = round_up(intel_lr_context_size(engine), 4096);
 
        /* One extra page as the sharing data between driver and GuC */
        context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 
-       ctx_obj = i915_gem_alloc_object(dev, context_size);
-       if (!ctx_obj) {
+       ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
+       if (IS_ERR(ctx_obj)) {
                DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
-               return -ENOMEM;
+               return PTR_ERR(ctx_obj);
        }
 
-       ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
+       ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
        if (IS_ERR(ringbuf)) {
                ret = PTR_ERR(ringbuf);
                goto error_deref_obj;
@@ -2555,48 +2531,29 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                goto error_ringbuf;
        }
 
-       ctx->engine[engine->id].ringbuf = ringbuf;
-       ctx->engine[engine->id].state = ctx_obj;
+       ce->ringbuf = ringbuf;
+       ce->state = ctx_obj;
+       ce->initialised = engine->init_context == NULL;
 
-       if (ctx != ctx->i915->kernel_context && engine->init_context) {
-               struct drm_i915_gem_request *req;
-
-               req = i915_gem_request_alloc(engine, ctx);
-               if (IS_ERR(req)) {
-                       ret = PTR_ERR(req);
-                       DRM_ERROR("ring create req: %d\n", ret);
-                       goto error_ringbuf;
-               }
-
-               ret = engine->init_context(req);
-               i915_add_request_no_flush(req);
-               if (ret) {
-                       DRM_ERROR("ring init context: %d\n",
-                               ret);
-                       goto error_ringbuf;
-               }
-       }
        return 0;
 
 error_ringbuf:
        intel_ringbuffer_free(ringbuf);
 error_deref_obj:
        drm_gem_object_unreference(&ctx_obj->base);
-       ctx->engine[engine->id].ringbuf = NULL;
-       ctx->engine[engine->id].state = NULL;
+       ce->ringbuf = NULL;
+       ce->state = NULL;
        return ret;
 }
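
The allocation paths in this hunk switch from NULL-on-failure to ERR_PTR-encoded errors, so callers propagate the real errno instead of guessing -ENOMEM. The encoding trick fits in a few lines (a user-space rendition of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR()):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

/* Small negative errnos map into the top, never-valid, page of the
 * address space, so one pointer carries either an object or a reason.
 */
static void *ERR_PTR(long err)       { return (void *)err; }
static long  PTR_ERR(const void *p)  { return (long)p; }
static int   IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *object_create(size_t size)
{
        void *obj = malloc(size);

        return obj ? obj : ERR_PTR(-ENOMEM);
}

int main(void)
{
        void *obj = object_create(4096);

        if (IS_ERR(obj)) {
                fprintf(stderr, "create failed: %ld\n", PTR_ERR(obj));
                return 1;
        }
        free(obj);
        return 0;
}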
 
 void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-                           struct intel_context *ctx)
+                           struct i915_gem_context *ctx)
 {
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv) {
-               struct drm_i915_gem_object *ctx_obj =
-                               ctx->engine[engine->id].state;
-               struct intel_ringbuffer *ringbuf =
-                               ctx->engine[engine->id].ringbuf;
+               struct intel_context *ce = &ctx->engine[engine->id];
+               struct drm_i915_gem_object *ctx_obj = ce->state;
                void *vaddr;
                uint32_t *reg_state;
 
@@ -2615,7 +2572,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 
                i915_gem_object_unpin_map(ctx_obj);
 
-               ringbuf->head = 0;
-               ringbuf->tail = 0;
+               ce->ringbuf->head = 0;
+               ce->ringbuf->tail = 0;
        }
 }
index 60a7385bc531639090ac2ced24b2eb6c4003680a..2b8255c19dccf59e1dfee6c6614673676c166cb1 100644
 #define GEN8_CSB_READ_PTR(csb_status) \
        (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
 
+enum {
+       INTEL_CONTEXT_SCHEDULE_IN = 0,
+       INTEL_CONTEXT_SCHEDULE_OUT,
+};
+
 /* Logical Rings */
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
@@ -99,30 +104,27 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
 #define LRC_PPHWSP_PN  (LRC_GUCSHR_PN + 1)
 #define LRC_STATE_PN   (LRC_PPHWSP_PN + 1)
 
-void intel_lr_context_free(struct intel_context *ctx);
+struct i915_gem_context;
+
 uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
-int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-                                   struct intel_engine_cs *engine);
-void intel_lr_context_unpin(struct intel_context *ctx,
+void intel_lr_context_unpin(struct i915_gem_context *ctx,
                            struct intel_engine_cs *engine);
 
 struct drm_i915_private;
 
 void intel_lr_context_reset(struct drm_i915_private *dev_priv,
-                           struct intel_context *ctx);
-uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
+                           struct i915_gem_context *ctx);
+uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
                                     struct intel_engine_cs *engine);
 
-u32 intel_execlists_ctx_id(struct intel_context *ctx,
-                          struct intel_engine_cs *engine);
-
 /* Execlists */
-int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
+                                   int enable_execlists);
 struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
                               struct list_head *vmas);
 
-void intel_execlists_retire_requests(struct intel_engine_cs *engine);
+void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
 
 #endif /* _INTEL_LRC_H_ */
index bc53c0dd34d082cc53ad2ca4b398b9d5ba85a1f5..49550470483ea6aaacfae338052fe1536ae09d1d 100644 (file)
@@ -72,7 +72,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        enum intel_display_power_domain power_domain;
        u32 tmp;
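
The dev->dev_private to to_i915(dev) conversion above recurs throughout the hunks that follow. A minimal sketch of what the helper reduces to, assuming the embedded-drm_device layout the rest of this diff relies on via dev_priv->drm (the real definition lives in i915_drv.h, outside this diff):

    static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
    {
            /* drm_device is embedded in the i915 private, so this is a
             * type-safe container_of instead of a void *dev_private cast. */
            return container_of(dev, struct drm_i915_private, drm);
    }
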
@@ -106,7 +106,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
                                  struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        u32 tmp, flags = 0;
 
@@ -140,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        int pipe = crtc->pipe;
@@ -184,13 +184,13 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
         * panels behave in the two modes. For now, let's just maintain the
         * value we got from the BIOS.
         */
-        temp &= ~LVDS_A3_POWER_MASK;
-        temp |= lvds_encoder->a3_power;
+       temp &= ~LVDS_A3_POWER_MASK;
+       temp |= lvds_encoder->a3_power;
 
        /* Set the dithering flag on LVDS as needed. Note that there is no
         * special LVDS dither control bit on PCH-split platforms; dithering is
         * only controlled through the PIPECONF reg. */
-       if (INTEL_INFO(dev)->gen == 4) {
+       if (IS_GEN4(dev_priv)) {
                /* Bspec wording suggests that LVDS port dithering only exists
                 * for 18bpp panels. */
                if (crtc->config->dither && crtc->config->pipe_bpp == 18)
@@ -216,7 +216,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct intel_connector *intel_connector =
                &lvds_encoder->attached_connector->base;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ctl_reg, stat_reg;
 
        if (HAS_PCH_SPLIT(dev)) {
@@ -231,7 +231,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 
        I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
        POSTING_READ(lvds_encoder->reg);
-       if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+       if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
                DRM_ERROR("timed out waiting for panel to power on\n");
 
        intel_panel_enable_backlight(intel_connector);
@@ -241,7 +241,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t ctl_reg, stat_reg;
 
        if (HAS_PCH_SPLIT(dev)) {
@@ -253,7 +253,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
        }
 
        I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
-       if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+       if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
                DRM_ERROR("timed out waiting for panel to power off\n");
 
        I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
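
Both open-coded panel power polls in this file collapse into intel_wait_for_register(). A usage sketch, assuming the helper's contract is to poll until (I915_READ(reg) & mask) == value, returning 0 on success and an error after timeout_ms (the implementation is outside this diff); the same PP_ON mask serves both directions, only the expected value differs:

    /* power on: wait for PP_ON to become set */
    if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
            DRM_ERROR("timed out waiting for panel to power on\n");

    /* power off: wait for PP_ON to become clear */
    if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
            DRM_ERROR("timed out waiting for panel to power off\n");
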
@@ -442,7 +442,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
                container_of(nb, struct intel_lvds_connector, lid_notifier);
        struct drm_connector *connector = &lvds_connector->base.base;
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
                return NOTIFY_OK;
@@ -547,7 +547,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
        .get_modes = intel_lvds_get_modes,
        .mode_valid = intel_lvds_mode_valid,
-       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -556,6 +555,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_lvds_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
+       .late_register = intel_connector_register,
+       .early_unregister = intel_connector_unregister,
        .destroy = intel_lvds_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
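
The new late_register/early_unregister hooks let the DRM core drive connector registration: the explicit drm_connector_register() call removed further down in intel_lvds_init() moves behind .late_register, while backlight setup moves earlier in the init path. A hedged sketch of the resulting shape (example_connector_funcs is illustrative, and intel_connector_register() presumably performs the backlight registration made non-static in intel_panel.c below):

    static const struct drm_connector_funcs example_connector_funcs = {
            /* ... detect, fill_modes, property hooks ... */
            .late_register = intel_connector_register,     /* run by drm core at drm_dev_register() time */
            .early_unregister = intel_connector_unregister,
            /* ... destroy, atomic state helpers ... */
    };
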
@@ -810,27 +811,29 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
        { }     /* terminating entry */
 };
 
-bool intel_is_dual_link_lvds(struct drm_device *dev)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
 {
-       struct intel_encoder *encoder;
-       struct intel_lvds_encoder *lvds_encoder;
+       struct intel_encoder *intel_encoder;
 
-       for_each_intel_encoder(dev, encoder) {
-               if (encoder->type == INTEL_OUTPUT_LVDS) {
-                       lvds_encoder = to_lvds_encoder(&encoder->base);
+       for_each_intel_encoder(dev, intel_encoder)
+               if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+                       return intel_encoder;
 
-                       return lvds_encoder->is_dual_link;
-               }
-       }
+       return NULL;
+}
 
-       return false;
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+       struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
+
+       return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
 }
 
 static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
 {
        struct drm_device *dev = lvds_encoder->base.base.dev;
        unsigned int val;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* use the module option value if specified */
        if (i915.lvds_channel_mode > 0)
@@ -880,7 +883,7 @@ static bool intel_lvds_supported(struct drm_device *dev)
  */
 void intel_lvds_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_lvds_encoder *lvds_encoder;
        struct intel_encoder *intel_encoder;
        struct intel_lvds_connector *lvds_connector;
@@ -978,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
                           DRM_MODE_CONNECTOR_LVDS);
 
        drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
-                        DRM_MODE_ENCODER_LVDS, NULL);
+                        DRM_MODE_ENCODER_LVDS, "LVDS");
 
        intel_encoder->enable = intel_enable_lvds;
        intel_encoder->pre_enable = intel_pre_enable_lvds;
@@ -992,7 +995,6 @@ void intel_lvds_init(struct drm_device *dev)
        intel_encoder->get_hw_state = intel_lvds_get_hw_state;
        intel_encoder->get_config = intel_lvds_get_config;
        intel_connector->get_hw_state = intel_connector_get_hw_state;
-       intel_connector->unregister = intel_connector_unregister;
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        intel_encoder->type = INTEL_OUTPUT_LVDS;
@@ -1082,6 +1084,8 @@ void intel_lvds_init(struct drm_device *dev)
                fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode) {
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+                       connector->display_info.width_mm = fixed_mode->width_mm;
+                       connector->display_info.height_mm = fixed_mode->height_mm;
                        goto out;
                }
        }
@@ -1117,6 +1121,7 @@ out:
        mutex_unlock(&dev->mode_config.mutex);
 
        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+       intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
        DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
@@ -1129,9 +1134,6 @@ out:
                DRM_DEBUG_KMS("lid notifier registration failed\n");
                lvds_connector->lid_notifier.notifier_call = NULL;
        }
-       drm_connector_register(connector);
-
-       intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        return;
 
index 6ba4bf7f2a89bc45eef78b49e42f336666fafc3a..927825f5b28447f95fd021ca5af9c62f95eda4a3 100644 (file)
@@ -66,9 +66,10 @@ struct drm_i915_mocs_table {
 #define L3_WB                  3
 
 /* Target cache */
-#define ELLC                   0
-#define LLC                    1
-#define LLC_ELLC               2
+#define LE_TC_PAGETABLE                0
+#define LE_TC_LLC              1
+#define LE_TC_LLC_ELLC         2
+#define LE_TC_LLC_ELLC_ALT     3
 
 /*
  * MOCS tables
@@ -96,34 +97,67 @@ struct drm_i915_mocs_table {
  *       end.
  */
 static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
-       /* { 0x00000009, 0x0010 } */
-       { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
-       /* { 0x00000038, 0x0030 } */
-       { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
-       /* { 0x0000003b, 0x0030 } */
-       { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+       { /* 0x00000009 */
+         .control_value = LE_CACHEABILITY(LE_UC) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+
+         /* 0x0010 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
+       },
+       {
+         /* 0x00000038 */
+         .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+         /* 0x0030 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+       },
+       {
+         /* 0x0000003b */
+         .control_value = LE_CACHEABILITY(LE_WB) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+         /* 0x0030 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+       },
 };
 
 /* NOTE: the LE_TGT_CACHE is not used on Broxton */
 static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
-       /* { 0x00000009, 0x0010 } */
-       { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
-       /* { 0x00000038, 0x0030 } */
-       { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
-       /* { 0x0000003b, 0x0030 } */
-       { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-          LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-         (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+       {
+         /* 0x00000009 */
+         .control_value = LE_CACHEABILITY(LE_UC) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+
+         /* 0x0010 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
+       },
+       {
+         /* 0x00000038 */
+         .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+
+         /* 0x0030 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+       },
+       {
+         /* 0x00000039 */
+         .control_value = LE_CACHEABILITY(LE_UC) |
+                          LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+                          LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+                          LE_PFM(0) | LE_SCF(0),
+
+         /* 0x0030 */
+         .l3cc_value =    L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+       },
 };
 
 /**
@@ -156,6 +190,16 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
                          "Platform that should have a MOCS table does not.\n");
        }
 
+       /* WaDisableSkipCaching:skl,bxt,kbl */
+       if (IS_GEN9(dev_priv)) {
+               int i;
+
+               for (i = 0; i < table->size; i++)
+                       if (WARN_ON(table->table[i].l3cc_value &
+                                   (L3_ESC(1) | L3_SCC(0x7))))
+                               return false;
+       }
+
        return result;
 }
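
With the same L3 field layout, the WaDisableSkipCaching mask above works out to the low nibble of l3cc_value (a hedged decode):

    /* L3_ESC(1) | L3_SCC(0x7) = (1 << 0) | (0x7 << 1) = 0x0f,
     * so the gen9 check rejects any table entry that sets ESC or
     * any SCC bit, i.e. bits 3:0 must stay clear on skl/bxt/kbl. */
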
 
@@ -189,7 +233,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
  */
 int intel_mocs_init_engine(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_mocs_table table;
        unsigned int index;
 
index 38a4c8ce7e63a5da4506094d682ea06310cc9de7..f2584d0a01ab61cf09bc5561071e0c7d22130ffb 100644 (file)
@@ -82,7 +82,7 @@ void
 intel_attach_force_audio_property(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_property *prop;
 
        prop = dev_priv->force_audio_property;
@@ -109,7 +109,7 @@ void
 intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_property *prop;
 
        prop = dev_priv->broadcast_rgb_property;
index 99e26034ae8d0df783d706839b67b0fd2f161585..adca262d591ac02c46577ee05fa6741741433d41 100644 (file)
@@ -232,18 +232,36 @@ struct opregion_asle_ext {
 #define SWSCI_SBCB_POST_VBE_PM         SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
 #define SWSCI_SBCB_ENABLE_DISABLE_AUDIO        SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
 
-#define ACPI_OTHER_OUTPUT (0<<8)
-#define ACPI_VGA_OUTPUT (1<<8)
-#define ACPI_TV_OUTPUT (2<<8)
-#define ACPI_DIGITAL_OUTPUT (3<<8)
-#define ACPI_LVDS_OUTPUT (4<<8)
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT               0
+#define ACPI_DISPLAY_INDEX_MASK                        (0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT     4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK      (0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT                        8
+#define ACPI_DISPLAY_TYPE_MASK                 (0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER                        (0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA                  (1 << 8)
+#define ACPI_DISPLAY_TYPE_TV                   (2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL     (3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL     (4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT             12
+#define ACPI_VENDOR_SPECIFIC_MASK              (0xf << 12)
+#define ACPI_BIOS_CAN_DETECT                   (1 << 16)
+#define ACPI_DEPENDS_ON_VGA                    (1 << 17)
+#define ACPI_PIPE_ID_SHIFT                     18
+#define ACPI_PIPE_ID_MASK                      (7 << 18)
+#define ACPI_DEVICE_ID_SCHEME                  (1 << 31)
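
A hedged sketch of how these fields compose a _DOD device id; this mirrors the blind_set path later in this file, which ORs together (1 << 31) (the id-scheme bit), the display type and the index:

    /* first internal digital panel, index 0, ACPI id scheme */
    u32 did = ACPI_DEVICE_ID_SCHEME |              /* bit 31 */
              ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL | /* bits 11:8 = 4 */
              (0 << ACPI_DISPLAY_INDEX_SHIFT);     /* bits 3:0 */
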
 
 #define MAX_DSLP       1500
 
-static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+static int swsci(struct drm_i915_private *dev_priv,
+                u32 function, u32 parm, u32 *parm_out)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct opregion_swsci *swsci = dev_priv->opregion.swsci;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        u32 main_function, sub_function, scic;
        u16 swsci_val;
        u32 dslp;
@@ -293,16 +311,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
        swsci->scic = scic;
 
        /* Ensure SCI event is selected and event trigger is cleared. */
-       pci_read_config_word(dev->pdev, SWSCI, &swsci_val);
+       pci_read_config_word(pdev, SWSCI, &swsci_val);
        if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
                swsci_val |= SWSCI_SCISEL;
                swsci_val &= ~SWSCI_GSSCIE;
-               pci_write_config_word(dev->pdev, SWSCI, swsci_val);
+               pci_write_config_word(pdev, SWSCI, swsci_val);
        }
 
        /* Use event trigger to tell bios to check the mail. */
        swsci_val |= SWSCI_GSSCIE;
-       pci_write_config_word(dev->pdev, SWSCI, swsci_val);
+       pci_write_config_word(pdev, SWSCI, swsci_val);
 
        /* Poll for the result. */
 #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@@ -336,13 +354,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
 int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                                  bool enable)
 {
-       struct drm_device *dev = intel_encoder->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
        u32 parm = 0;
        u32 type = 0;
        u32 port;
 
        /* don't care about old stuff for now */
-       if (!HAS_DDI(dev))
+       if (!HAS_DDI(dev_priv))
                return 0;
 
        if (intel_encoder->type == INTEL_OUTPUT_DSI)
@@ -365,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
                type = DISPLAY_TYPE_CRT;
                break;
        case INTEL_OUTPUT_UNKNOWN:
-       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_DP:
        case INTEL_OUTPUT_HDMI:
        case INTEL_OUTPUT_DP_MST:
                type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
@@ -382,7 +400,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
 
        parm |= type << (16 + port * 3);
 
-       return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+       return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
 }
 
 static const struct {
@@ -396,27 +414,28 @@ static const struct {
        { PCI_D3cold,   0x04 },
 };
 
-int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+                                 pci_power_t state)
 {
        int i;
 
-       if (!HAS_DDI(dev))
+       if (!HAS_DDI(dev_priv))
                return 0;
 
        for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
                if (state == power_state_map[i].pci_power_state)
-                       return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+                       return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
                                     power_state_map[i].parm, NULL);
        }
 
        return -EINVAL;
 }
 
-static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_connector *connector;
        struct opregion_asle *asle = dev_priv->opregion.asle;
+       struct drm_device *dev = &dev_priv->drm;
 
        DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
@@ -449,7 +468,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
        return 0;
 }
 
-static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
 {
        /* alsi is the current ALS reading in lux. 0 indicates below sensor
           range, 0xffff indicates above sensor range. 1-0xfffe are valid */
@@ -457,13 +476,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
        return ASLC_ALS_ILLUM_FAILED;
 }
 
-static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
 {
        DRM_DEBUG_DRIVER("PWM freq is not supported\n");
        return ASLC_PWM_FREQ_FAILED;
 }
 
-static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
 {
        /* Panel fitting is currently controlled by the X code, so this is a
           noop until modesetting support works fully */
@@ -471,13 +490,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
        return ASLC_PFIT_FAILED;
 }
 
-static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
 {
        DRM_DEBUG_DRIVER("SROT is not supported\n");
        return ASLC_ROTATION_ANGLES_FAILED;
 }
 
-static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
 {
        if (!iuer)
                DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
@@ -495,7 +514,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
        return ASLC_BUTTON_ARRAY_FAILED;
 }
 
-static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
 {
        if (iuer & ASLE_IUER_CONVERTIBLE)
                DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
@@ -505,7 +524,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
        return ASLC_CONVERTIBLE_FAILED;
 }
 
-static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
 {
        if (iuer & ASLE_IUER_DOCKING)
                DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
@@ -515,7 +534,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
        return ASLC_DOCKING_FAILED;
 }
 
-static u32 asle_isct_state(struct drm_device *dev)
+static u32 asle_isct_state(struct drm_i915_private *dev_priv)
 {
        DRM_DEBUG_DRIVER("ISCT is not supported\n");
        return ASLC_ISCT_STATE_FAILED;
@@ -527,7 +546,6 @@ static void asle_work(struct work_struct *work)
                container_of(work, struct intel_opregion, asle_work);
        struct drm_i915_private *dev_priv =
                container_of(opregion, struct drm_i915_private, opregion);
-       struct drm_device *dev = dev_priv->dev;
        struct opregion_asle *asle = dev_priv->opregion.asle;
        u32 aslc_stat = 0;
        u32 aslc_req;
@@ -544,40 +562,38 @@ static void asle_work(struct work_struct *work)
        }
 
        if (aslc_req & ASLC_SET_ALS_ILLUM)
-               aslc_stat |= asle_set_als_illum(dev, asle->alsi);
+               aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
 
        if (aslc_req & ASLC_SET_BACKLIGHT)
-               aslc_stat |= asle_set_backlight(dev, asle->bclp);
+               aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
 
        if (aslc_req & ASLC_SET_PFIT)
-               aslc_stat |= asle_set_pfit(dev, asle->pfit);
+               aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
 
        if (aslc_req & ASLC_SET_PWM_FREQ)
-               aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+               aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
 
        if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
-               aslc_stat |= asle_set_supported_rotation_angles(dev,
+               aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
                                                        asle->srot);
 
        if (aslc_req & ASLC_BUTTON_ARRAY)
-               aslc_stat |= asle_set_button_array(dev, asle->iuer);
+               aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
 
        if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
-               aslc_stat |= asle_set_convertible(dev, asle->iuer);
+               aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
 
        if (aslc_req & ASLC_DOCKING_INDICATOR)
-               aslc_stat |= asle_set_docking(dev, asle->iuer);
+               aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
 
        if (aslc_req & ASLC_ISCT_STATE_CHANGE)
-               aslc_stat |= asle_isct_state(dev);
+               aslc_stat |= asle_isct_state(dev_priv);
 
        asle->aslc = aslc_stat;
 }
 
-void intel_opregion_asle_intr(struct drm_device *dev)
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        if (dev_priv->opregion.asle)
                schedule_work(&dev_priv->opregion.asle_work);
 }
@@ -658,10 +674,51 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
        }
 }
 
-static void intel_didl_outputs(struct drm_device *dev)
+static u32 acpi_display_type(struct drm_connector *connector)
+{
+       u32 display_type;
+
+       switch (connector->connector_type) {
+       case DRM_MODE_CONNECTOR_VGA:
+       case DRM_MODE_CONNECTOR_DVIA:
+               display_type = ACPI_DISPLAY_TYPE_VGA;
+               break;
+       case DRM_MODE_CONNECTOR_Composite:
+       case DRM_MODE_CONNECTOR_SVIDEO:
+       case DRM_MODE_CONNECTOR_Component:
+       case DRM_MODE_CONNECTOR_9PinDIN:
+       case DRM_MODE_CONNECTOR_TV:
+               display_type = ACPI_DISPLAY_TYPE_TV;
+               break;
+       case DRM_MODE_CONNECTOR_DVII:
+       case DRM_MODE_CONNECTOR_DVID:
+       case DRM_MODE_CONNECTOR_DisplayPort:
+       case DRM_MODE_CONNECTOR_HDMIA:
+       case DRM_MODE_CONNECTOR_HDMIB:
+               display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+               break;
+       case DRM_MODE_CONNECTOR_LVDS:
+       case DRM_MODE_CONNECTOR_eDP:
+       case DRM_MODE_CONNECTOR_DSI:
+               display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+               break;
+       case DRM_MODE_CONNECTOR_Unknown:
+       case DRM_MODE_CONNECTOR_VIRTUAL:
+               display_type = ACPI_DISPLAY_TYPE_OTHER;
+               break;
+       default:
+               MISSING_CASE(connector->connector_type);
+               display_type = ACPI_DISPLAY_TYPE_OTHER;
+               break;
+       }
+
+       return display_type;
+}
+
+static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_connector *connector;
        acpi_handle handle;
        struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -670,7 +727,7 @@ static void intel_didl_outputs(struct drm_device *dev)
        u32 temp, max_outputs;
        int i = 0;
 
-       handle = ACPI_HANDLE(&dev->pdev->dev);
+       handle = ACPI_HANDLE(&pdev->dev);
        if (!handle || acpi_bus_get_device(handle, &acpi_dev))
                return;
 
@@ -725,45 +782,25 @@ end:
 
 blind_set:
        i = 0;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               int output_type = ACPI_OTHER_OUTPUT;
+       list_for_each_entry(connector,
+                           &dev_priv->drm.mode_config.connector_list, head) {
+               int display_type = acpi_display_type(connector);
+
                if (i >= max_outputs) {
                        DRM_DEBUG_KMS("More than %u outputs in connector list\n",
                                      max_outputs);
                        return;
                }
-               switch (connector->connector_type) {
-               case DRM_MODE_CONNECTOR_VGA:
-               case DRM_MODE_CONNECTOR_DVIA:
-                       output_type = ACPI_VGA_OUTPUT;
-                       break;
-               case DRM_MODE_CONNECTOR_Composite:
-               case DRM_MODE_CONNECTOR_SVIDEO:
-               case DRM_MODE_CONNECTOR_Component:
-               case DRM_MODE_CONNECTOR_9PinDIN:
-                       output_type = ACPI_TV_OUTPUT;
-                       break;
-               case DRM_MODE_CONNECTOR_DVII:
-               case DRM_MODE_CONNECTOR_DVID:
-               case DRM_MODE_CONNECTOR_DisplayPort:
-               case DRM_MODE_CONNECTOR_HDMIA:
-               case DRM_MODE_CONNECTOR_HDMIB:
-                       output_type = ACPI_DIGITAL_OUTPUT;
-                       break;
-               case DRM_MODE_CONNECTOR_LVDS:
-                       output_type = ACPI_LVDS_OUTPUT;
-                       break;
-               }
+
                temp = get_did(opregion, i);
-               set_did(opregion, i, temp | (1 << 31) | output_type | i);
+               set_did(opregion, i, temp | (1 << 31) | display_type | i);
                i++;
        }
        goto end;
 }
 
-static void intel_setup_cadls(struct drm_device *dev)
+static void intel_setup_cadls(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        int i = 0;
        u32 disp_id;
@@ -780,17 +817,16 @@ static void intel_setup_cadls(struct drm_device *dev)
        } while (++i < 8 && disp_id != 0);
 }
 
-void intel_opregion_init(struct drm_device *dev)
+void intel_opregion_register(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
 
        if (!opregion->header)
                return;
 
        if (opregion->acpi) {
-               intel_didl_outputs(dev);
-               intel_setup_cadls(dev);
+               intel_didl_outputs(dev_priv);
+               intel_setup_cadls(dev_priv);
 
                /* Notify BIOS we are ready to handle ACPI video ext notifs.
                 * Right now, all the events are handled by the ACPI video module.
@@ -808,9 +844,8 @@ void intel_opregion_init(struct drm_device *dev)
        }
 }
 
-void intel_opregion_fini(struct drm_device *dev)
+void intel_opregion_unregister(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
 
        if (!opregion->header)
@@ -842,9 +877,8 @@ void intel_opregion_fini(struct drm_device *dev)
        opregion->lid_state = NULL;
 }
 
-static void swsci_setup(struct drm_device *dev)
+static void swsci_setup(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
        bool requested_callbacks = false;
        u32 tmp;
@@ -854,7 +888,7 @@ static void swsci_setup(struct drm_device *dev)
        opregion->swsci_sbcb_sub_functions = 1;
 
        /* We use GBDA to ask for supported GBDA calls. */
-       if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+       if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
                /* make the bits match the sub-function codes */
                tmp <<= 1;
                opregion->swsci_gbda_sub_functions |= tmp;
@@ -865,7 +899,7 @@ static void swsci_setup(struct drm_device *dev)
         * must not call interfaces that are not specifically requested by the
         * bios.
         */
-       if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+       if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
                /* here, the bits already match sub-function codes */
                opregion->swsci_sbcb_sub_functions |= tmp;
                requested_callbacks = true;
@@ -876,7 +910,7 @@ static void swsci_setup(struct drm_device *dev)
         * the callback is _requested_. But we still can't call interfaces that
         * are not requested.
         */
-       if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+       if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
                /* make the bits match the sub-function codes */
                u32 low = tmp & 0x7ff;
                u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -918,10 +952,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
        { }
 };
 
-int intel_opregion_setup(struct drm_device *dev)
+int intel_opregion_setup(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_opregion *opregion = &dev_priv->opregion;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        u32 asls, mboxes;
        char buf[sizeof(OPREGION_SIGNATURE)];
        int err = 0;
@@ -933,7 +967,7 @@ int intel_opregion_setup(struct drm_device *dev)
        BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
        BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
 
-       pci_read_config_dword(dev->pdev, ASLS, &asls);
+       pci_read_config_dword(pdev, ASLS, &asls);
        DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
        if (asls == 0) {
                DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
@@ -965,7 +999,7 @@ int intel_opregion_setup(struct drm_device *dev)
        if (mboxes & MBOX_SWSCI) {
                DRM_DEBUG_DRIVER("SWSCI supported\n");
                opregion->swsci = base + OPREGION_SWSCI_OFFSET;
-               swsci_setup(dev);
+               swsci_setup(dev_priv);
        }
 
        if (mboxes & MBOX_ASLE) {
@@ -1014,12 +1048,12 @@ err_out:
 }
 
 int
-intel_opregion_get_panel_type(struct drm_device *dev)
+intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
 {
        u32 panel_details;
        int ret;
 
-       ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+       ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
        if (ret) {
                DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
                              ret);
@@ -1038,5 +1072,16 @@ intel_opregion_get_panel_type(struct drm_device *dev)
                return -ENODEV;
        }
 
+       /*
+        * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
+        * low vswing for eDP, whereas the VBT panel type (2) gives us normal
+        * vswing instead. Low vswing results in some display flickers, so
+        * let's simply ignore the OpRegion panel type on SKL for now.
+        */
+       if (IS_SKYLAKE(dev_priv)) {
+               DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+               return -ENODEV;
+       }
+
        return ret - 1;
 }
index bd38e49f733406612cac8a72e3efb5a43b3516a2..3212d8806b5ad13b0f88cc1c1f56bc4ef2d953b8 100644 (file)
@@ -168,7 +168,7 @@ struct overlay_registers {
 };
 
 struct intel_overlay {
-       struct drm_device *dev;
+       struct drm_i915_private *i915;
        struct intel_crtc *crtc;
        struct drm_i915_gem_object *vid_bo;
        struct drm_i915_gem_object *old_vid_bo;
@@ -190,15 +190,15 @@ struct intel_overlay {
 static struct overlay_registers __iomem *
 intel_overlay_map_regs(struct intel_overlay *overlay)
 {
-       struct drm_i915_private *dev_priv = to_i915(overlay->dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct overlay_registers __iomem *regs;
 
-       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_wc(ggtt->mappable,
-                                        i915_gem_obj_ggtt_offset(overlay->reg_bo));
+               regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+                                        overlay->flip_addr,
+                                        PAGE_SIZE);
 
        return regs;
 }
@@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
 static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
                                     struct overlay_registers __iomem *regs)
 {
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
                io_mapping_unmap(regs);
 }
 
@@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 /* overlay needs to be disabled in OCMD reg */
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        struct drm_i915_gem_request *req;
        int ret;
 
        WARN_ON(overlay->active);
-       WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
+       WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
        req = i915_gem_request_alloc(engine, NULL);
        if (IS_ERR(req))
@@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 static int intel_overlay_continue(struct intel_overlay *overlay,
                                  bool load_polyphase_filter)
 {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        struct drm_i915_gem_request *req;
        u32 flip_addr = overlay->flip_addr;
@@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 /* overlay needs to be disabled in OCMD reg */
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        struct drm_i915_gem_request *req;
        u32 flip_addr = overlay->flip_addr;
@@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
        intel_ring_emit(engine, flip_addr);
        intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        /* turn overlay off */
-       if (IS_I830(dev)) {
+       if (IS_I830(dev_priv)) {
                /* Workaround: Don't disable the overlay fully, since otherwise
                 * it dies on the next OVERLAY_ON cmd. */
                intel_ring_emit(engine, MI_NOOP);
@@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
  */
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
        int ret;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
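
(The WARN_ON(!mutex_is_locked(...)) to lockdep_assert_held() conversions in this file trade an unconditional runtime check for one that compiles away without CONFIG_LOCKDEP and, when lockdep is enabled, verifies that the current task, not just anyone, holds the mutex. Sketch of the pattern:

    /* no-op unless CONFIG_LOCKDEP; with lockdep, checks the owner */
    lockdep_assert_held(&dev_priv->drm.struct_mutex);
)
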
 
        /* Only wait if there is actually an old frame to release, to
         * guarantee forward progress.
@@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format)
        }
 }
 
-static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
 {
        u32 mask, shift, ret;
-       if (IS_GEN2(dev)) {
+       if (IS_GEN2(dev_priv)) {
                mask = 0x1f;
                shift = 5;
        } else {
@@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
                shift = 6;
        }
        ret = ((offset + width + mask) >> shift) - (offset >> shift);
-       if (!IS_GEN2(dev))
+       if (!IS_GEN2(dev_priv))
                ret <<= 1;
        ret -= 1;
        return ret << 2;
@@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        int ret, tmp_width;
        struct overlay_registers __iomem *regs;
        bool scale_changed = false;
-       struct drm_device *dev = overlay->dev;
+       struct drm_i915_private *dev_priv = overlay->i915;
        u32 swidth, swidthsw, sheight, ostride;
        enum pipe pipe = overlay->crtc->pipe;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
        ret = intel_overlay_release_old_vid(overlay);
        if (ret != 0)
@@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                        goto out_unpin;
                }
                oconfig = OCONF_CC_OUT_8BIT;
-               if (IS_GEN4(overlay->dev))
+               if (IS_GEN4(dev_priv))
                        oconfig |= OCONF_CSC_MODE_BT709;
                oconfig |= pipe == 0 ?
                        OCONF_PIPE_A : OCONF_PIPE_B;
@@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                tmp_width = params->src_w;
 
        swidth = params->src_w;
-       swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
+       swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
        sheight = params->src_h;
        iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
        ostride = params->stride_Y;
@@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                int uv_vscale = uv_vsubsampling(params->format);
                u32 tmp_U, tmp_V;
                swidth |= (params->src_w/uv_hscale) << 16;
-               tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+               tmp_U = calc_swidthsw(dev_priv, params->offset_U,
                                      params->src_w/uv_hscale);
-               tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+               tmp_V = calc_swidthsw(dev_priv, params->offset_V,
                                      params->src_w/uv_hscale);
                swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
                sheight |= (params->src_h/uv_vscale) << 16;
@@ -840,7 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        overlay->old_vid_bo = overlay->vid_bo;
        overlay->vid_bo = new_bo;
 
-       intel_frontbuffer_flip(dev,
+       intel_frontbuffer_flip(&dev_priv->drm,
                               INTEL_FRONTBUFFER_OVERLAY(pipe));
 
        return 0;
@@ -852,12 +848,12 @@ out_unpin:
 
 int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct overlay_registers __iomem *regs;
-       struct drm_device *dev = overlay->dev;
        int ret;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+       WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
        ret = intel_overlay_recover_from_interrupt(overlay);
        if (ret != 0)
@@ -897,15 +893,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
 
 static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
 {
-       struct drm_device *dev = overlay->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = overlay->i915;
        u32 pfit_control = I915_READ(PFIT_CONTROL);
        u32 ratio;
 
        /* XXX: This is not the same logic as in the xorg driver, but more in
         * line with the intel documentation for the i965
         */
-       if (INTEL_INFO(dev)->gen >= 4) {
+       if (INTEL_GEN(dev_priv) >= 4) {
                /* on i965 use the PGM reg to read out the autoscaler values */
                ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
        } else {
@@ -948,7 +943,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
        return 0;
 }
 
-static int check_overlay_src(struct drm_device *dev,
+static int check_overlay_src(struct drm_i915_private *dev_priv,
                             struct drm_intel_overlay_put_image *rec,
                             struct drm_i915_gem_object *new_bo)
 {
@@ -959,7 +954,7 @@ static int check_overlay_src(struct drm_device *dev,
        u32 tmp;
 
        /* check src dimensions */
-       if (IS_845G(dev) || IS_I830(dev)) {
+       if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
                if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
                    rec->src_width  > IMAGE_MAX_WIDTH_LEGACY)
                        return -EINVAL;
@@ -1011,14 +1006,14 @@ static int check_overlay_src(struct drm_device *dev,
                return -EINVAL;
 
        /* stride checking */
-       if (IS_I830(dev) || IS_845G(dev))
+       if (IS_I830(dev_priv) || IS_845G(dev_priv))
                stride_mask = 255;
        else
                stride_mask = 63;
 
        if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
                return -EINVAL;
-       if (IS_GEN4(dev) && rec->stride_Y < 512)
+       if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
                return -EINVAL;
 
        tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1063,13 +1058,13 @@ static int check_overlay_src(struct drm_device *dev,
  * Return the pipe currently connected to the panel fitter,
  * or -1 if the panel fitter is not present or not in use
  */
-static int intel_panel_fitter_pipe(struct drm_device *dev)
+static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32  pfit_control;
 
        /* i830 doesn't have a panel fitter */
-       if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
+       if (INTEL_GEN(dev_priv) <= 3 &&
+           (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
                return -1;
 
        pfit_control = I915_READ(PFIT_CONTROL);
@@ -1079,18 +1074,18 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
                return -1;
 
        /* 965 can place panel fitter on either pipe */
-       if (IS_GEN4(dev))
+       if (IS_GEN4(dev_priv))
                return (pfit_control >> 29) & 0x3;
 
        /* older chips can only use pipe 1 */
        return 1;
 }
 
-int intel_overlay_put_image(struct drm_device *dev, void *data,
-                           struct drm_file *file_priv)
+int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_priv)
 {
        struct drm_intel_overlay_put_image *put_image_rec = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_overlay *overlay;
        struct drm_crtc *drmmode_crtc;
        struct intel_crtc *crtc;
@@ -1162,7 +1157,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 
                /* line too wide, i.e. one-line-mode */
                if (mode->hdisplay > 1024 &&
-                   intel_panel_fitter_pipe(dev) == crtc->pipe) {
+                   intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
                        overlay->pfit_active = true;
                        update_pfit_vscale_ratio(overlay);
                } else
@@ -1196,7 +1191,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
                goto out_unlock;
        }
 
-       ret = check_overlay_src(dev, put_image_rec, new_bo);
+       ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
        if (ret != 0)
                goto out_unlock;
        params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
@@ -1284,11 +1279,11 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
        return 0;
 }
 
-int intel_overlay_attrs(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv)
+int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv)
 {
        struct drm_intel_overlay_attrs *attrs = data;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_overlay *overlay;
        struct overlay_registers __iomem *regs;
        int ret;
@@ -1309,7 +1304,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
                attrs->contrast   = overlay->contrast;
                attrs->saturation = overlay->saturation;
 
-               if (!IS_GEN2(dev)) {
+               if (!IS_GEN2(dev_priv)) {
                        attrs->gamma0 = I915_READ(OGAMC0);
                        attrs->gamma1 = I915_READ(OGAMC1);
                        attrs->gamma2 = I915_READ(OGAMC2);
@@ -1341,7 +1336,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
                intel_overlay_unmap_regs(overlay, regs);
 
                if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
-                       if (IS_GEN2(dev))
+                       if (IS_GEN2(dev_priv))
                                goto out_unlock;
 
                        if (overlay->active) {
@@ -1371,37 +1366,37 @@ out_unlock:
        return ret;
 }
 
-void intel_setup_overlay(struct drm_device *dev)
+void intel_setup_overlay(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_overlay *overlay;
        struct drm_i915_gem_object *reg_bo;
        struct overlay_registers __iomem *regs;
        int ret;
 
-       if (!HAS_OVERLAY(dev))
+       if (!HAS_OVERLAY(dev_priv))
                return;
 
        overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
        if (!overlay)
                return;
 
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&dev_priv->drm.struct_mutex);
        if (WARN_ON(dev_priv->overlay))
                goto out_free;
 
-       overlay->dev = dev;
+       overlay->i915 = dev_priv;
 
        reg_bo = NULL;
-       if (!OVERLAY_NEEDS_PHYSICAL(dev))
-               reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
-       if (reg_bo == NULL)
-               reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
+       if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
+               reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
+                                                      PAGE_SIZE);
        if (reg_bo == NULL)
+               reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
+       if (IS_ERR(reg_bo))
                goto out_free;
        overlay->reg_bo = reg_bo;
 
-       if (OVERLAY_NEEDS_PHYSICAL(dev)) {
+       if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
                ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
                if (ret) {
                        DRM_ERROR("failed to attach phys overlay regs\n");
@@ -1441,25 +1436,23 @@ void intel_setup_overlay(struct drm_device *dev)
        intel_overlay_unmap_regs(overlay, regs);
 
        dev_priv->overlay = overlay;
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
        DRM_INFO("initialized overlay support\n");
        return;
 
 out_unpin_bo:
-       if (!OVERLAY_NEEDS_PHYSICAL(dev))
+       if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
                i915_gem_object_ggtt_unpin(reg_bo);
 out_free_bo:
        drm_gem_object_unreference(&reg_bo->base);
 out_free:
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
        kfree(overlay);
        return;
 }
 
-void intel_cleanup_overlay(struct drm_device *dev)
+void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        if (!dev_priv->overlay)
                return;
 
@@ -1482,18 +1475,17 @@ struct intel_overlay_error_state {
 static struct overlay_registers __iomem *
 intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 {
-       struct drm_i915_private *dev_priv = to_i915(overlay->dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct drm_i915_private *dev_priv = overlay->i915;
        struct overlay_registers __iomem *regs;
 
-       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
                /* Cast to make sparse happy, but it's wc memory anyway, so
                 * equivalent to the wc io mapping on X86. */
                regs = (struct overlay_registers __iomem *)
                        overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_atomic_wc(ggtt->mappable,
-                                               i915_gem_obj_ggtt_offset(overlay->reg_bo));
+               regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+                                               overlay->flip_addr);
 
        return regs;
 }
@@ -1501,15 +1493,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
                                        struct overlay_registers __iomem *regs)
 {
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev))
+       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
                io_mapping_unmap_atomic(regs);
 }
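
The atomic map/unmap pair above backs the error-capture path below. Since io_mapping_map_atomic_wc() takes an atomic kmap slot, no sleeping is allowed between map and unmap, and the pair must not be mixed with the sleeping io_mapping_unmap(). A hedged usage sketch (error->regs is assumed to be the register-snapshot field of the error state struct):

    regs = intel_overlay_map_regs_atomic(overlay);
    if (regs) {
            /* atomic context: plain register snapshot, no sleeping */
            memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
            intel_overlay_unmap_regs_atomic(overlay, regs);
    }
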
 
-
 struct intel_overlay_error_state *
-intel_overlay_capture_error_state(struct drm_device *dev)
+intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_overlay *overlay = dev_priv->overlay;
        struct intel_overlay_error_state *error;
        struct overlay_registers __iomem *regs;
@@ -1523,10 +1513,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
 
        error->dovsta = I915_READ(DOVSTA);
        error->isr = I915_READ(ISR);
-       if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
-               error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
-       else
-               error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
+       error->base = overlay->flip_addr;
 
        regs = intel_overlay_map_regs_atomic(overlay);
        if (!regs)
index 8357d571553a56471ff42ab0f048bab7f52f475f..96c65d77e8866b0c25beadb46e342ecc8e37e9d6 100644 (file)
@@ -377,7 +377,7 @@ out:
 enum drm_connector_status
 intel_panel_detect(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Assume that the BIOS does not lie through the OpRegion... */
        if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
        if (panel->backlight.combination_mode) {
                u8 lbpc;
 
-               pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
+               pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
                val *= lbpc;
        }
 
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
 
                lbpc = level * 0xfe / panel->backlight.max + 1;
                level /= lbpc;
-               pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
+               pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
        }
 
        if (IS_GEN4(dev_priv)) {
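
In combination mode the requested level is factored into a coarse LBPC multiplier in PCI config space and a fine PWM duty cycle: lbpc = level * 0xfe / max + 1 picks the multiplier, level /= lbpc leaves the duty cycle, and the read path above multiplies them back together (val *= lbpc). A worked sketch of the split and the rounding error it tolerates (max and level are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t max = 100000;          /* panel->backlight.max, illustrative */
	uint32_t level = 60000;         /* requested brightness */

	uint8_t  lbpc = level * 0xfe / max + 1;  /* coarse multiplier, 1..0xff */
	uint32_t duty = level / lbpc;            /* fine PWM duty cycle */
	uint32_t readback = duty * lbpc;         /* get path: val *= lbpc */

	printf("lbpc=%u duty=%u readback=%u err=%d\n",
	       (unsigned)lbpc, (unsigned)duty, (unsigned)readback,
	       (int)(level - readback));        /* lbpc=153 duty=392 err=24 */
	return 0;
}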
@@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
         * backlight. This will leave the backlight on unnecessarily when
         * another client is not activated.
         */
-       if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+       if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
                DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
                return;
        }
@@ -1142,7 +1142,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
 {
        struct intel_connector *connector = bl_get_data(bd);
        struct drm_device *dev = connector->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 hw_level;
        int ret;
 
@@ -1163,7 +1163,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
        .get_brightness = intel_backlight_device_get_brightness,
 };
 
-static int intel_backlight_device_register(struct intel_connector *connector)
+int intel_backlight_device_register(struct intel_connector *connector)
 {
        struct intel_panel *panel = &connector->panel;
        struct backlight_properties props;
@@ -1216,7 +1216,7 @@ static int intel_backlight_device_register(struct intel_connector *connector)
        return 0;
 }
 
-static void intel_backlight_device_unregister(struct intel_connector *connector)
+void intel_backlight_device_unregister(struct intel_connector *connector)
 {
        struct intel_panel *panel = &connector->panel;
 
@@ -1225,14 +1225,6 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
                panel->backlight.device = NULL;
        }
 }
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
-{
-       return 0;
-}
-static void intel_backlight_device_unregister(struct intel_connector *connector)
-{
-}
 #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
 /*
@@ -1324,7 +1316,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
        struct drm_device *dev = connector->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int clock;
 
        if (IS_G4X(dev_priv))
@@ -1724,6 +1716,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
                container_of(panel, struct intel_connector, panel);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 
+       if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+           intel_dp_aux_init_backlight_funcs(connector) == 0)
+               return;
+
+       if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
+           intel_dsi_dcs_init_backlight_funcs(connector) == 0)
+               return;
+
        if (IS_BROXTON(dev_priv)) {
                panel->backlight.setup = bxt_setup_backlight;
                panel->backlight.enable = bxt_enable_backlight;
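
The added early returns turn the function into an ordered probe chain: connector-specific backends (eDP DPCD aux, DSI DCS) get first refusal and return 0 when they claim the panel, and only then does the platform PWM code fill in the vtable. A generic sketch of the pattern (the names and the -ENODEV convention are illustrative, not the i915 functions):

#include <stdio.h>
#include <errno.h>

enum connector_type { CONNECTOR_eDP, CONNECTOR_DSI, CONNECTOR_LVDS };

struct panel { const char *backend; };

/* Each backend claims the panel (return 0) or declines (-ENODEV). */
static int dp_aux_backlight_init(struct panel *p, int has_dpcd_ctl)
{
	if (!has_dpcd_ctl)
		return -ENODEV;         /* decline: fall through */
	p->backend = "eDP DPCD aux";
	return 0;
}

static int dsi_dcs_backlight_init(struct panel *p)
{
	p->backend = "DSI DCS";
	return 0;
}

static void init_backlight_funcs(struct panel *p, enum connector_type type,
				 int has_dpcd_ctl)
{
	if (type == CONNECTOR_eDP &&
	    dp_aux_backlight_init(p, has_dpcd_ctl) == 0)
		return;
	if (type == CONNECTOR_DSI && dsi_dcs_backlight_init(p) == 0)
		return;
	p->backend = "platform PWM";    /* fallback, as in the hunk above */
}

int main(void)
{
	struct panel p = { 0 };

	init_backlight_funcs(&p, CONNECTOR_eDP, 0);
	printf("eDP without DPCD control: %s\n", p.backend); /* platform PWM */
	return 0;
}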
@@ -1731,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
                panel->backlight.set = bxt_set_backlight;
                panel->backlight.get = bxt_get_backlight;
                panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
-       } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+       } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+                  HAS_PCH_KBP(dev_priv)) {
                panel->backlight.setup = lpt_setup_backlight;
                panel->backlight.enable = lpt_enable_backlight;
                panel->backlight.disable = lpt_disable_backlight;
@@ -1804,19 +1805,3 @@ void intel_panel_fini(struct intel_panel *panel)
                drm_mode_destroy(intel_connector->base.dev,
                                panel->downclock_mode);
 }
-
-void intel_backlight_register(struct drm_device *dev)
-{
-       struct intel_connector *connector;
-
-       for_each_intel_connector(dev, connector)
-               intel_backlight_device_register(connector);
-}
-
-void intel_backlight_unregister(struct drm_device *dev)
-{
-       struct intel_connector *connector;
-
-       for_each_intel_connector(dev, connector)
-               intel_backlight_device_unregister(connector);
-}
index a7ef45da0a9e8dc517f13a66f0f6429aa6334609..f4f3fcc8b3becb59c0ed5d6bdd5dd27a177f3731 100644 (file)
@@ -26,6 +26,7 @@
  */
 
 #include <linux/cpufreq.h>
+#include <drm/drm_plane_helper.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #define INTEL_RC6p_ENABLE                      (1<<1)
 #define INTEL_RC6pp_ENABLE                     (1<<2)
 
-static void bxt_init_clock_gating(struct drm_device *dev)
+static void gen9_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
+       I915_WRITE(CHICKEN_PAR1_1,
+                  I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+
+       I915_WRITE(GEN8_CONFIG0,
+                  I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
+
+       /* WaEnableChickenDCPR:skl,bxt,kbl */
+       I915_WRITE(GEN8_CHICKEN_DCPR_1,
+                  I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+
+       /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
+       /* WaFbcWakeMemOn:skl,bxt,kbl */
+       I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+                  DISP_FBC_WM_DIS |
+                  DISP_FBC_MEMORY_WAKE);
+
+       /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
+       I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+                  ILK_DPFC_DISABLE_DUMMY0);
+}
+
+static void bxt_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       gen9_init_clock_gating(dev);
+
        /* WaDisableSDEUnitClockGating:bxt */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -80,7 +109,7 @@ static void bxt_init_clock_gating(struct drm_device *dev)
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp;
 
        tmp = I915_READ(CLKCFG);
@@ -119,7 +148,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
 
 static void i915_ironlake_get_mem_freq(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u16 ddrpll, csipll;
 
        ddrpll = I915_READ16(DDRMPLL1);
@@ -290,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
 
 void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        u32 val;
 
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -346,7 +375,7 @@ static const int pessimal_latency_ns = 5000;
 static int vlv_get_fifo_size(struct drm_device *dev,
                              enum pipe pipe, int plane)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int sprite0_start, sprite1_start, size;
 
        switch (pipe) {
@@ -397,7 +426,7 @@ static int vlv_get_fifo_size(struct drm_device *dev,
 
 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dsparb = I915_READ(DSPARB);
        int size;
 
@@ -413,7 +442,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 
 static int i830_get_fifo_size(struct drm_device *dev, int plane)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dsparb = I915_READ(DSPARB);
        int size;
 
@@ -430,7 +459,7 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
 
 static int i845_get_fifo_size(struct drm_device *dev, int plane)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dsparb = I915_READ(DSPARB);
        int size;
 
@@ -608,7 +637,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
        struct drm_device *dev = unused_crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        const struct cxsr_latency *latency;
        u32 reg;
@@ -905,7 +934,7 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
 
 static void vlv_setup_wm_latency(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* all latencies in usec */
        dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1296,7 +1325,7 @@ static void vlv_merge_wm(struct drm_device *dev,
 static void vlv_update_wm(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum pipe pipe = intel_crtc->pipe;
        struct vlv_wm_values wm = {};
@@ -1352,7 +1381,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        static const int sr_latency_ns = 12000;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
@@ -1409,7 +1438,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
        struct drm_device *dev = unused_crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        int srwm = 1;
        int cursor_sr = 16;
@@ -1483,7 +1512,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
        struct drm_device *dev = unused_crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        const struct intel_watermark_params *wm_info;
        uint32_t fwater_lo;
        uint32_t fwater_hi;
@@ -1613,7 +1642,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 static void i845_update_wm(struct drm_crtc *unused_crtc)
 {
        struct drm_device *dev = unused_crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
        uint32_t fwater_lo;
@@ -2012,10 +2041,10 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 }
 
 static uint32_t
-hsw_compute_linetime_wm(struct drm_device *dev,
-                       struct intel_crtc_state *cstate)
+hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct intel_atomic_state *intel_state =
+               to_intel_atomic_state(cstate->base.state);
        const struct drm_display_mode *adjusted_mode =
                &cstate->base.adjusted_mode;
        u32 linetime, ips_linetime;
@@ -2024,7 +2053,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
                return 0;
        if (WARN_ON(adjusted_mode->crtc_clock == 0))
                return 0;
-       if (WARN_ON(dev_priv->cdclk_freq == 0))
+       if (WARN_ON(intel_state->cdclk == 0))
                return 0;
 
        /* The WM are computed based on how long it takes to fill a single
         * row at the given clock rate, divided by 8. */
@@ -2033,7 +2062,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
        linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
                                     adjusted_mode->crtc_clock);
        ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
-                                        dev_priv->cdclk_freq);
+                                        intel_state->cdclk);
 
        return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
               PIPE_WM_LINETIME_TIME(linetime);
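
The linetime value is the scan-out time of one line in 1/8-microsecond units: crtc_htotal * 1000 * 8 / crtc_clock, with the clock in kHz. A worked sketch for an illustrative 1080p timing (htotal 2200, pixel clock 148500 kHz):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	uint32_t crtc_htotal = 2200;    /* illustrative 1080p timing */
	uint32_t crtc_clock = 148500;   /* pixel clock in kHz */

	/* one line = htotal pixels at crtc_clock kHz, in 1/8 us units */
	uint32_t linetime = DIV_ROUND_CLOSEST(crtc_htotal * 1000 * 8,
					      crtc_clock);

	printf("linetime = %u (1/8 us) = %.3f us\n",
	       (unsigned)linetime, linetime / 8.0); /* 119 -> 14.875 us */
	return 0;
}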
@@ -2041,7 +2070,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
 
 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (IS_GEN9(dev)) {
                uint32_t val;
@@ -2146,14 +2175,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
 {
        /* ILK sprite LP0 latency is 1300 ns */
-       if (INTEL_INFO(dev)->gen == 5)
+       if (IS_GEN5(dev))
                wm[0] = 13;
 }
 
 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
 {
        /* ILK cursor LP0 latency is 1300 ns */
-       if (INTEL_INFO(dev)->gen == 5)
+       if (IS_GEN5(dev))
                wm[0] = 13;
 
        /* WaDoubleCursorLP3Latency:ivb */
@@ -2207,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
                                    uint16_t wm[5], uint16_t min)
 {
-       int level, max_level = ilk_wm_max_level(dev_priv->dev);
+       int level, max_level = ilk_wm_max_level(&dev_priv->drm);
 
        if (wm[0] >= min)
                return false;
@@ -2221,7 +2250,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 
 static void snb_wm_latency_quirk(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        bool changed;
 
        /*
@@ -2243,7 +2272,7 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
 
 static void ilk_setup_wm_latency(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
 
@@ -2265,7 +2294,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
 
 static void skl_setup_wm_latency(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
        intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
@@ -2301,7 +2330,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
        struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
        struct intel_pipe_wm *pipe_wm;
        struct drm_device *dev = state->dev;
-       const struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane;
        struct intel_plane_state *pristate = NULL;
        struct intel_plane_state *sprstate = NULL;
@@ -2309,7 +2338,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
        int level, max_level = ilk_wm_max_level(dev), usable_level;
        struct ilk_wm_maximums max;
 
-       pipe_wm = &cstate->wm.optimal.ilk;
+       pipe_wm = &cstate->wm.ilk.optimal;
 
        for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
                struct intel_plane_state *ps;
@@ -2352,7 +2381,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
        pipe_wm->wm[0] = pipe_wm->raw_wm[0];
 
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-               pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
+               pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
 
        if (!ilk_validate_pipe_wm(dev, pipe_wm))
                return -EINVAL;
@@ -2391,7 +2420,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
                                       struct intel_crtc *intel_crtc,
                                       struct intel_crtc_state *newstate)
 {
-       struct intel_pipe_wm *a = &newstate->wm.intermediate;
+       struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
        struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
        int level, max_level = ilk_wm_max_level(dev);
 
@@ -2400,7 +2429,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
         * currently active watermarks to get values that are safe both before
         * and after the vblank.
         */
-       *a = newstate->wm.optimal.ilk;
+       *a = newstate->wm.ilk.optimal;
        a->pipe_enabled |= b->pipe_enabled;
        a->sprites_enabled |= b->sprites_enabled;
        a->sprites_scaled |= b->sprites_scaled;
@@ -2429,7 +2458,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
         * If our intermediate WM are identical to the final WM, then we can
         * omit the post-vblank programming; only update if it's different.
         */
-       if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0)
+       if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
                newstate->wm.need_postvbl_update = false;
 
        return 0;
@@ -2476,7 +2505,7 @@ static void ilk_wm_merge(struct drm_device *dev,
                         const struct ilk_wm_maximums *max,
                         struct intel_pipe_wm *merged)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int level, max_level = ilk_wm_max_level(dev);
        int last_enabled_level = max_level;
 
@@ -2536,7 +2565,7 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
 /* The value we need to program into the WM_LPx latency field */
 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                return 2 * level;
@@ -2736,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
                                struct ilk_wm_values *results)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct ilk_wm_values *previous = &dev_priv->wm.hw;
        unsigned int dirty;
        uint32_t val;
@@ -2811,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 
 bool ilk_disable_lp_wm(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
@@ -2849,20 +2878,29 @@ skl_wm_plane_id(const struct intel_plane *plane)
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
                                   const struct intel_crtc_state *cstate,
-                                  const struct intel_wm_config *config,
-                                  struct skl_ddb_entry *alloc /* out */)
+                                  struct skl_ddb_entry *alloc, /* out */
+                                  int *num_active /* out */)
 {
+       struct drm_atomic_state *state = cstate->base.state;
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *for_crtc = cstate->base.crtc;
-       struct drm_crtc *crtc;
        unsigned int pipe_size, ddb_size;
        int nth_active_pipe;
+       int pipe = to_intel_crtc(for_crtc)->pipe;
 
-       if (!cstate->base.active) {
+       if (WARN_ON(!state) || !cstate->base.active) {
                alloc->start = 0;
                alloc->end = 0;
+               *num_active = hweight32(dev_priv->active_crtcs);
                return;
        }
 
+       if (intel_state->active_pipe_changes)
+               *num_active = hweight32(intel_state->active_crtcs);
+       else
+               *num_active = hweight32(dev_priv->active_crtcs);
+
        if (IS_BROXTON(dev))
                ddb_size = BXT_DDB_SIZE;
        else
@@ -2870,25 +2908,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
 
        ddb_size -= 4; /* 4 blocks for bypass path allocation */
 
-       nth_active_pipe = 0;
-       for_each_crtc(dev, crtc) {
-               if (!to_intel_crtc(crtc)->active)
-                       continue;
-
-               if (crtc == for_crtc)
-                       break;
-
-               nth_active_pipe++;
+       /*
+        * If the state doesn't change the active CRTCs, then there's
+        * no need to recalculate; the existing pipe allocation limits
+        * should remain unchanged.  Note that we're safe from racing
+        * commits since any racing commit that changes the active CRTC
+        * list would need to grab _all_ crtc locks, including the one
+        * we currently hold.
+        */
+       if (!intel_state->active_pipe_changes) {
+               *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
+               return;
        }
 
-       pipe_size = ddb_size / config->num_pipes_active;
-       alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
+       nth_active_pipe = hweight32(intel_state->active_crtcs &
+                                   (drm_crtc_mask(for_crtc) - 1));
+       pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
+       alloc->start = nth_active_pipe * ddb_size / *num_active;
        alloc->end = alloc->start + pipe_size;
 }
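
The loop over CRTCs is gone: the pipe's position among the active pipes now falls out of bit arithmetic, since hweight32(active_crtcs & (drm_crtc_mask(for_crtc) - 1)) counts the active bits strictly below this pipe's bit. A sketch of the trick (mask values illustrative):

#include <stdio.h>
#include <stdint.h>

static int hweight32(uint32_t v)        /* population count */
{
	int n = 0;

	for (; v; v &= v - 1)
		n++;
	return n;
}

int main(void)
{
	uint32_t active_crtcs = 0xd;    /* binary 1101: pipes A, C, D active */
	int pipe = 2;                   /* pipe C */
	uint32_t crtc_mask = 1u << pipe;

	/* active bits strictly below ours = active pipes preceding this one */
	int nth_active_pipe = hweight32(active_crtcs & (crtc_mask - 1));
	int num_active = hweight32(active_crtcs);

	printf("pipe %d is active pipe #%d of %d\n",
	       pipe, nth_active_pipe, num_active);  /* #1 of 3 */
	return 0;
}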
 
-static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
+static unsigned int skl_cursor_allocation(int num_active)
 {
-       if (config->num_pipes_active == 1)
+       if (num_active == 1)
                return 32;
 
        return 8;
@@ -2932,6 +2974,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
        }
 }
 
+/*
+ * Determines the downscale amount of a plane for the purposes of watermark calculations.
+ * The bspec defines downscale amount as:
+ *
+ * """
+ * Horizontal down scale amount = maximum[1, Horizontal source size /
+ *                                           Horizontal destination size]
+ * Vertical down scale amount = maximum[1, Vertical source size /
+ *                                         Vertical destination size]
+ * Total down scale amount = Horizontal down scale amount *
+ *                           Vertical down scale amount
+ * """
+ *
+ * Return value is provided in 16.16 fixed point form to retain fractional part.
+ * Caller should take care of dividing & rounding off the value.
+ */
+static uint32_t
+skl_plane_downscale_amount(const struct intel_plane_state *pstate)
+{
+       uint32_t downscale_h, downscale_w;
+       uint32_t src_w, src_h, dst_w, dst_h;
+
+       if (WARN_ON(!pstate->visible))
+               return DRM_PLANE_HELPER_NO_SCALING;
+
+       /* n.b., src is 16.16 fixed point, dst is whole integer */
+       src_w = drm_rect_width(&pstate->src);
+       src_h = drm_rect_height(&pstate->src);
+       dst_w = drm_rect_width(&pstate->dst);
+       dst_h = drm_rect_height(&pstate->dst);
+       if (intel_rotation_90_or_270(pstate->base.rotation))
+               swap(dst_w, dst_h);
+
+       downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+       downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
+
+       /* Provide result in 16.16 fixed point */
+       return (uint64_t)downscale_w * downscale_h >> 16;
+}
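
The 16.16 arithmetic works because the source rectangle is already fixed point while the destination is integer pixels: src/dst directly yields a 16.16 ratio, and the product of two 16.16 ratios is shifted right by 16 to land back in 16.16. A worked sketch (sizes illustrative):

#include <stdio.h>
#include <stdint.h>

#define FP16(x) ((uint32_t)(x) << 16)   /* integer -> 16.16 fixed point */
#define NO_SCALING FP16(1)

int main(void)
{
	uint32_t src_w = FP16(4096), src_h = FP16(2160); /* src rect: 16.16 */
	uint32_t dst_w = 1920, dst_h = 1080;             /* dst: integer px */

	uint32_t down_w = src_w / dst_w;  /* 16.16 ratio (~2.133) */
	uint32_t down_h = src_h / dst_h;  /* 16.16 ratio (2.0) */
	if (down_w < NO_SCALING) down_w = NO_SCALING;    /* clamp, like max() */
	if (down_h < NO_SCALING) down_h = NO_SCALING;

	/* 16.16 * 16.16 gives 32.32; >> 16 lands back in 16.16 */
	uint32_t total = (uint64_t)down_w * down_h >> 16;

	printf("total downscale = %u.%04u (16.16 raw: %u)\n",
	       (unsigned)(total >> 16),
	       (unsigned)(((uint64_t)(total & 0xffff) * 10000) >> 16),
	       (unsigned)total);          /* 4.2666 */
	return 0;
}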
+
 static unsigned int
 skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
                             const struct drm_plane_state *pstate,
@@ -2939,7 +3021,16 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 {
        struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
        struct drm_framebuffer *fb = pstate->fb;
+       uint32_t down_scale_amount, data_rate;
        uint32_t width = 0, height = 0;
+       unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
+
+       if (!intel_pstate->visible)
+               return 0;
+       if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
+               return 0;
+       if (y && format != DRM_FORMAT_NV12)
+               return 0;
 
        width = drm_rect_width(&intel_pstate->src) >> 16;
        height = drm_rect_height(&intel_pstate->src) >> 16;
@@ -2948,17 +3039,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
                swap(width, height);
 
        /* for planar format */
-       if (fb->pixel_format == DRM_FORMAT_NV12) {
+       if (format == DRM_FORMAT_NV12) {
                if (y)  /* y-plane data rate */
-                       return width * height *
-                               drm_format_plane_cpp(fb->pixel_format, 0);
+                       data_rate = width * height *
+                               drm_format_plane_cpp(format, 0);
                else    /* uv-plane data rate */
-                       return (width / 2) * (height / 2) *
-                               drm_format_plane_cpp(fb->pixel_format, 1);
+                       data_rate = (width / 2) * (height / 2) *
+                               drm_format_plane_cpp(format, 1);
+       } else {
+               /* for packed formats */
+               data_rate = width * height * drm_format_plane_cpp(format, 0);
        }
 
-       /* for packed formats */
-       return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
+       down_scale_amount = skl_plane_downscale_amount(intel_pstate);
+
+       return (uint64_t)data_rate * down_scale_amount >> 16;
 }
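
For NV12 the two hardware surfaces are rated separately: the Y plane at full resolution with the first plane's cpp, the UV plane at half width and half height with the second plane's cpp; packed formats are the single width * height * cpp product. Worked numbers, as a sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t w = 3840, h = 2160;    /* plane source size, illustrative */

	uint32_t y_rate  = w * h * 1;             /* NV12 Y: cpp(plane 0) = 1 */
	uint32_t uv_rate = (w / 2) * (h / 2) * 2; /* NV12 UV: cpp(plane 1) = 2 */
	uint32_t packed  = w * h * 4;             /* e.g. XRGB8888, cpp = 4 */

	printf("NV12 y=%u uv=%u total=%u; XRGB8888=%u\n",
	       (unsigned)y_rate, (unsigned)uv_rate,
	       (unsigned)(y_rate + uv_rate), (unsigned)packed);
	return 0;
}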
 
 /*
@@ -2967,86 +3062,188 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
  *   3 * 4096 * 8192  * 4 < 2^32
  */
 static unsigned int
-skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate)
+skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
-       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_crtc_state *cstate = &intel_cstate->base;
+       struct drm_atomic_state *state = cstate->state;
+       struct drm_crtc *crtc = cstate->crtc;
+       struct drm_device *dev = crtc->dev;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       const struct drm_plane *plane;
        const struct intel_plane *intel_plane;
-       unsigned int total_data_rate = 0;
+       struct drm_plane_state *pstate;
+       unsigned int rate, total_data_rate = 0;
+       int id;
+       int i;
 
-       for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-               const struct drm_plane_state *pstate = intel_plane->base.state;
+       if (WARN_ON(!state))
+               return 0;
 
-               if (pstate->fb == NULL)
-                       continue;
+       /* Calculate and cache data rate for each plane */
+       for_each_plane_in_state(state, plane, pstate, i) {
+               id = skl_wm_plane_id(to_intel_plane(plane));
+               intel_plane = to_intel_plane(plane);
 
-               if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+               if (intel_plane->pipe != intel_crtc->pipe)
                        continue;
 
                /* packed/uv */
-               total_data_rate += skl_plane_relative_data_rate(cstate,
-                                                               pstate,
-                                                               0);
+               rate = skl_plane_relative_data_rate(intel_cstate,
+                                                   pstate, 0);
+               intel_cstate->wm.skl.plane_data_rate[id] = rate;
 
-               if (pstate->fb->pixel_format == DRM_FORMAT_NV12)
-                       /* y-plane */
-                       total_data_rate += skl_plane_relative_data_rate(cstate,
-                                                                       pstate,
-                                                                       1);
+               /* y-plane */
+               rate = skl_plane_relative_data_rate(intel_cstate,
+                                                   pstate, 1);
+               intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
        }
 
+       /* Calculate CRTC's total data rate from cached values */
+       for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+               int id = skl_wm_plane_id(intel_plane);
+
+               /* packed/uv */
+               total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
+               total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
+       }
+
+       WARN_ON(cstate->plane_mask && total_data_rate == 0);
+
        return total_data_rate;
 }
 
-static void
+static uint16_t
+skl_ddb_min_alloc(const struct drm_plane_state *pstate,
+                 const int y)
+{
+       struct drm_framebuffer *fb = pstate->fb;
+       struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+       uint32_t src_w, src_h;
+       uint32_t min_scanlines = 8;
+       uint8_t plane_bpp;
+
+       if (WARN_ON(!fb))
+               return 0;
+
+       /* For packed formats, no y-plane, return 0 */
+       if (y && fb->pixel_format != DRM_FORMAT_NV12)
+               return 0;
+
+       /* For Non Y-tile return 8-blocks */
+       if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
+           fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
+               return 8;
+
+       src_w = drm_rect_width(&intel_pstate->src) >> 16;
+       src_h = drm_rect_height(&intel_pstate->src) >> 16;
+
+       if (intel_rotation_90_or_270(pstate->rotation))
+               swap(src_w, src_h);
+
+       /* Halve UV plane width and height for NV12 */
+       if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
+               src_w /= 2;
+               src_h /= 2;
+       }
+
+       if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
+               plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+       else
+               plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
+       if (intel_rotation_90_or_270(pstate->rotation)) {
+               switch (plane_bpp) {
+               case 1:
+                       min_scanlines = 32;
+                       break;
+               case 2:
+                       min_scanlines = 16;
+                       break;
+               case 4:
+                       min_scanlines = 8;
+                       break;
+               case 8:
+                       min_scanlines = 4;
+                       break;
+               default:
+                       WARN(1, "Unsupported pixel depth %u for rotation",
+                            plane_bpp);
+                       min_scanlines = 32;
+               }
+       }
+
+       return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
+}
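
The closing formula converts four scanlines of the plane into 512-byte DDB blocks, rescales by min_scanlines/4, and adds 3 blocks of slack. For an unrotated 1920-wide XRGB plane (bpp 4, min_scanlines 8), a worked sketch:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t src_w = 1920;          /* plane width, illustrative */
	uint32_t plane_bpp = 4;         /* XRGB8888 */
	uint32_t min_scanlines = 8;     /* Y-tile default, unrotated */

	/* 512-byte blocks covering 4 scanlines, rescaled, plus 3 slack */
	uint32_t blocks = DIV_ROUND_UP(4 * src_w * plane_bpp, 512) *
			  min_scanlines / 4 + 3;

	printf("minimum allocation = %u blocks\n", (unsigned)blocks); /* 123 */
	return 0;
}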
+
+static int
 skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                      struct skl_ddb_allocation *ddb /* out */)
 {
+       struct drm_atomic_state *state = cstate->base.state;
        struct drm_crtc *crtc = cstate->base.crtc;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_wm_config *config = &dev_priv->wm.config;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_plane *intel_plane;
+       struct drm_plane *plane;
+       struct drm_plane_state *pstate;
        enum pipe pipe = intel_crtc->pipe;
        struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
        uint16_t alloc_size, start, cursor_blocks;
-       uint16_t minimum[I915_MAX_PLANES];
-       uint16_t y_minimum[I915_MAX_PLANES];
+       uint16_t *minimum = cstate->wm.skl.minimum_blocks;
+       uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
        unsigned int total_data_rate;
+       int num_active;
+       int id, i;
 
-       skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc);
+       if (WARN_ON(!state))
+               return 0;
+
+       if (!cstate->base.active) {
+               ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
+               memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
+               memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
+               return 0;
+       }
+
+       skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
        alloc_size = skl_ddb_entry_size(alloc);
        if (alloc_size == 0) {
                memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-               memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
-                      sizeof(ddb->plane[pipe][PLANE_CURSOR]));
-               return;
+               return 0;
        }
 
-       cursor_blocks = skl_cursor_allocation(config);
+       cursor_blocks = skl_cursor_allocation(num_active);
        ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
        ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
 
        alloc_size -= cursor_blocks;
-       alloc->end -= cursor_blocks;
 
        /* 1. Allocate the minimum required blocks for each active plane */
-       for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-               struct drm_plane *plane = &intel_plane->base;
-               struct drm_framebuffer *fb = plane->state->fb;
-               int id = skl_wm_plane_id(intel_plane);
+       for_each_plane_in_state(state, plane, pstate, i) {
+               intel_plane = to_intel_plane(plane);
+               id = skl_wm_plane_id(intel_plane);
 
-               if (!to_intel_plane_state(plane->state)->visible)
+               if (intel_plane->pipe != pipe)
                        continue;
 
-               if (plane->type == DRM_PLANE_TYPE_CURSOR)
+               if (!to_intel_plane_state(pstate)->visible) {
+                       minimum[id] = 0;
+                       y_minimum[id] = 0;
                        continue;
+               }
+               if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+                       minimum[id] = 0;
+                       y_minimum[id] = 0;
+                       continue;
+               }
 
-               minimum[id] = 8;
-               alloc_size -= minimum[id];
-               y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0;
-               alloc_size -= y_minimum[id];
+               minimum[id] = skl_ddb_min_alloc(pstate, 0);
+               y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+       }
+
+       for (i = 0; i < PLANE_CURSOR; i++) {
+               alloc_size -= minimum[i];
+               alloc_size -= y_minimum[i];
        }
 
        /*
@@ -3056,21 +3253,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
         * FIXME: we may not allocate every single block here.
         */
        total_data_rate = skl_get_total_relative_data_rate(cstate);
+       if (total_data_rate == 0)
+               return 0;
 
        start = alloc->start;
        for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-               struct drm_plane *plane = &intel_plane->base;
-               struct drm_plane_state *pstate = intel_plane->base.state;
                unsigned int data_rate, y_data_rate;
                uint16_t plane_blocks, y_plane_blocks = 0;
                int id = skl_wm_plane_id(intel_plane);
 
-               if (!to_intel_plane_state(pstate)->visible)
-                       continue;
-               if (plane->type == DRM_PLANE_TYPE_CURSOR)
-                       continue;
-
-               data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
+               data_rate = cstate->wm.skl.plane_data_rate[id];
 
                /*
                 * allocation for (packed formats) or (uv-plane part of planar format):
@@ -3081,30 +3273,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
                                        total_data_rate);
 
-               ddb->plane[pipe][id].start = start;
-               ddb->plane[pipe][id].end = start + plane_blocks;
+               /* Leave disabled planes at (0,0) */
+               if (data_rate) {
+                       ddb->plane[pipe][id].start = start;
+                       ddb->plane[pipe][id].end = start + plane_blocks;
+               }
 
                start += plane_blocks;
 
                /*
                 * allocation for y_plane part of planar format:
                 */
-               if (pstate->fb->pixel_format == DRM_FORMAT_NV12) {
-                       y_data_rate = skl_plane_relative_data_rate(cstate,
-                                                                  pstate,
-                                                                  1);
-                       y_plane_blocks = y_minimum[id];
-                       y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
-                                               total_data_rate);
+               y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
 
+               y_plane_blocks = y_minimum[id];
+               y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
+                                       total_data_rate);
+
+               if (y_data_rate) {
                        ddb->y_plane[pipe][id].start = start;
                        ddb->y_plane[pipe][id].end = start + y_plane_blocks;
-
-                       start += y_plane_blocks;
                }
 
+               start += y_plane_blocks;
        }
 
+       return 0;
 }
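
After the minima and the cursor are carved out, the remaining blocks are handed out in proportion to each plane's cached data rate, as consecutive [start, end) ranges: plane_blocks = minimum[id] + alloc_size * data_rate / total_data_rate. A sketch with two planes at a 2:1 rate ratio (all numbers illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* blocks left after carving out minima and the cursor */
	uint16_t alloc_size = 600;
	uint16_t minimum[2] = { 8, 8 };
	uint32_t data_rate[2] = { 2000000, 1000000 };   /* 2:1 ratio */
	uint32_t total = data_rate[0] + data_rate[1];
	uint16_t start = 0;
	int id;

	for (id = 0; id < 2; id++) {
		/* the minimum comes back on top of the proportional share */
		uint16_t blocks = minimum[id] +
			(uint16_t)((uint64_t)alloc_size * data_rate[id] / total);

		printf("plane %d: ddb [%u, %u)\n", id,
		       (unsigned)start, (unsigned)(start + blocks));
		start += blocks;
	}
	return 0;
}

This prints [0, 408) and [408, 616): the heavier plane gets twice the proportional share, and each plane's range starts where the previous one ended.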
 
 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
@@ -3161,35 +3355,41 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
        return ret;
 }
 
-static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
-                                      const struct intel_crtc *intel_crtc)
+static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
+                                             struct intel_plane_state *pstate)
 {
-       struct drm_device *dev = intel_crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
+       uint64_t adjusted_pixel_rate;
+       uint64_t downscale_amount;
+       uint64_t pixel_rate;
+
+       /* Shouldn't reach here on disabled planes... */
+       if (WARN_ON(!pstate->visible))
+               return 0;
 
        /*
-        * If ddb allocation of pipes changed, it may require recalculation of
-        * watermarks
+        * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
+        * with additional adjustments for plane-specific scaling.
         */
-       if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe)))
-               return true;
+       adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
+       downscale_amount = skl_plane_downscale_amount(pstate);
 
-       return false;
+       pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
+       WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
+
+       return pixel_rate;
 }
 
-static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
-                                struct intel_crtc_state *cstate,
-                                struct intel_plane *intel_plane,
-                                uint16_t ddb_allocation,
-                                int level,
-                                uint16_t *out_blocks, /* out */
-                                uint8_t *out_lines /* out */)
+static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
+                               struct intel_crtc_state *cstate,
+                               struct intel_plane_state *intel_pstate,
+                               uint16_t ddb_allocation,
+                               int level,
+                               uint16_t *out_blocks, /* out */
+                               uint8_t *out_lines, /* out */
+                               bool *enabled /* out */)
 {
-       struct drm_plane *plane = &intel_plane->base;
-       struct drm_framebuffer *fb = plane->state->fb;
-       struct intel_plane_state *intel_pstate =
-                                       to_intel_plane_state(plane->state);
+       struct drm_plane_state *pstate = &intel_pstate->base;
+       struct drm_framebuffer *fb = pstate->fb;
        uint32_t latency = dev_priv->wm.skl_latency[level];
        uint32_t method1, method2;
        uint32_t plane_bytes_per_line, plane_blocks_per_line;
@@ -3197,20 +3397,24 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
        uint32_t selected_result;
        uint8_t cpp;
        uint32_t width = 0, height = 0;
+       uint32_t plane_pixel_rate;
 
-       if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
-               return false;
+       if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
+               *enabled = false;
+               return 0;
+       }
 
        width = drm_rect_width(&intel_pstate->src) >> 16;
        height = drm_rect_height(&intel_pstate->src) >> 16;
 
-       if (intel_rotation_90_or_270(plane->state->rotation))
+       if (intel_rotation_90_or_270(pstate->rotation))
                swap(width, height);
 
        cpp = drm_format_plane_cpp(fb->pixel_format, 0);
-       method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
-                                cpp, latency);
-       method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
+       plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
+
+       method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
+       method2 = skl_wm_method2(plane_pixel_rate,
                                 cstate->base.adjusted_mode.crtc_htotal,
                                 width,
                                 cpp,
@@ -3224,7 +3428,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
            fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
                uint32_t min_scanlines = 4;
                uint32_t y_tile_minimum;
-               if (intel_rotation_90_or_270(plane->state->rotation)) {
+               if (intel_rotation_90_or_270(pstate->rotation)) {
                        int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
                                drm_format_plane_cpp(fb->pixel_format, 1) :
                                drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3260,40 +3464,100 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
                        res_blocks++;
        }
 
-       if (res_blocks >= ddb_allocation || res_lines > 31)
-               return false;
+       if (res_blocks >= ddb_allocation || res_lines > 31) {
+               *enabled = false;
+
+               /*
+                * If there are no valid level 0 watermarks, then we can't
+                * support this display configuration.
+                */
+               if (level) {
+                       return 0;
+               } else {
+                       DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
+                       DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
+                                     to_intel_crtc(cstate->base.crtc)->pipe,
+                                     skl_wm_plane_id(to_intel_plane(pstate->plane)),
+                                     res_blocks, ddb_allocation, res_lines);
+
+                       return -EINVAL;
+               }
+       }
 
        *out_blocks = res_blocks;
        *out_lines = res_lines;
+       *enabled = true;
 
-       return true;
+       return 0;
 }
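
The signature change is the point of this hunk: the old bool return ("is this level enabled?") becomes an int error code plus a bool out-parameter, so a level-0 watermark that cannot fit its DDB allocation fails the atomic check phase with -EINVAL instead of being silently disabled. A sketch of the contract (names illustrative):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* returns 0 or a negative errno; *enabled reports the level's status */
static int compute_level(int level, unsigned int blocks_needed,
			 unsigned int ddb_blocks, bool *enabled)
{
	if (blocks_needed >= ddb_blocks) {
		*enabled = false;
		/* level 0 must fit, or the whole configuration is invalid */
		return level == 0 ? -EINVAL : 0;
	}
	*enabled = true;
	return 0;
}

int main(void)
{
	bool en;

	printf("level 3: ret=%d\n", compute_level(3, 100, 64, &en)); /* 0 */
	printf("level 0: ret=%d\n", compute_level(0, 100, 64, &en)); /* -22 */
	return 0;
}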
 
-static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
-                                struct skl_ddb_allocation *ddb,
-                                struct intel_crtc_state *cstate,
-                                int level,
-                                struct skl_wm_level *result)
+static int
+skl_compute_wm_level(const struct drm_i915_private *dev_priv,
+                    struct skl_ddb_allocation *ddb,
+                    struct intel_crtc_state *cstate,
+                    int level,
+                    struct skl_wm_level *result)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_atomic_state *state = cstate->base.state;
        struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+       struct drm_plane *plane;
        struct intel_plane *intel_plane;
+       struct intel_plane_state *intel_pstate;
        uint16_t ddb_blocks;
        enum pipe pipe = intel_crtc->pipe;
+       int ret;
 
-       for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+       /*
+        * We'll only calculate watermarks for planes that are actually
+        * enabled, so make sure all other planes are set as disabled.
+        */
+       memset(result, 0, sizeof(*result));
+
+       for_each_intel_plane_mask(&dev_priv->drm,
+                                 intel_plane,
+                                 cstate->base.plane_mask) {
                int i = skl_wm_plane_id(intel_plane);
 
+               plane = &intel_plane->base;
+               intel_pstate = NULL;
+               if (state)
+                       intel_pstate =
+                               intel_atomic_get_existing_plane_state(state,
+                                                                     intel_plane);
+
+               /*
+                * Note: If we start supporting multiple pending atomic commits
+        * against the same planes/CRTCs in the future, plane->state
+                * will no longer be the correct pre-state to use for the
+                * calculations here and we'll need to change where we get the
+                * 'unchanged' plane data from.
+                *
+                * For now this is fine because we only allow one queued commit
+                * against a CRTC.  Even if the plane isn't modified by this
+                * transaction and we don't have a plane lock, we still have
+                * the CRTC's lock, so we know that no other transactions are
+                * racing with us to update it.
+                */
+               if (!intel_pstate)
+                       intel_pstate = to_intel_plane_state(plane->state);
+
+               WARN_ON(!intel_pstate->base.fb);
+
                ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
 
-               result->plane_en[i] = skl_compute_plane_wm(dev_priv,
-                                               cstate,
-                                               intel_plane,
-                                               ddb_blocks,
-                                               level,
-                                               &result->plane_res_b[i],
-                                               &result->plane_res_l[i]);
+               ret = skl_compute_plane_wm(dev_priv,
+                                          cstate,
+                                          intel_pstate,
+                                          ddb_blocks,
+                                          level,
+                                          &result->plane_res_b[i],
+                                          &result->plane_res_l[i],
+                                          &result->plane_en[i]);
+               if (ret)
+                       return ret;
        }
+
+       return 0;
 }
 
 static uint32_t
@@ -3327,21 +3591,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
        }
 }
 
-static void skl_compute_pipe_wm(struct intel_crtc_state *cstate,
-                               struct skl_ddb_allocation *ddb,
-                               struct skl_pipe_wm *pipe_wm)
+static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
+                            struct skl_ddb_allocation *ddb,
+                            struct skl_pipe_wm *pipe_wm)
 {
        struct drm_device *dev = cstate->base.crtc->dev;
-       const struct drm_i915_private *dev_priv = dev->dev_private;
+       const struct drm_i915_private *dev_priv = to_i915(dev);
        int level, max_level = ilk_wm_max_level(dev);
+       int ret;
 
        for (level = 0; level <= max_level; level++) {
-               skl_compute_wm_level(dev_priv, ddb, cstate,
-                                    level, &pipe_wm->wm[level]);
+               ret = skl_compute_wm_level(dev_priv, ddb, cstate,
+                                          level, &pipe_wm->wm[level]);
+               if (ret)
+                       return ret;
        }
        pipe_wm->linetime = skl_compute_linetime_wm(cstate);
 
        skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
+
+       return 0;
 }
 
 static void skl_compute_wm_results(struct drm_device *dev,
@@ -3414,14 +3683,16 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
                                const struct skl_wm_values *new)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;
 
        for_each_intel_crtc(dev, crtc) {
                int i, level, max_level = ilk_wm_max_level(dev);
                enum pipe pipe = crtc->pipe;
 
-               if (!new->dirty[pipe])
+               if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
+                       continue;
+               if (!crtc->active)
                        continue;
 
                I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
@@ -3509,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
                                struct skl_wm_values *new_values)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct skl_ddb_allocation *cur_ddb, *new_ddb;
        bool reallocated[I915_MAX_PIPES] = {};
        struct intel_crtc *crtc;
@@ -3588,116 +3859,182 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
        }
 }
 
-static bool skl_update_pipe_wm(struct drm_crtc *crtc,
-                              struct skl_ddb_allocation *ddb, /* out */
-                              struct skl_pipe_wm *pipe_wm /* out */)
+static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
+                             struct skl_ddb_allocation *ddb, /* out */
+                             struct skl_pipe_wm *pipe_wm, /* out */
+                             bool *changed /* out */)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+       struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
+       struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
+       int ret;
 
-       skl_allocate_pipe_ddb(cstate, ddb);
-       skl_compute_pipe_wm(cstate, ddb, pipe_wm);
+       ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+       if (ret)
+               return ret;
 
        if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
-               return false;
+               *changed = false;
+       else
+               *changed = true;
 
-       intel_crtc->wm.active.skl = *pipe_wm;
+       return 0;
+}
 
-       return true;
+static uint32_t
+pipes_modified(struct drm_atomic_state *state)
+{
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *cstate;
+       uint32_t i, ret = 0;
+
+       for_each_crtc_in_state(state, crtc, cstate, i)
+               ret |= drm_crtc_mask(crtc);
+
+       return ret;
 }
 
-static void skl_update_other_pipe_wm(struct drm_device *dev,
-                                    struct drm_crtc *crtc,
-                                    struct skl_wm_values *r)
+static int
+skl_compute_ddb(struct drm_atomic_state *state)
 {
+       struct drm_device *dev = state->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct intel_crtc *intel_crtc;
-       struct intel_crtc *this_crtc = to_intel_crtc(crtc);
+       struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+       uint32_t realloc_pipes = pipes_modified(state);
+       int ret;
 
        /*
-        * If the WM update hasn't changed the allocation for this_crtc (the
-        * crtc we are currently computing the new WM values for), other
-        * enabled crtcs will keep the same allocation and we don't need to
-        * recompute anything for them.
+        * If this is our first atomic update following hardware readout,
+        * we can't trust the DDB that the BIOS programmed for us.  Let's
+        * pretend that all pipes switched active status so that we'll
+        * ensure a full DDB recompute.
         */
-       if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
-               return;
+       if (dev_priv->wm.distrust_bios_wm)
+               intel_state->active_pipe_changes = ~0;
 
        /*
-        * Otherwise, because of this_crtc being freshly enabled/disabled, the
-        * other active pipes need new DDB allocation and WM values.
+        * If the modeset changes which CRTCs are active, we need to
+        * recompute the DDB allocation for *all* active pipes, even
+        * those that weren't otherwise being modified in any way by this
+        * atomic commit.  Due to the shrinking of the per-pipe allocations
+        * when new active CRTCs are added, it's possible for a pipe that
+        * we were already using and aren't changing at all here to suddenly
+        * become invalid if its DDB needs exceed its new allocation.
+        *
+        * Note that if we wind up doing a full DDB recompute, we can't let
+        * any other display updates race with this transaction, so we need
+        * to grab the lock on *all* CRTCs.
         */
-       for_each_intel_crtc(dev, intel_crtc) {
-               struct skl_pipe_wm pipe_wm = {};
-               bool wm_changed;
-
-               if (this_crtc->pipe == intel_crtc->pipe)
-                       continue;
-
-               if (!intel_crtc->active)
-                       continue;
+       if (intel_state->active_pipe_changes) {
+               realloc_pipes = ~0;
+               intel_state->wm_results.dirty_pipes = ~0;
+       }
 
-               wm_changed = skl_update_pipe_wm(&intel_crtc->base,
-                                               &r->ddb, &pipe_wm);
+       for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
+               struct intel_crtc_state *cstate;
 
-               /*
-                * If we end up re-computing the other pipe WM values, it's
-                * because it was really needed, so we expect the WM values to
-                * be different.
-                */
-               WARN_ON(!wm_changed);
+               cstate = intel_atomic_get_crtc_state(state, intel_crtc);
+               if (IS_ERR(cstate))
+                       return PTR_ERR(cstate);
 
-               skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
-               r->dirty[intel_crtc->pipe] = true;
+               ret = skl_allocate_pipe_ddb(cstate, ddb);
+               if (ret)
+                       return ret;
        }
+
+       return 0;
 }
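
The widening of realloc_pipes above is the crux of skl_compute_ddb(). A compilable sketch of just that decision, with the i915 state reduced to plain parameters (the names here are illustrative, not the driver's API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Which pipes need their DDB slice recomputed for this commit? */
    static uint32_t ddb_realloc_set(uint32_t modified_pipes,
                                    bool distrust_bios_wm,
                                    uint32_t active_pipe_changes)
    {
            /* First commit after hardware readout: trust nothing. */
            if (distrust_bios_wm)
                    active_pipe_changes = ~0u;

            /* A change in the set of active pipes can shrink every
             * per-pipe slice, so all pipes must be reallocated. */
            if (active_pipe_changes)
                    return ~0u;

            return modified_pipes;
    }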
 
-static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
+static int
+skl_compute_wm(struct drm_atomic_state *state)
 {
-       watermarks->wm_linetime[pipe] = 0;
-       memset(watermarks->plane[pipe], 0,
-              sizeof(uint32_t) * 8 * I915_MAX_PLANES);
-       memset(watermarks->plane_trans[pipe],
-              0, sizeof(uint32_t) * I915_MAX_PLANES);
-       watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *cstate;
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct skl_wm_values *results = &intel_state->wm_results;
+       struct skl_pipe_wm *pipe_wm;
+       bool changed = false;
+       int ret, i;
 
-       /* Clear ddb entries for pipe */
-       memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
-       memset(&watermarks->ddb.plane[pipe], 0,
-              sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
-       memset(&watermarks->ddb.y_plane[pipe], 0,
-              sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
-       memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
-              sizeof(struct skl_ddb_entry));
+       /*
+        * If this transaction isn't actually touching any CRTCs, don't
+        * bother with watermark calculation.  Note that if we pass this
+        * test, we're guaranteed to hold at least one CRTC state mutex,
+        * which means we can safely use values like dev_priv->active_crtcs
+        * since any racing commits that want to update them would need to
+        * hold _all_ CRTC state mutexes.
+        */
+       for_each_crtc_in_state(state, crtc, cstate, i)
+               changed = true;
+       if (!changed)
+               return 0;
 
+       /* Clear all dirty flags */
+       results->dirty_pipes = 0;
+
+       ret = skl_compute_ddb(state);
+       if (ret)
+               return ret;
+
+       /*
+        * Calculate WMs for all pipes that are part of this transaction.
+        * Note that the DDB allocation above may have added more CRTCs that
+        * weren't otherwise being modified (and set bits in dirty_pipes) if
+        * pipe allocations had to change.
+        *
+        * FIXME:  Now that we're doing this in the atomic check phase, we
+        * should allow skl_update_pipe_wm() to return failure in cases where
+        * no suitable watermark values can be found.
+        */
+       for_each_crtc_in_state(state, crtc, cstate, i) {
+               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+               struct intel_crtc_state *intel_cstate =
+                       to_intel_crtc_state(cstate);
+
+               pipe_wm = &intel_cstate->wm.skl.optimal;
+               ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
+                                        &changed);
+               if (ret)
+                       return ret;
+
+               if (changed)
+                       results->dirty_pipes |= drm_crtc_mask(crtc);
+
+               if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
+                       /* This pipe's WMs did not change */
+                       continue;
+
+               intel_cstate->update_wm_pre = true;
+               skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
+       }
+
+       return 0;
 }
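
The dirty tracking here replaces the old per-pipe bool array with a single bitmask keyed by drm_crtc_mask(). A small sketch of that bookkeeping, assuming a simplified results struct:

    #include <stdbool.h>
    #include <stdint.h>

    /* dirty_pipes bookkeeping as now done in skl_compute_wm() and
     * skl_update_wm(): one bitmask instead of bool dirty[MAX_PIPES]. */
    struct wm_results {
            uint32_t dirty_pipes;
    };

    static void mark_pipe_dirty(struct wm_results *r, uint32_t crtc_mask)
    {
            r->dirty_pipes |= crtc_mask;
    }

    static bool pipe_needs_update(const struct wm_results *r,
                                  uint32_t crtc_mask)
    {
            /* skl_update_wm() bails out early when this returns false. */
            return (r->dirty_pipes & crtc_mask) != 0;
    }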
 
 static void skl_update_wm(struct drm_crtc *crtc)
 {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_wm_values *results = &dev_priv->wm.skl_results;
        struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
-       struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl;
+       struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
 
-
-       /* Clear all dirty flags */
-       memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
-
-       skl_clear_wm(results, intel_crtc->pipe);
-
-       if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
+       if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
                return;
 
-       skl_compute_wm_results(dev, pipe_wm, results, intel_crtc);
-       results->dirty[intel_crtc->pipe] = true;
+       intel_crtc->wm.active.skl = *pipe_wm;
+
+       mutex_lock(&dev_priv->wm.wm_mutex);
 
-       skl_update_other_pipe_wm(dev, crtc, results);
        skl_write_wm_values(dev_priv, results);
        skl_flush_wm_values(dev_priv, results);
 
        /* store the new configuration */
        dev_priv->wm.skl_hw = *results;
+
+       mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
 static void ilk_compute_wm_config(struct drm_device *dev,
@@ -3720,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
 
 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
        struct ilk_wm_maximums max;
        struct intel_wm_config config = {};
@@ -3757,7 +4094,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
        struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 
        mutex_lock(&dev_priv->wm.wm_mutex);
-       intel_crtc->wm.active.ilk = cstate->wm.intermediate;
+       intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
        ilk_program_watermarks(dev_priv);
        mutex_unlock(&dev_priv->wm.wm_mutex);
 }
@@ -3769,7 +4106,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
 
        mutex_lock(&dev_priv->wm.wm_mutex);
        if (cstate->wm.need_postvbl_update) {
-               intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+               intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
                ilk_program_watermarks(dev_priv);
        }
        mutex_unlock(&dev_priv->wm.wm_mutex);
@@ -3822,11 +4159,11 @@ static void skl_pipe_wm_active_state(uint32_t val,
 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
-       struct skl_pipe_wm *active = &cstate->wm.optimal.skl;
+       struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
        enum pipe pipe = intel_crtc->pipe;
        int level, i, max_level;
        uint32_t temp;
@@ -3849,7 +4186,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
        if (!intel_crtc->active)
                return;
 
-       hw->dirty[pipe] = true;
+       hw->dirty_pipes |= drm_crtc_mask(crtc);
 
        active->linetime = hw->wm_linetime[pipe];
 
@@ -3876,23 +4213,31 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 
 void skl_wm_get_hw_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
        struct drm_crtc *crtc;
 
        skl_ddb_get_hw_state(dev_priv, ddb);
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                skl_pipe_wm_get_hw_state(crtc);
+
+       if (dev_priv->active_crtcs) {
+               /* Fully recompute DDB on first atomic commit */
+               dev_priv->wm.distrust_bios_wm = true;
+       } else {
+               /* Easy/common case; just sanitize DDB now if everything is off */
+               memset(ddb, 0, sizeof(*ddb));
+       }
 }
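
The readout logic above makes a binary choice: flag the BIOS-programmed DDB as untrusted if any pipe is running, or zero it on the spot. Sketched with a hypothetical, simplified DDB layout:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct ddb_state {
            uint32_t pipe_start[3], pipe_end[3];    /* illustrative layout */
    };

    /* Decision at the end of skl_wm_get_hw_state(): returns true when a
     * full recompute must happen on the first atomic commit. */
    static bool sanitize_or_distrust(struct ddb_state *ddb,
                                     uint32_t active_crtcs)
    {
            if (active_crtcs)
                    return true;            /* can't trust the BIOS DDB */

            memset(ddb, 0, sizeof(*ddb));   /* nothing on: safe to zero */
            return false;
    }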
 
 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct ilk_wm_values *hw = &dev_priv->wm.hw;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
-       struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
+       struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
        enum pipe pipe = intel_crtc->pipe;
        static const i915_reg_t wm0_pipe_reg[] = {
                [PIPE_A] = WM0_PIPEA_ILK,
@@ -4092,7 +4437,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
 
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct ilk_wm_values *hw = &dev_priv->wm.hw;
        struct drm_crtc *crtc;
 
@@ -4154,7 +4499,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
  */
 void intel_update_watermarks(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 
        if (dev_priv->display.update_wm)
                dev_priv->display.update_wm(crtc);
@@ -4169,9 +4514,8 @@ DEFINE_SPINLOCK(mchdev_lock);
  * mchdev_lock. */
 static struct drm_i915_private *i915_mch_dev;
 
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
+bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u16 rgvswctl;
 
        assert_spin_locked(&mchdev_lock);
@@ -4193,9 +4537,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
        return true;
 }
 
-static void ironlake_enable_drps(struct drm_device *dev)
+static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rgvmodectl;
        u8 fmax, fmin, fstart, vstart;
 
@@ -4252,7 +4595,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
                DRM_ERROR("stuck trying to change perf mode\n");
        mdelay(1);
 
-       ironlake_set_drps(dev, fstart);
+       ironlake_set_drps(dev_priv, fstart);
 
        dev_priv->ips.last_count1 = I915_READ(DMIEC) +
                I915_READ(DDREC) + I915_READ(CSIEC);
@@ -4263,9 +4606,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
        spin_unlock_irq(&mchdev_lock);
 }
 
-static void ironlake_disable_drps(struct drm_device *dev)
+static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u16 rgvswctl;
 
        spin_lock_irq(&mchdev_lock);
@@ -4280,7 +4622,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
        I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
 
        /* Go back to the starting frequency */
-       ironlake_set_drps(dev, dev_priv->ips.fstart);
+       ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
        mdelay(1);
        rgvswctl |= MEMCTL_CMD_STS;
        I915_WRITE(MEMSWCTL, rgvswctl);
@@ -4326,19 +4668,23 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
        new_power = dev_priv->rps.power;
        switch (dev_priv->rps.power) {
        case LOW_POWER:
-               if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
+               if (val > dev_priv->rps.efficient_freq + 1 &&
+                   val > dev_priv->rps.cur_freq)
                        new_power = BETWEEN;
                break;
 
        case BETWEEN:
-               if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
+               if (val <= dev_priv->rps.efficient_freq &&
+                   val < dev_priv->rps.cur_freq)
                        new_power = LOW_POWER;
-               else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
+               else if (val >= dev_priv->rps.rp0_freq &&
+                        val > dev_priv->rps.cur_freq)
                        new_power = HIGH_POWER;
                break;
 
        case HIGH_POWER:
-               if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
+               if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
+                   val < dev_priv->rps.cur_freq)
                        new_power = BETWEEN;
                break;
        }
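
The reindented switch above is the RPS power-band state machine; note that each transition requires both a band test and a direction test against cur_freq, which is what gives the up/down thresholds their hysteresis. A compilable userspace model of exactly those rules:

    #include <stdint.h>

    enum rps_power { LOW_POWER, BETWEEN, HIGH_POWER };

    /* Transition rules as in gen6_set_rps_thresholds(): a band is only
     * left when the request also moves in that direction. */
    static enum rps_power next_power(enum rps_power cur, uint8_t val,
                                     uint8_t cur_freq, uint8_t efficient_freq,
                                     uint8_t rp0_freq, uint8_t rp1_freq)
    {
            switch (cur) {
            case LOW_POWER:
                    if (val > efficient_freq + 1 && val > cur_freq)
                            return BETWEEN;
                    break;
            case BETWEEN:
                    if (val <= efficient_freq && val < cur_freq)
                            return LOW_POWER;
                    else if (val >= rp0_freq && val > cur_freq)
                            return HIGH_POWER;
                    break;
            case HIGH_POWER:
                    if (val < (rp1_freq + rp0_freq) >> 1 && val < cur_freq)
                            return BETWEEN;
                    break;
            }
            return cur;
    }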
@@ -4384,22 +4730,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
        }
 
        I915_WRITE(GEN6_RP_UP_EI,
-               GT_INTERVAL_FROM_US(dev_priv, ei_up));
+                  GT_INTERVAL_FROM_US(dev_priv, ei_up));
        I915_WRITE(GEN6_RP_UP_THRESHOLD,
-               GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+                  GT_INTERVAL_FROM_US(dev_priv,
+                                      ei_up * threshold_up / 100));
 
        I915_WRITE(GEN6_RP_DOWN_EI,
-               GT_INTERVAL_FROM_US(dev_priv, ei_down));
+                  GT_INTERVAL_FROM_US(dev_priv, ei_down));
        I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
-               GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+                  GT_INTERVAL_FROM_US(dev_priv,
+                                      ei_down * threshold_down / 100));
 
-        I915_WRITE(GEN6_RP_CONTROL,
-                   GEN6_RP_MEDIA_TURBO |
-                   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                   GEN6_RP_MEDIA_IS_GFX |
-                   GEN6_RP_ENABLE |
-                   GEN6_RP_UP_BUSY_AVG |
-                   GEN6_RP_DOWN_IDLE_AVG);
+       I915_WRITE(GEN6_RP_CONTROL,
+                  GEN6_RP_MEDIA_TURBO |
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                  GEN6_RP_MEDIA_IS_GFX |
+                  GEN6_RP_ENABLE |
+                  GEN6_RP_UP_BUSY_AVG |
+                  GEN6_RP_DOWN_IDLE_AVG);
 
        dev_priv->rps.power = new_power;
        dev_priv->rps.up_threshold = threshold_up;
@@ -4424,12 +4772,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 /* gen6_set_rps is called to update the frequency request, but should also be
  * called when the range (min_delay and max_delay) is modified so that we can
  * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                return;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4442,10 +4788,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
        if (val != dev_priv->rps.cur_freq) {
                gen6_set_rps_thresholds(dev_priv, val);
 
-               if (IS_GEN9(dev))
+               if (IS_GEN9(dev_priv))
                        I915_WRITE(GEN6_RPNSWREQ,
                                   GEN9_FREQUENCY(val));
-               else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                        I915_WRITE(GEN6_RPNSWREQ,
                                   HSW_FREQUENCY(val));
                else
@@ -4467,15 +4813,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
        trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
 
-static void valleyview_set_rps(struct drm_device *dev, u8 val)
+static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
        WARN_ON(val > dev_priv->rps.max_freq);
        WARN_ON(val < dev_priv->rps.min_freq);
 
-       if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+       if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
                      "Odd GPU freq value\n"))
                val &= ~1;
 
@@ -4508,7 +4852,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
        /* Wake up the media well, as that takes a lot less
         * power than the Render well. */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
-       valleyview_set_rps(dev_priv->dev, val);
+       valleyview_set_rps(dev_priv, val);
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
 }
 
@@ -4520,20 +4864,33 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
                        gen6_rps_reset_ei(dev_priv);
                I915_WRITE(GEN6_PMINTRMSK,
                           gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+
+               gen6_enable_rps_interrupts(dev_priv);
+
+               /* Ensure we start at the user's desired frequency */
+               intel_set_rps(dev_priv,
+                             clamp(dev_priv->rps.cur_freq,
+                                   dev_priv->rps.min_freq_softlimit,
+                                   dev_priv->rps.max_freq_softlimit));
        }
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       /* Flush our bottom-half so that it does not race with us
+        * setting the idle frequency and so that it is bounded by
+        * our rpm wakeref. And then disable the interrupts to stop any
+        * further RPS reclocking whilst we are asleep.
+        */
+       gen6_disable_rps_interrupts(dev_priv);
 
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
-               if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+               if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                        vlv_set_rps_idle(dev_priv);
                else
-                       gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+                       gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
                dev_priv->rps.last_adj = 0;
                I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
        }
@@ -4552,7 +4909,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
        /* This is intentionally racy! We peek at the state here, then
         * validate inside the RPS worker.
         */
-       if (!(dev_priv->mm.busy &&
+       if (!(dev_priv->gt.awake &&
              dev_priv->rps.enabled &&
              dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
                return;
@@ -4568,7 +4925,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
                spin_lock_irq(&dev_priv->irq_lock);
                if (dev_priv->rps.interrupts_enabled) {
                        dev_priv->rps.client_boost = true;
-                       queue_work(dev_priv->wq, &dev_priv->rps.work);
+                       schedule_work(&dev_priv->rps.work);
                }
                spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -4581,49 +4938,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
        spin_unlock(&dev_priv->rps.client_lock);
 }
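
gen6_rps_boost()'s fast path is, as the comment says, intentionally racy: the unlocked peek (now at dev_priv->gt.awake) may see stale values, and correctness relies on the worker revalidating under the lock. A C11 sketch of the pattern, with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Model of the unlocked fast-path check: false positives/negatives
     * are tolerated because the RPS worker re-checks under the lock. */
    struct rps_model {
            atomic_bool awake, enabled;
            atomic_uint cur_freq, max_freq;
    };

    static bool boost_worth_queueing(struct rps_model *r)
    {
            return atomic_load(&r->awake) &&
                   atomic_load(&r->enabled) &&
                   atomic_load(&r->cur_freq) < atomic_load(&r->max_freq);
    }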
 
-void intel_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
-               valleyview_set_rps(dev, val);
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               valleyview_set_rps(dev_priv, val);
        else
-               gen6_set_rps(dev, val);
+               gen6_set_rps(dev_priv, val);
 }
 
-static void gen9_disable_rc6(struct drm_device *dev)
+static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        I915_WRITE(GEN6_RC_CONTROL, 0);
        I915_WRITE(GEN9_PG_ENABLE, 0);
 }
 
-static void gen9_disable_rps(struct drm_device *dev)
+static void gen9_disable_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        I915_WRITE(GEN6_RP_CONTROL, 0);
 }
 
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        I915_WRITE(GEN6_RC_CONTROL, 0);
        I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
        I915_WRITE(GEN6_RP_CONTROL, 0);
 }
 
-static void cherryview_disable_rps(struct drm_device *dev)
+static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        I915_WRITE(GEN6_RC_CONTROL, 0);
 }
 
-static void valleyview_disable_rps(struct drm_device *dev)
+static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        /* we're doing forcewake before disabling RC6,
         * this is what the BIOS expects when going into suspend */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4633,34 +4980,45 @@ static void valleyview_disable_rps(struct drm_device *dev)
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
 {
-       if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
                        mode = GEN6_RC_CTL_RC6_ENABLE;
                else
                        mode = 0;
        }
-       if (HAS_RC6p(dev))
-               DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
-                             onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
-                             onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
-                             onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
+       if (HAS_RC6p(dev_priv))
+               DRM_DEBUG_DRIVER("Enabling RC6 states: "
+                                "RC6 %s RC6p %s RC6pp %s\n",
+                                onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+                                onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+                                onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
 
        else
-               DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
-                             onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
+               DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
+                                onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
 }
 
-static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
+static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        bool enable_rc6 = true;
        unsigned long rc6_ctx_base;
+       u32 rc_ctl;
+       int rc_sw_target;
+
+       rc_ctl = I915_READ(GEN6_RC_CONTROL);
+       rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
+                      RC_SW_TARGET_STATE_SHIFT;
+       DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+                        "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+                        onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+                        onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+                        rc_sw_target);
 
        if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
-               DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+               DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
                enable_rc6 = false;
        }
 
@@ -4672,7 +5030,7 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
        if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
              (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
                                        ggtt->stolen_reserved_size))) {
-               DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+               DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
                enable_rc6 = false;
        }
 
@@ -4680,31 +5038,40 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
              ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
              ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
              ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
-               DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+               DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
                enable_rc6 = false;
        }
 
-       if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
-                                           GEN6_RC_CTL_HW_ENABLE)) &&
-           ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
-            !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
-               DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+       if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
+           !I915_READ(GEN8_PUSHBUS_ENABLE) ||
+           !I915_READ(GEN8_PUSHBUS_SHIFT)) {
+               DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+               enable_rc6 = false;
+       }
+
+       if (!I915_READ(GEN6_GFXPAUSE)) {
+               DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+               enable_rc6 = false;
+       }
+
+       if (!I915_READ(GEN8_MISC_CTRL0)) {
+               DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
                enable_rc6 = false;
        }
 
        return enable_rc6;
 }
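
The containment test on rc6_ctx_base above (kept from the original, alongside the new pushbus/GFXPAUSE/GPM checks) boils down to a closed-open interval check. As a standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ul

    /* The rc6_ctx_base test from bxt_check_bios_rc6_setup(): the one-page
     * RC6 context must lie entirely inside the reserved stolen region. */
    static bool ctx_in_reserved(uint64_t ctx_base, uint64_t reserved_base,
                                uint64_t reserved_size)
    {
            return ctx_base >= reserved_base &&
                   ctx_base + PAGE_SIZE <= reserved_base + reserved_size;
    }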
 
-int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
 {
        /* No RC6 before Ironlake and code is gone for ilk. */
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_INFO(dev_priv)->gen < 6)
                return 0;
 
        if (!enable_rc6)
                return 0;
 
-       if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
+       if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
                DRM_INFO("RC6 disabled by BIOS\n");
                return 0;
        }
@@ -4713,33 +5080,28 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
        if (enable_rc6 >= 0) {
                int mask;
 
-               if (HAS_RC6p(dev))
+               if (HAS_RC6p(dev_priv))
                        mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
                               INTEL_RC6pp_ENABLE;
                else
                        mask = INTEL_RC6_ENABLE;
 
                if ((enable_rc6 & mask) != enable_rc6)
-                       DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
-                                     enable_rc6 & mask, enable_rc6, mask);
+                       DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
+                                        "(requested %d, valid %d)\n",
+                                        enable_rc6 & mask, enable_rc6, mask);
 
                return enable_rc6 & mask;
        }
 
-       if (IS_IVYBRIDGE(dev))
+       if (IS_IVYBRIDGE(dev_priv))
                return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 
        return INTEL_RC6_ENABLE;
 }
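
The mask logic in sanitize_rc6_option() clamps a user-requested RC6 mode to what the platform supports. A compilable extract of just that clamp, with has_rc6p standing in for HAS_RC6p(dev_priv) and printf for the driver's debug message:

    #include <stdio.h>

    #define INTEL_RC6_ENABLE        (1 << 0)
    #define INTEL_RC6p_ENABLE       (1 << 1)
    #define INTEL_RC6pp_ENABLE      (1 << 2)

    /* Clamp a user override (enable_rc6 >= 0) to the valid mode bits. */
    static int clamp_rc6_request(int enable_rc6, int has_rc6p)
    {
            int mask = has_rc6p ?
                    INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE :
                    INTEL_RC6_ENABLE;

            if ((enable_rc6 & mask) != enable_rc6)
                    printf("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
                           enable_rc6 & mask, enable_rc6, mask);

            return enable_rc6 & mask;
    }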
 
-int intel_enable_rc6(const struct drm_device *dev)
+static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 {
-       return i915.enable_rc6;
-}
-
-static void gen6_init_rps_frequencies(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t rp_state_cap;
        u32 ddcc_status = 0;
        int ret;
@@ -4747,7 +5109,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
        /* All of these values are in units of 50MHz */
        dev_priv->rps.cur_freq          = 0;
        /* static values from HW: RP0 > RP1 > RPn (min_freq) */
-       if (IS_BROXTON(dev)) {
+       if (IS_BROXTON(dev_priv)) {
                rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
                dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
                dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
@@ -4763,8 +5125,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
        dev_priv->rps.max_freq          = dev_priv->rps.rp0_freq;
 
        dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
-           IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+           IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                ret = sandybridge_pcode_read(dev_priv,
                                        HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
                                        &ddcc_status);
@@ -4776,7 +5138,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
                                        dev_priv->rps.max_freq);
        }
 
-       if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                /* Store the frequency values in 16.66 MHz units, which is
                   the natural hardware unit for SKL */
                dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
@@ -4793,7 +5155,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
                dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
        if (dev_priv->rps.min_freq_softlimit == 0) {
-               if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                        dev_priv->rps.min_freq_softlimit =
                                max_t(int, dev_priv->rps.efficient_freq,
                                      intel_freq_opcode(dev_priv, 450));
@@ -4804,16 +5166,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
 }
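
On SKL/KBL the frequencies read from RP_STATE_CAP (50 MHz units) are rescaled into the hardware's natural 16.66 MHz units; this works because GEN9_FREQ_SCALER is 3 and 50/3 is roughly 16.66. A worked example (the RP0 value is chosen purely for illustration):

    #include <stdio.h>

    #define GEN9_FREQ_SCALER 3      /* 50 MHz units -> 16.66 MHz units */

    int main(void)
    {
            /* RP0 as read from RP_STATE_CAP in 50 MHz units: 23 -> 1150 MHz */
            unsigned int rp0_50mhz = 23;
            unsigned int rp0_16mhz = rp0_50mhz * GEN9_FREQ_SCALER;

            printf("RP0: %u x 50 MHz = %u x 16.66 MHz units (~%u MHz)\n",
                   rp0_50mhz, rp0_16mhz, rp0_50mhz * 50);
            return 0;
    }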
 
 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
-static void gen9_enable_rps(struct drm_device *dev)
+static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       gen6_init_rps_frequencies(dev);
+       gen6_init_rps_frequencies(dev_priv);
 
        /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                /*
                 * BIOS could leave the Hw Turbo enabled, so need to explicitly
                 * clear out the Control register just to avoid inconsistency
@@ -4823,7 +5183,7 @@ static void gen9_enable_rps(struct drm_device *dev)
                 * if the Turbo is left enabled in the Control register, as the
                 * Up/Down interrupts would remain masked.
                 */
-               gen9_disable_rps(dev);
+               gen9_disable_rps(dev_priv);
                intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
                return;
        }
@@ -4842,14 +5202,13 @@ static void gen9_enable_rps(struct drm_device *dev)
         * Up/Down EI & threshold registers, as well as the RP_CONTROL,
         * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
        dev_priv->rps.power = HIGH_POWER; /* force a reset */
-       gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+       gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void gen9_enable_rc6(struct drm_device *dev)
+static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        uint32_t rc6_mask = 0;
 
@@ -4866,7 +5225,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
        /* 2b: Program RC6 thresholds.*/
 
        /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
-       if (IS_SKYLAKE(dev))
+       if (IS_SKYLAKE(dev_priv))
                I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
        else
                I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4875,7 +5234,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
        for_each_engine(engine, dev_priv)
                I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
 
-       if (HAS_GUC_UCODE(dev))
+       if (HAS_GUC(dev_priv))
                I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
 
        I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -4885,12 +5244,12 @@ static void gen9_enable_rc6(struct drm_device *dev)
        I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
 
        /* 3a: Enable RC6 */
-       if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+       if (intel_enable_rc6() & INTEL_RC6_ENABLE)
                rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
        DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
        /* WaRsUseTimeoutMode */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
                I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
                           GEN7_RC_CTL_TO_MODE |
@@ -4906,19 +5265,17 @@ static void gen9_enable_rc6(struct drm_device *dev)
         * 3b: Enable Coarse Power Gating only when RC6 is enabled.
         * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
         */
-       if (NEEDS_WaRsDisableCoarsePowerGating(dev))
+       if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                I915_WRITE(GEN9_PG_ENABLE, 0);
        else
                I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
                                (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
 }
 
-static void gen8_enable_rps(struct drm_device *dev)
+static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        uint32_t rc6_mask = 0;
 
@@ -4933,7 +5290,7 @@ static void gen8_enable_rps(struct drm_device *dev)
        I915_WRITE(GEN6_RC_CONTROL, 0);
 
        /* Initialize rps frequencies */
-       gen6_init_rps_frequencies(dev);
+       gen6_init_rps_frequencies(dev_priv);
 
        /* 2b: Program RC6 thresholds.*/
        I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -4942,16 +5299,16 @@ static void gen8_enable_rps(struct drm_device *dev)
        for_each_engine(engine, dev_priv)
                I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
        I915_WRITE(GEN6_RC_SLEEP, 0);
-       if (IS_BROADWELL(dev))
+       if (IS_BROADWELL(dev_priv))
                I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
        else
                I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
 
        /* 3: Enable RC6 */
-       if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+       if (intel_enable_rc6() & INTEL_RC6_ENABLE)
                rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
-       intel_print_rc6_info(dev, rc6_mask);
-       if (IS_BROADWELL(dev))
+       intel_print_rc6_info(dev_priv, rc6_mask);
+       if (IS_BROADWELL(dev_priv))
                I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
                                GEN7_RC_CTL_TO_MODE |
                                rc6_mask);
@@ -4992,14 +5349,13 @@ static void gen8_enable_rps(struct drm_device *dev)
        /* 6: Ring frequency + overclocking (our driver does this later) */
 
        dev_priv->rps.power = HIGH_POWER; /* force a reset */
-       gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+       gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void gen6_enable_rps(struct drm_device *dev)
+static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
        u32 gtfifodbg;
@@ -5026,7 +5382,7 @@ static void gen6_enable_rps(struct drm_device *dev)
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        /* Initialize rps frequencies */
-       gen6_init_rps_frequencies(dev);
+       gen6_init_rps_frequencies(dev_priv);
 
        /* disable the counters and set deterministic thresholds */
        I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -5042,7 +5398,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 
        I915_WRITE(GEN6_RC_SLEEP, 0);
        I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
-       if (IS_IVYBRIDGE(dev))
+       if (IS_IVYBRIDGE(dev_priv))
                I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
        else
                I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -5050,12 +5406,12 @@ static void gen6_enable_rps(struct drm_device *dev)
        I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
        /* Check if we are enabling RC6 */
-       rc6_mode = intel_enable_rc6(dev_priv->dev);
+       rc6_mode = intel_enable_rc6();
        if (rc6_mode & INTEL_RC6_ENABLE)
                rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
 
        /* We don't use those on Haswell */
-       if (!IS_HASWELL(dev)) {
+       if (!IS_HASWELL(dev_priv)) {
                if (rc6_mode & INTEL_RC6p_ENABLE)
                        rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
 
@@ -5063,7 +5419,7 @@ static void gen6_enable_rps(struct drm_device *dev)
                        rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
        }
 
-       intel_print_rc6_info(dev, rc6_mask);
+       intel_print_rc6_info(dev_priv, rc6_mask);
 
        I915_WRITE(GEN6_RC_CONTROL,
                   rc6_mask |
@@ -5087,13 +5443,13 @@ static void gen6_enable_rps(struct drm_device *dev)
        }
 
        dev_priv->rps.power = HIGH_POWER; /* force a reset */
-       gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+       gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
 
        rc6vids = 0;
        ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
-       if (IS_GEN6(dev) && ret) {
+       if (IS_GEN6(dev_priv) && ret) {
                DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
-       } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+       } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
                DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
                          GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
                rc6vids &= 0xffff00;
@@ -5106,9 +5462,8 @@ static void gen6_enable_rps(struct drm_device *dev)
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void __gen6_update_ring_freq(struct drm_device *dev)
+static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int min_freq = 15;
        unsigned int gpu_freq;
        unsigned int max_ia_freq, min_ring_freq;
@@ -5137,7 +5492,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
        /* convert DDR frequency from units of 266.6MHz to bandwidth */
        min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
-       if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                /* Convert GT frequency to 50 HZ units */
                min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
                max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5155,16 +5510,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
                int diff = max_gpu_freq - gpu_freq;
                unsigned int ia_freq = 0, ring_freq = 0;
 
-               if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+               if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                        /*
                         * ring_freq = 2 * GT. ring_freq is in 100MHz units
                         * No floor required for ring frequency on SKL.
                         */
                        ring_freq = gpu_freq;
-               } else if (INTEL_INFO(dev)->gen >= 8) {
+               } else if (INTEL_INFO(dev_priv)->gen >= 8) {
                        /* max(2 * GT, DDR). NB: GT is 50MHz units */
                        ring_freq = max(min_ring_freq, gpu_freq);
-               } else if (IS_HASWELL(dev)) {
+               } else if (IS_HASWELL(dev_priv)) {
                        ring_freq = mult_frac(gpu_freq, 5, 4);
                        ring_freq = max(min_ring_freq, ring_freq);
                        /* leave ia_freq as the default, chosen by cpufreq */
@@ -5191,26 +5546,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
        }
 }
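
The ring-frequency table walk above applies a per-generation policy after converting the DDR readout into comparable bandwidth units with mult_frac(x, 8, 3). A sketch of both pieces, with the platform checks collapsed to an enum (illustrative only; the SKL case returns gpu_freq unchanged because ring units are 100 MHz against GT's 50 MHz):

    #include <stdint.h>

    /* mult_frac(x, 8, 3) as used above, simplified for small values. */
    static unsigned int ddr_to_ring_units(unsigned int x)
    {
            return x * 8 / 3;
    }

    enum platform { SKL_OR_KBL, GEN8_PLUS, HASWELL };

    /* Per-generation ring-frequency policy from __gen6_update_ring_freq(). */
    static unsigned int pick_ring_freq(enum platform p, unsigned int gpu_freq,
                                       unsigned int min_ring_freq)
    {
            switch (p) {
            case SKL_OR_KBL:        /* ring = 2 * GT, no DDR floor */
                    return gpu_freq;
            case GEN8_PLUS:         /* max(2 * GT, DDR) */
                    return gpu_freq > min_ring_freq ? gpu_freq : min_ring_freq;
            case HASWELL:           /* 1.25 * GT, floored at DDR */
            default: {
                    unsigned int rf = gpu_freq * 5 / 4;
                    return rf > min_ring_freq ? rf : min_ring_freq;
            }
            }
    }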
 
-void gen6_update_ring_freq(struct drm_device *dev)
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (!HAS_CORE_RING_FREQ(dev))
+       if (!HAS_CORE_RING_FREQ(dev_priv))
                return;
 
        mutex_lock(&dev_priv->rps.hw_lock);
-       __gen6_update_ring_freq(dev);
+       __gen6_update_ring_freq(dev_priv);
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
        u32 val, rp0;
 
        val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
 
-       switch (INTEL_INFO(dev)->eu_total) {
+       switch (INTEL_INFO(dev_priv)->eu_total) {
        case 8:
                /* (2 * 4) config */
                rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5321,9 +5673,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
        WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
 }
 
-static void cherryview_setup_pctx(struct drm_device *dev)
+static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
        unsigned long pctx_paddr, paddr;
        u32 pcbr;
@@ -5342,15 +5693,14 @@ static void cherryview_setup_pctx(struct drm_device *dev)
        DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
 }
 
-static void valleyview_setup_pctx(struct drm_device *dev)
+static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *pctx;
        unsigned long pctx_paddr;
        u32 pcbr;
        int pctx_size = 24*1024;
 
-       mutex_lock(&dev->struct_mutex);
+       mutex_lock(&dev_priv->drm.struct_mutex);
 
        pcbr = I915_READ(VLV_PCBR);
        if (pcbr) {
@@ -5358,7 +5708,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
                int pcbr_offset;
 
                pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
-               pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+               pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
                                                                      pcbr_offset,
                                                                      I915_GTT_OFFSET_NONE,
                                                                      pctx_size);
@@ -5375,7 +5725,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
         * overlap with other ranges, such as the frame buffer, protected
         * memory, or any other relevant ranges.
         */
-       pctx = i915_gem_object_create_stolen(dev, pctx_size);
+       pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
        if (!pctx) {
                DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
                goto out;
@@ -5387,13 +5737,11 @@ static void valleyview_setup_pctx(struct drm_device *dev)
 out:
        DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
        dev_priv->vlv_pctx = pctx;
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
-static void valleyview_cleanup_pctx(struct drm_device *dev)
+static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        if (WARN_ON(!dev_priv->vlv_pctx))
                return;
 
@@ -5412,12 +5760,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
                         dev_priv->rps.gpll_ref_freq);
 }
 
-static void valleyview_init_gt_powersave(struct drm_device *dev)
+static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
 
-       valleyview_setup_pctx(dev);
+       valleyview_setup_pctx(dev_priv);
 
        vlv_init_gpll_ref_freq(dev_priv);
 
@@ -5471,12 +5818,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-static void cherryview_init_gt_powersave(struct drm_device *dev)
+static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
 
-       cherryview_setup_pctx(dev);
+       cherryview_setup_pctx(dev_priv);
 
        vlv_init_gpll_ref_freq(dev_priv);
 
@@ -5536,14 +5882,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
+static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       valleyview_cleanup_pctx(dev);
+       valleyview_cleanup_pctx(dev_priv);
 }
 
-static void cherryview_enable_rps(struct drm_device *dev)
+static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        u32 gtfifodbg, val, rc6_mode = 0, pcbr;
 
@@ -5588,8 +5933,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
        pcbr = I915_READ(VLV_PCBR);
 
        /* 3: Enable RC6 */
-       if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
-                                               (pcbr >> VLV_PCBR_ADDR_SHIFT))
+       if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
+           (pcbr >> VLV_PCBR_ADDR_SHIFT))
                rc6_mode = GEN7_RC_CTL_TO_MODE;
 
        I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5634,14 +5979,13 @@ static void cherryview_enable_rps(struct drm_device *dev)
                         intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
                         dev_priv->rps.idle_freq);
 
-       valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+       valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static void valleyview_enable_rps(struct drm_device *dev)
+static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        u32 gtfifodbg, val, rc6_mode = 0;
 
@@ -5694,10 +6038,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
                                      VLV_MEDIA_RC6_COUNT_EN |
                                      VLV_RENDER_RC6_COUNT_EN));
 
-       if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+       if (intel_enable_rc6() & INTEL_RC6_ENABLE)
                rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
 
-       intel_print_rc6_info(dev, rc6_mode);
+       intel_print_rc6_info(dev_priv, rc6_mode);
 
        I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
@@ -5724,7 +6068,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
                         intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
                         dev_priv->rps.idle_freq);
 
-       valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
+       valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
 
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -5814,10 +6158,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
 
 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
        unsigned long val;
 
-       if (INTEL_INFO(dev)->gen != 5)
+       if (INTEL_INFO(dev_priv)->gen != 5)
                return 0;
 
        spin_lock_irq(&mchdev_lock);
@@ -5857,11 +6200,10 @@ static int _pxvid_to_vd(u8 pxvid)
 
 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 {
-       struct drm_device *dev = dev_priv->dev;
        const int vd = _pxvid_to_vd(pxvid);
        const int vm = vd - 1125;
 
-       if (INTEL_INFO(dev)->is_mobile)
+       if (INTEL_INFO(dev_priv)->is_mobile)
                return vm > 0 ? vm : 0;
 
        return vd;
@@ -5902,9 +6244,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
-
-       if (INTEL_INFO(dev)->gen != 5)
+       if (INTEL_INFO(dev_priv)->gen != 5)
                return;
 
        spin_lock_irq(&mchdev_lock);
@@ -5953,10 +6293,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 
 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
        unsigned long val;
 
-       if (INTEL_INFO(dev)->gen != 5)
+       if (INTEL_INFO(dev_priv)->gen != 5)
                return 0;
 
        spin_lock_irq(&mchdev_lock);
@@ -6097,7 +6436,7 @@ bool i915_gpu_turbo_disable(void)
 
        dev_priv->ips.max_delay = dev_priv->ips.fstart;
 
-       if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
+       if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
                ret = false;
 
 out_unlock:
@@ -6145,9 +6484,8 @@ void intel_gpu_ips_teardown(void)
        spin_unlock_irq(&mchdev_lock);
 }
 
-static void intel_init_emon(struct drm_device *dev)
+static void intel_init_emon(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 lcfuse;
        u8 pxw[16];
        int i;
@@ -6216,10 +6554,8 @@ static void intel_init_emon(struct drm_device *dev)
        dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
-void intel_init_gt_powersave(struct drm_device *dev)
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        /*
         * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
         * requirement.
@@ -6229,74 +6565,66 @@ void intel_init_gt_powersave(struct drm_device *dev)
                intel_runtime_pm_get(dev_priv);
        }
 
-       if (IS_CHERRYVIEW(dev))
-               cherryview_init_gt_powersave(dev);
-       else if (IS_VALLEYVIEW(dev))
-               valleyview_init_gt_powersave(dev);
+       if (IS_CHERRYVIEW(dev_priv))
+               cherryview_init_gt_powersave(dev_priv);
+       else if (IS_VALLEYVIEW(dev_priv))
+               valleyview_init_gt_powersave(dev_priv);
 }
 
-void intel_cleanup_gt_powersave(struct drm_device *dev)
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (IS_CHERRYVIEW(dev))
+       if (IS_CHERRYVIEW(dev_priv))
                return;
-       else if (IS_VALLEYVIEW(dev))
-               valleyview_cleanup_gt_powersave(dev);
+       else if (IS_VALLEYVIEW(dev_priv))
+               valleyview_cleanup_gt_powersave(dev_priv);
 
        if (!i915.enable_rc6)
                intel_runtime_pm_put(dev_priv);
 }
 
-static void gen6_suspend_rps(struct drm_device *dev)
+static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
-       gen6_disable_rps_interrupts(dev);
+       gen6_disable_rps_interrupts(dev_priv);
 }
 
 /**
  * intel_suspend_gt_powersave - suspend PM work and helper threads
- * @dev: drm device
+ * @dev_priv: i915 device
  *
  * We don't want to disable RC6 or other features here, we just want
  * to make sure any work we've queued has finished and won't bother
  * us while we're suspended.
  */
-void intel_suspend_gt_powersave(struct drm_device *dev)
+void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_GEN(dev_priv) < 6)
                return;
 
-       gen6_suspend_rps(dev);
+       gen6_suspend_rps(dev_priv);
 
        /* Force GPU to min freq during suspend */
        gen6_rps_idle(dev_priv);
 }
 
-void intel_disable_gt_powersave(struct drm_device *dev)
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (IS_IRONLAKE_M(dev)) {
-               ironlake_disable_drps(dev);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
-               intel_suspend_gt_powersave(dev);
+       if (IS_IRONLAKE_M(dev_priv)) {
+               ironlake_disable_drps(dev_priv);
+       } else if (INTEL_INFO(dev_priv)->gen >= 6) {
+               intel_suspend_gt_powersave(dev_priv);
 
                mutex_lock(&dev_priv->rps.hw_lock);
-               if (INTEL_INFO(dev)->gen >= 9) {
-                       gen9_disable_rc6(dev);
-                       gen9_disable_rps(dev);
-               } else if (IS_CHERRYVIEW(dev))
-                       cherryview_disable_rps(dev);
-               else if (IS_VALLEYVIEW(dev))
-                       valleyview_disable_rps(dev);
+               if (INTEL_INFO(dev_priv)->gen >= 9) {
+                       gen9_disable_rc6(dev_priv);
+                       gen9_disable_rps(dev_priv);
+               } else if (IS_CHERRYVIEW(dev_priv))
+                       cherryview_disable_rps(dev_priv);
+               else if (IS_VALLEYVIEW(dev_priv))
+                       valleyview_disable_rps(dev_priv);
                else
-                       gen6_disable_rps(dev);
+                       gen6_disable_rps(dev_priv);
 
                dev_priv->rps.enabled = false;
                mutex_unlock(&dev_priv->rps.hw_lock);
@@ -6308,27 +6636,26 @@ static void intel_gen6_powersave_work(struct work_struct *work)
        struct drm_i915_private *dev_priv =
                container_of(work, struct drm_i915_private,
                             rps.delayed_resume_work.work);
-       struct drm_device *dev = dev_priv->dev;
 
        mutex_lock(&dev_priv->rps.hw_lock);
 
-       gen6_reset_rps_interrupts(dev);
+       gen6_reset_rps_interrupts(dev_priv);
 
-       if (IS_CHERRYVIEW(dev)) {
-               cherryview_enable_rps(dev);
-       } else if (IS_VALLEYVIEW(dev)) {
-               valleyview_enable_rps(dev);
-       } else if (INTEL_INFO(dev)->gen >= 9) {
-               gen9_enable_rc6(dev);
-               gen9_enable_rps(dev);
-               if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-                       __gen6_update_ring_freq(dev);
-       } else if (IS_BROADWELL(dev)) {
-               gen8_enable_rps(dev);
-               __gen6_update_ring_freq(dev);
+       if (IS_CHERRYVIEW(dev_priv)) {
+               cherryview_enable_rps(dev_priv);
+       } else if (IS_VALLEYVIEW(dev_priv)) {
+               valleyview_enable_rps(dev_priv);
+       } else if (INTEL_INFO(dev_priv)->gen >= 9) {
+               gen9_enable_rc6(dev_priv);
+               gen9_enable_rps(dev_priv);
+               if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+                       __gen6_update_ring_freq(dev_priv);
+       } else if (IS_BROADWELL(dev_priv)) {
+               gen8_enable_rps(dev_priv);
+               __gen6_update_ring_freq(dev_priv);
        } else {
-               gen6_enable_rps(dev);
-               __gen6_update_ring_freq(dev);
+               gen6_enable_rps(dev_priv);
+               __gen6_update_ring_freq(dev_priv);
        }
 
        WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6339,27 +6666,25 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 
        dev_priv->rps.enabled = true;
 
-       gen6_enable_rps_interrupts(dev);
+       gen6_enable_rps_interrupts(dev_priv);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 
        intel_runtime_pm_put(dev_priv);
 }
 
-void intel_enable_gt_powersave(struct drm_device *dev)
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        /* Powersaving is controlled by the host when inside a VM */
-       if (intel_vgpu_active(dev))
+       if (intel_vgpu_active(dev_priv))
                return;
 
-       if (IS_IRONLAKE_M(dev)) {
-               ironlake_enable_drps(dev);
-               mutex_lock(&dev->struct_mutex);
-               intel_init_emon(dev);
-               mutex_unlock(&dev->struct_mutex);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       if (IS_IRONLAKE_M(dev_priv)) {
+               ironlake_enable_drps(dev_priv);
+               mutex_lock(&dev_priv->drm.struct_mutex);
+               intel_init_emon(dev_priv);
+               mutex_unlock(&dev_priv->drm.struct_mutex);
+       } else if (INTEL_INFO(dev_priv)->gen >= 6) {
                /*
                 * PCU communication is slow and this doesn't need to be
                 * done at any specific time, so do this out of our fast path
@@ -6378,20 +6703,18 @@ void intel_enable_gt_powersave(struct drm_device *dev)
        }
 }
 
-void intel_reset_gt_powersave(struct drm_device *dev)
+void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (INTEL_INFO(dev)->gen < 6)
+       if (INTEL_INFO(dev_priv)->gen < 6)
                return;
 
-       gen6_suspend_rps(dev);
+       gen6_suspend_rps(dev_priv);
        dev_priv->rps.enabled = false;
 }
 
 static void ibx_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /*
         * On Ibex Peak and Cougar Point, we need to disable clock
@@ -6403,7 +6726,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
 
 static void g4x_disable_trickle_feed(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
 
        for_each_pipe(dev_priv, pipe) {
@@ -6418,7 +6741,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
 
 static void ilk_init_lp_watermarks(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
        I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6432,7 +6755,7 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
 
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
        /*
@@ -6506,7 +6829,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 
 static void cpt_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe;
        uint32_t val;
 
@@ -6543,7 +6866,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
 
 static void gen6_check_mch_setup(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t tmp;
 
        tmp = I915_READ(MCH_SSKPD);
@@ -6554,7 +6877,7 @@ static void gen6_check_mch_setup(struct drm_device *dev)
 
 static void gen6_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
        I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -6669,7 +6992,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 
 static void lpt_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /*
         * TODO: this bit should only be enabled when really needed, then
@@ -6688,7 +7011,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
 
 static void lpt_suspend_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (HAS_PCH_LPT_LP(dev)) {
                uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
@@ -6698,11 +7021,69 @@ static void lpt_suspend_hw(struct drm_device *dev)
        }
 }
 
-static void broadwell_init_clock_gating(struct drm_device *dev)
+static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
+                                  int general_prio_credits,
+                                  int high_prio_credits)
+{
+       u32 misccpctl;
+
+       /* WaTempDisableDOPClkGating:bdw */
+       misccpctl = I915_READ(GEN7_MISCCPCTL);
+       I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+
+       I915_WRITE(GEN8_L3SQCREG1,
+                  L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
+                  L3_HIGH_PRIO_CREDITS(high_prio_credits));
+
+       /*
+        * Wait at least 100 clocks before re-enabling clock gating.
+        * See the definition of L3SQCREG1 in BSpec.
+        */
+       POSTING_READ(GEN8_L3SQCREG1);
+       udelay(1);
+       I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+}
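
/*
 * Usage sketch for the helper above (not an addition of this change; both
 * values come from the bdw and chv hunks later in this diff): the two
 * parameters are the general- and high-priority L3 SQ credit counts, e.g.
 *
 *	gen8_set_l3sqc_credits(dev_priv, 30, 2);	(bdw)
 *	gen8_set_l3sqc_credits(dev_priv, 38, 2);	(chv)
 */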
+
+static void kabylake_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       gen9_init_clock_gating(dev);
+
+       /* WaDisableSDEUnitClockGating:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+                          GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaDisableGamClockGating:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+                          GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaFbcNukeOnHostModify:kbl */
+       I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+                  ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void skylake_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+
+       gen9_init_clock_gating(dev);
+
+       /* WAC6entrylatency:skl */
+       I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
+                  FBC_LLC_FULLY_OPEN);
+
+       /* WaFbcNukeOnHostModify:skl */
+       I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+                  ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void broadwell_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
        enum pipe pipe;
-       uint32_t misccpctl;
 
        ilk_init_lp_watermarks(dev);
 
@@ -6733,20 +7114,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
-       /*
-        * WaProgramL3SqcReg1Default:bdw
-        * WaTempDisableDOPClkGating:bdw
-        */
-       misccpctl = I915_READ(GEN7_MISCCPCTL);
-       I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-       I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
-       /*
-        * Wait at least 100 clocks before re-enabling clock gating. See
-        * the definition of L3SQCREG1 in BSpec.
-        */
-       POSTING_READ(GEN8_L3SQCREG1);
-       udelay(1);
-       I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+       /* WaProgramL3SqcReg1Default:bdw */
+       gen8_set_l3sqc_credits(dev_priv, 30, 2);
 
        /*
         * WaGttCachingOffByDefault:bdw
@@ -6755,12 +7124,16 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
         */
        I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
 
+       /* WaKVMNotificationOnConfigChange:bdw */
+       I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
+                  | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
+
        lpt_init_clock_gating(dev);
 }
 
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        ilk_init_lp_watermarks(dev);
 
@@ -6816,7 +7189,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 
 static void ivybridge_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t snpcr;
 
        ilk_init_lp_watermarks(dev);
@@ -6914,7 +7287,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* WaDisableEarlyCull:vlv */
        I915_WRITE(_3D_CHICKEN3,
@@ -6996,7 +7369,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 
 static void cherryview_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* WaVSRefCountFullforceMissDisable:chv */
        /* WaDSRefCountFullforceMissDisable:chv */
@@ -7016,6 +7389,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
+       /*
+        * WaProgramL3SqcReg1Default:chv
+        * See gfxspecs/Related Documents/Performance Guide/
+        * LSQC Setting Recommendations.
+        */
+       gen8_set_l3sqc_credits(dev_priv, 38, 2);
+
        /*
         * GTT cache may not work with big pages, so if those
         * are ever enabled GTT cache may need to be disabled.
@@ -7025,7 +7405,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
 
 static void g4x_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t dspclk_gate;
 
        I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7052,7 +7432,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
 
 static void crestline_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
        I915_WRITE(RENCLK_GATE_D2, 0);
@@ -7068,7 +7448,7 @@ static void crestline_init_clock_gating(struct drm_device *dev)
 
 static void broadwater_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
                   I965_RCC_CLOCK_GATE_DISABLE |
@@ -7085,7 +7465,7 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
 
 static void gen3_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 dstate = I915_READ(D_STATE);
 
        dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -7110,7 +7490,7 @@ static void gen3_init_clock_gating(struct drm_device *dev)
 
 static void i85x_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
 
@@ -7124,7 +7504,7 @@ static void i85x_init_clock_gating(struct drm_device *dev)
 
 static void i830_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 
@@ -7135,7 +7515,7 @@ static void i830_init_clock_gating(struct drm_device *dev)
 
 void intel_init_clock_gating(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->display.init_clock_gating(dev);
 }
@@ -7163,9 +7543,9 @@ static void nop_init_clock_gating(struct drm_device *dev)
 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 {
        if (IS_SKYLAKE(dev_priv))
-               dev_priv->display.init_clock_gating = nop_init_clock_gating;
+               dev_priv->display.init_clock_gating = skylake_init_clock_gating;
        else if (IS_KABYLAKE(dev_priv))
-               dev_priv->display.init_clock_gating = nop_init_clock_gating;
+               dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
        else if (IS_BROXTON(dev_priv))
                dev_priv->display.init_clock_gating = bxt_init_clock_gating;
        else if (IS_BROADWELL(dev_priv))
@@ -7203,7 +7583,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        intel_fbc_init(dev_priv);
 
@@ -7217,6 +7597,7 @@ void intel_init_pm(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen >= 9) {
                skl_setup_wm_latency(dev);
                dev_priv->display.update_wm = skl_update_wm;
+               dev_priv->display.compute_global_watermarks = skl_compute_wm;
        } else if (HAS_PCH_SPLIT(dev)) {
                ilk_setup_wm_latency(dev);
 
@@ -7280,46 +7661,59 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 {
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+       /* GEN6_PCODE_* are outside of the forcewake domain, so we can
+        * use the _FW I915_READ variants to reduce the amount of work
+        * required when reading/writing.
+        */
+
+       if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
                DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
                return -EAGAIN;
        }
 
-       I915_WRITE(GEN6_PCODE_DATA, *val);
-       I915_WRITE(GEN6_PCODE_DATA1, 0);
-       I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+       I915_WRITE_FW(GEN6_PCODE_DATA, *val);
+       I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
+       I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
-       if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-                    500)) {
+       if (intel_wait_for_register_fw(dev_priv,
+                                      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+                                      500)) {
                DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
                return -ETIMEDOUT;
        }
 
-       *val = I915_READ(GEN6_PCODE_DATA);
-       I915_WRITE(GEN6_PCODE_DATA, 0);
+       *val = I915_READ_FW(GEN6_PCODE_DATA);
+       I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
        return 0;
 }
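
/*
 * Minimal usage sketch, not part of the change itself: pcode accesses must
 * be serialised by rps.hw_lock (the WARN_ON above enforces this), and *val
 * is both input and output. The caller below is hypothetical;
 * GEN6_PCODE_READ_RC6VIDS is assumed to be the mailbox command from
 * i915_reg.h.
 */
static void example_read_rc6vids(struct drm_i915_private *dev_priv)
{
        u32 rc6vids = 0;        /* also the input written to GEN6_PCODE_DATA */

        mutex_lock(&dev_priv->rps.hw_lock);
        if (sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
                                   &rc6vids))
                DRM_DEBUG_DRIVER("failed to read RC6VIDS over pcode\n");
        mutex_unlock(&dev_priv->rps.hw_lock);
}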
 
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
+                              u32 mbox, u32 val)
 {
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+       /* GEN6_PCODE_* are outside of the forcewake domain, so we can
+        * use the _FW I915_READ variants to reduce the amount of work
+        * required when reading/writing.
+        */
+
+       if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
                DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
                return -EAGAIN;
        }
 
-       I915_WRITE(GEN6_PCODE_DATA, val);
-       I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+       I915_WRITE_FW(GEN6_PCODE_DATA, val);
+       I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
-       if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-                    500)) {
+       if (intel_wait_for_register_fw(dev_priv,
+                                      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+                                      500)) {
                DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
                return -ETIMEDOUT;
        }
 
-       I915_WRITE(GEN6_PCODE_DATA, 0);
+       I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
        return 0;
 }
@@ -7389,23 +7783,21 @@ static void __intel_rps_boost_work(struct work_struct *work)
        struct request_boost *boost = container_of(work, struct request_boost, work);
        struct drm_i915_gem_request *req = boost->req;
 
-       if (!i915_gem_request_completed(req, true))
-               gen6_rps_boost(to_i915(req->engine->dev), NULL,
-                              req->emitted_jiffies);
+       if (!i915_gem_request_completed(req))
+               gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
-       i915_gem_request_unreference__unlocked(req);
+       i915_gem_request_unreference(req);
        kfree(boost);
 }
 
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
-                                      struct drm_i915_gem_request *req)
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
 {
        struct request_boost *boost;
 
-       if (req == NULL || INTEL_INFO(dev)->gen < 6)
+       if (req == NULL || INTEL_GEN(req->i915) < 6)
                return;
 
-       if (i915_gem_request_completed(req, true))
+       if (i915_gem_request_completed(req))
                return;
 
        boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
@@ -7416,12 +7808,12 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev,
        boost->req = req;
 
        INIT_WORK(&boost->work, __intel_rps_boost_work);
-       queue_work(to_i915(dev)->wq, &boost->work);
+       queue_work(req->i915->wq, &boost->work);
 }
 
 void intel_pm_setup(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        mutex_init(&dev_priv->rps.hw_lock);
        spin_lock_init(&dev_priv->rps.client_lock);
index a788d1e9589b7a47f5b6fd51550be8bba1e8c7a5..68bd0bb34817fa17e3ad6da0ab19d90fe5e4a19a 100644 (file)
@@ -63,7 +63,7 @@ static bool is_edp_psr(struct intel_dp *intel_dp)
 
 static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t val;
 
        val = I915_READ(VLV_PSRSTAT(pipe)) &
@@ -77,7 +77,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
        enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
        i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -107,7 +107,7 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
        uint32_t val;
@@ -173,10 +173,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        uint32_t aux_clock_divider;
        i915_reg_t aux_ctl_reg;
-       int precharge = 0x3;
        static const uint8_t aux_msg[] = {
                [0] = DP_AUX_NATIVE_WRITE << 4,
                [1] = DP_SET_POWER >> 8,
@@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
                [4] = DP_SET_POWER_D0,
        };
        enum port port = dig_port->port;
+       u32 aux_ctl;
        int i;
 
        BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
                                DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
                                DP_AUX_FRAME_SYNC_ENABLE);
 
+       if (dev_priv->psr.link_standby)
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+                                  DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+       else
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+                                  DP_PSR_ENABLE);
+
        aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
 
        /* Setup AUX registers */
@@ -204,40 +211,16 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
                I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
                           intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 
-       if (INTEL_INFO(dev)->gen >= 9) {
-               uint32_t val;
-
-               val = I915_READ(aux_ctl_reg);
-               val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
-               val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
-               val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
-               val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
-               /* Use hardcoded data values for PSR, frame sync and GTC */
-               val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
-               val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
-               val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
-               I915_WRITE(aux_ctl_reg, val);
-       } else {
-               I915_WRITE(aux_ctl_reg,
-                  DP_AUX_CH_CTL_TIME_OUT_400us |
-                  (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-                  (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
-                  (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
-       }
-
-       if (dev_priv->psr.link_standby)
-               drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-                                  DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-       else
-               drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
-                                  DP_PSR_ENABLE);
+       aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
+                                            aux_clock_divider);
+       I915_WRITE(aux_ctl_reg, aux_ctl);
 }
 
 static void vlv_psr_enable_source(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
@@ -252,7 +235,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
@@ -269,17 +252,17 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        uint32_t max_sleep_time = 0x1f;
-       /*
-        * Let's respect VBT in case VBT asks a higher idle_frame value.
-        * Let's use 6 as the minimum to cover all known cases including
-        * the off-by-one issue that HW has in some cases. Also there are
-        * cases where sink should be able to train
-        * with the 5 or 6 idle patterns.
+       /* It was recently identified that, depending on the panel, the idle
+        * frame count calculated by the HW can be off by one. So let's use
+        * the value that came from VBT + 1.
+        * There are also cases where the panel demands at least 4 idle frames
+        * but VBT is not set. To cover both cases, let's use at least 5 when
+        * VBT isn't set, to be on the safe side.
         */
-       uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+       uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
        uint32_t val = EDP_PSR_ENABLE;
 
        val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
@@ -341,7 +324,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc = dig_port->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
@@ -395,7 +378,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        WARN_ON(dev_priv->psr.active);
@@ -424,7 +407,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
 
        if (!HAS_PSR(dev)) {
@@ -511,15 +494,18 @@ static void vlv_psr_disable(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc =
                to_intel_crtc(intel_dig_port->base.base.crtc);
        uint32_t val;
 
        if (dev_priv->psr.active) {
                /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
-               if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
-                             VLV_EDP_PSR_IN_TRANS) == 0, 1))
+               if (intel_wait_for_register(dev_priv,
+                                           VLV_PSRSTAT(intel_crtc->pipe),
+                                           VLV_EDP_PSR_IN_TRANS,
+                                           0,
+                                           1))
                        WARN(1, "PSR transition took longer than expected\n");
 
                val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
@@ -538,16 +524,18 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (dev_priv->psr.active) {
                I915_WRITE(EDP_PSR_CTL,
                           I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 
                /* Wait till PSR is idle */
-               if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
-                              EDP_PSR_STATUS_STATE_MASK) == 0,
-                              2 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
+               if (intel_wait_for_register(dev_priv,
+                                           EDP_PSR_STATUS_CTL,
+                                           EDP_PSR_STATUS_STATE_MASK,
+                                           0,
+                                           2000))
                        DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
                dev_priv->psr.active = false;
@@ -566,7 +554,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        mutex_lock(&dev_priv->psr.lock);
        if (!dev_priv->psr.enabled) {
@@ -603,14 +591,20 @@ static void intel_psr_work(struct work_struct *work)
         * and be ready for re-enable.
         */
        if (HAS_DDI(dev_priv)) {
-               if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
-                             EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
+               if (intel_wait_for_register(dev_priv,
+                                           EDP_PSR_STATUS_CTL,
+                                           EDP_PSR_STATUS_STATE_MASK,
+                                           0,
+                                           50)) {
                        DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                        return;
                }
        } else {
-               if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
-                             VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
+               if (intel_wait_for_register(dev_priv,
+                                           VLV_PSRSTAT(pipe),
+                                           VLV_EDP_PSR_IN_TRANS,
+                                           0,
+                                           1)) {
                        DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
                        return;
                }
@@ -636,7 +630,7 @@ unlock:
 
 static void intel_psr_exit(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_dp *intel_dp = dev_priv->psr.enabled;
        struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -691,7 +685,7 @@ static void intel_psr_exit(struct drm_device *dev)
 void intel_psr_single_frame_update(struct drm_device *dev,
                                   unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
        u32 val;
@@ -739,7 +733,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
 void intel_psr_invalidate(struct drm_device *dev,
                          unsigned frontbuffer_bits)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -777,7 +771,7 @@ void intel_psr_invalidate(struct drm_device *dev,
 void intel_psr_flush(struct drm_device *dev,
                     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *crtc;
        enum pipe pipe;
 
@@ -813,7 +807,7 @@ void intel_psr_flush(struct drm_device *dev,
  */
 void intel_psr_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
                HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
index 04402bb9d26b9e96cac9225b37d31587891521f6..cca7792f26d5058c99edfee0d99c76d7706b0ebe 100644 (file)
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+/* Rough estimate of the typical request size, performing a flush,
+ * set-context and then emitting the batch.
+ */
+#define LEGACY_REQUEST_SIZE 200
+
 int __intel_ring_space(int head, int tail, int size)
 {
        int space = head - tail;
@@ -53,18 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
                                            ringbuf->tail, ringbuf->size);
 }
 
-bool intel_engine_stopped(struct intel_engine_cs *engine)
-{
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
-       return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
-}
-
 static void __intel_ring_advance(struct intel_engine_cs *engine)
 {
        struct intel_ringbuffer *ringbuf = engine->buffer;
        ringbuf->tail &= ringbuf->size - 1;
-       if (intel_engine_stopped(engine))
-               return;
        engine->write_tail(engine, ringbuf->tail);
 }
 
@@ -101,7 +98,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      flush_domains)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
        u32 cmd;
        int ret;
 
@@ -140,7 +136,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
                cmd |= MI_EXE_FLUSH;
 
        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-           (IS_G4X(dev) || IS_GEN5(dev)))
+           (IS_G4X(req->i915) || IS_GEN5(req->i915)))
                cmd |= MI_INVALIDATE_ISP;
 
        ret = intel_ring_begin(req, 2);
@@ -426,19 +422,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
 static void ring_write_tail(struct intel_engine_cs *engine,
                            u32 value)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        I915_WRITE_TAIL(engine, value);
 }
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u64 acthd;
 
-       if (INTEL_INFO(engine->dev)->gen >= 8)
+       if (INTEL_GEN(dev_priv) >= 8)
                acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
                                         RING_ACTHD_UDW(engine->mmio_base));
-       else if (INTEL_INFO(engine->dev)->gen >= 4)
+       else if (INTEL_GEN(dev_priv) >= 4)
                acthd = I915_READ(RING_ACTHD(engine->mmio_base));
        else
                acthd = I915_READ(ACTHD);
@@ -448,25 +444,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
 
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u32 addr;
 
        addr = dev_priv->status_page_dmah->busaddr;
-       if (INTEL_INFO(engine->dev)->gen >= 4)
+       if (INTEL_GEN(dev_priv) >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
 }
 
 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        i915_reg_t mmio;
 
        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
-       if (IS_GEN7(dev)) {
+       if (IS_GEN7(dev_priv)) {
                switch (engine->id) {
                case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
@@ -486,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
                        mmio = VEBOX_HWS_PGA_GEN7;
                        break;
                }
-       } else if (IS_GEN6(engine->dev)) {
+       } else if (IS_GEN6(dev_priv)) {
                mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
        } else {
                /* XXX: gen8 returns to sanity */
@@ -503,7 +498,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
         * arises: do we still need this and if so how should we go about
         * invalidating the TLB?
         */
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
+       if (IS_GEN(dev_priv, 6, 7)) {
                i915_reg_t reg = RING_INSTPM(engine->mmio_base);
 
                /* ring should be idle before issuing a sync flush */
@@ -512,8 +507,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
                I915_WRITE(reg,
                           _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
                                              INSTPM_SYNC_FLUSH));
-               if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
-                            1000))
+               if (intel_wait_for_register(dev_priv,
+                                           reg, INSTPM_SYNC_FLUSH, 0,
+                                           1000))
                        DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
                                  engine->name);
        }
@@ -521,11 +517,15 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
 
 static bool stop_ring(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       if (!IS_GEN2(engine->dev)) {
+       if (!IS_GEN2(dev_priv)) {
                I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
-               if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+               if (intel_wait_for_register(dev_priv,
+                                           RING_MI_MODE(engine->mmio_base),
+                                           MODE_IDLE,
+                                           MODE_IDLE,
+                                           1000)) {
                        DRM_ERROR("%s : timed out trying to stop ring\n",
                                  engine->name);
                        /* Sometimes we observe that the idle flag is not
@@ -541,7 +541,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
        I915_WRITE_HEAD(engine, 0);
        engine->write_tail(engine, 0);
 
-       if (!IS_GEN2(engine->dev)) {
+       if (!IS_GEN2(dev_priv)) {
                (void)I915_READ_CTL(engine);
                I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
        }
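
/*
 * Note on the conversion pattern used throughout this series: open-coded
 * wait_for((I915_READ(reg) & mask) == value, timeout_ms) loops become
 * intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms), which
 * polls until the masked bits of reg read back as value and returns 0 on
 * success or a timeout error otherwise. Passing value == 0 waits for the
 * bits to clear; value == mask (as in the MODE_IDLE wait above) waits for
 * them to be set.
 */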
@@ -556,8 +556,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
 
 static int init_ring_common(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        struct intel_ringbuffer *ringbuf = engine->buffer;
        struct drm_i915_gem_object *obj = ringbuf->obj;
        int ret = 0;
@@ -587,7 +586,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
                }
        }
 
-       if (I915_NEED_GFX_HWS(dev))
+       if (I915_NEED_GFX_HWS(dev_priv))
                intel_ring_setup_status_page(engine);
        else
                ring_setup_phys_status_page(engine);
@@ -641,59 +640,42 @@ out:
        return ret;
 }
 
-void
-intel_fini_pipe_control(struct intel_engine_cs *engine)
+void intel_fini_pipe_control(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-
        if (engine->scratch.obj == NULL)
                return;
 
-       if (INTEL_INFO(dev)->gen >= 5) {
-               kunmap(sg_page(engine->scratch.obj->pages->sgl));
-               i915_gem_object_ggtt_unpin(engine->scratch.obj);
-       }
-
+       i915_gem_object_ggtt_unpin(engine->scratch.obj);
        drm_gem_object_unreference(&engine->scratch.obj->base);
        engine->scratch.obj = NULL;
 }
 
-int
-intel_init_pipe_control(struct intel_engine_cs *engine)
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
 {
+       struct drm_i915_gem_object *obj;
        int ret;
 
        WARN_ON(engine->scratch.obj);
 
-       engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
-       if (engine->scratch.obj == NULL) {
-               DRM_ERROR("Failed to allocate seqno page\n");
-               ret = -ENOMEM;
+       obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+       if (!obj)
+               obj = i915_gem_object_create(&engine->i915->drm, size);
+       if (IS_ERR(obj)) {
+               DRM_ERROR("Failed to allocate scratch page\n");
+               ret = PTR_ERR(obj);
                goto err;
        }
 
-       ret = i915_gem_object_set_cache_level(engine->scratch.obj,
-                                             I915_CACHE_LLC);
-       if (ret)
-               goto err_unref;
-
-       ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
+       ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
        if (ret)
                goto err_unref;
 
-       engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
-       engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
-       if (engine->scratch.cpu_page == NULL) {
-               ret = -ENOMEM;
-               goto err_unpin;
-       }
-
+       engine->scratch.obj = obj;
+       engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
        DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
                         engine->name, engine->scratch.gtt_offset);
        return 0;
 
-err_unpin:
-       i915_gem_object_ggtt_unpin(engine->scratch.obj);
 err_unref:
        drm_gem_object_unreference(&engine->scratch.obj->base);
 err:
@@ -702,11 +684,9 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-       int ret, i;
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_workarounds *w = &dev_priv->workarounds;
+       struct i915_workarounds *w = &req->i915->workarounds;
+       int ret, i;
 
        if (w->count == 0)
                return 0;
@@ -795,7 +775,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
 static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
                                 i915_reg_t reg)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        struct i915_workarounds *wa = &dev_priv->workarounds;
        const uint32_t index = wa->hw_whitelist_count[engine->id];
 
@@ -811,8 +791,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
 
 static int gen8_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
 
@@ -863,9 +842,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
 
 static int bdw_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen8_init_workarounds(engine);
        if (ret)
@@ -885,16 +863,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
                          /* WaForceContextSaveRestoreNonCoherent:bdw */
                          HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
                          /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
-                         (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+                         (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
        return 0;
 }
 
 static int chv_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen8_init_workarounds(engine);
        if (ret)
@@ -911,38 +888,39 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
 
 static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t tmp;
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
 
-       /* WaEnableLbsSlaRetryTimerDecrement:skl */
+       /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+       I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+       /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
                   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
 
-       /* WaDisableKillLogic:bxt,skl */
+       /* WaDisableKillLogic:bxt,skl,kbl */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                   ECOCHK_DIS_TLB);
 
-       /* WaClearFlowControlGpgpuContextSave:skl,bxt */
-       /* WaDisablePartialInstShootdown:skl,bxt */
+       /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
+       /* WaDisablePartialInstShootdown:skl,bxt,kbl */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          FLOW_CONTROL_ENABLE |
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
-       /* Syncing dependencies between camera and graphics:skl,bxt */
+       /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                          GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
        /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                                  GEN9_DG_MIRROR_FIX_ENABLE);
 
        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
                                  GEN9_RHWO_OPTIMIZATION_DISABLE);
                /*
@@ -952,52 +930,78 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
                 */
        }
 
-       /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
-       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
+       /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
        WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
                          GEN9_ENABLE_YV12_BUGFIX |
                          GEN9_ENABLE_GPGPU_PREEMPTION);
 
-       /* Wa4x4STCOptimizationDisable:skl,bxt */
-       /* WaDisablePartialResolveInVc:skl,bxt */
+       /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+       /* WaDisablePartialResolveInVc:skl,bxt,kbl */
        WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
                                         GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
 
-       /* WaCcsTlbPrefetchDisable:skl,bxt */
+       /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
        WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                          GEN9_CCS_TLB_PREFETCH_ENABLE);
 
        /* WaDisableMaskBasedCammingInRCC:skl,bxt */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
                                  PIXEL_MASK_CAMMING_DISABLE);
 
-       /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
-       tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-       if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
-           IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
-               tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
-       WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+       /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+                         HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+       /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+        * both tied to WaForceContextSaveRestoreNonCoherent
+        * in some hsds for skl. We keep the tie for all gen9. The
+        * documentation is a bit hazy and so we want to get common behaviour,
+        * even though there is no clear evidence we would need both on kbl/bxt.
+        * This area has been a source of system hangs so we play it safe
+        * and mimic skl regardless of what bspec says.
+        *
+        * Use Force Non-Coherent whenever executing a 3D context. This
+        * is a workaround for a possible hang in the unlikely event
+        * a TLB invalidation occurs during a PSD flush.
+        */
+
+       /* WaForceEnableNonCoherent:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_NON_COHERENT);
+
+       /* WaDisableHDCInvalidation:skl,bxt,kbl */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+                  BDW_DISABLE_HDC_INVALIDATION);
 
-       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-       if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+       if (IS_SKYLAKE(dev_priv) ||
+           IS_KABYLAKE(dev_priv) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
-       /* WaDisableSTUnitPowerOptimization:skl,bxt */
+       /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
-       /* WaOCLCoherentLineFlush:skl,bxt */
+       /* WaOCLCoherentLineFlush:skl,bxt,kbl */
        I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
                                    GEN8_LQSC_FLUSH_COHERENT_LINES));
 
-       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+       /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+       ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+       if (ret)
+               return ret;
+
+       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
        ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
        if (ret)
                return ret;
 
-       /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+       /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
        ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
        if (ret)
                return ret;
@@ -1007,8 +1011,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 
 static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        u8 vals[3] = { 0, 0, 0 };
        unsigned int i;
 
@@ -1049,9 +1052,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
 
 static int skl_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen9_init_workarounds(engine);
        if (ret)
@@ -1062,12 +1064,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
         * until D0 which is the default case so this is equivalent to
         * !WaDisablePerCtxtPreemptionGranularityControl:skl
         */
-       if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
                I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
                           _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
        }
 
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
                /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
                I915_WRITE(FF_SLICE_CS_CHICKEN2,
                           _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1076,50 +1078,42 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
        /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
         * involving this register should also be added to WA batch as required.
         */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
                /* WaDisableLSQCROPERFforOCL:skl */
                I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
                           GEN8_LQSC_RO_PERF_DIS);
 
        /* WaEnableGapsTsvCreditFix:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
                I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
                                           GEN9_GAPS_TSV_CREDIT_DISABLE));
        }
 
        /* WaDisablePowerCompilerClockGating:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-       /* This is tied to WaForceContextSaveRestoreNonCoherent */
-       if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
-               /*
-                *Use Force Non-Coherent whenever executing a 3D context. This
-                * is a workaround for a possible hang in the unlikely event
-                * a TLB invalidation occurs during a PSD flush.
-                */
-               /* WaForceEnableNonCoherent:skl */
-               WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                                 HDC_FORCE_NON_COHERENT);
-
-               /* WaDisableHDCInvalidation:skl */
-               I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-                          BDW_DISABLE_HDC_INVALIDATION);
-       }
-
        /* WaBarrierPerformanceFixDisable:skl */
-       if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE |
                                  HDC_BARRIER_PERFORMANCE_DISABLE);
 
        /* WaDisableSbeCacheDispatchPortSharing:skl */
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
+       /* WaDisableGafsUnitClkGating:skl */
+       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaInPlaceDecompressionHang:skl */
+       if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
+               WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+                          GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
        /* WaDisableLSQCROPERFforOCL:skl */
        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
@@ -1130,9 +1124,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 
 static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
 
        ret = gen9_init_workarounds(engine);
        if (ret)
@@ -1140,11 +1133,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 
        /* WaStoreMultiplePTEenable:bxt */
        /* This is a requirement according to Hardware specification */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
                I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 
        /* WaSetClckGatingDisableMedia:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
                                            ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
        }
@@ -1153,8 +1146,14 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          STALL_DOP_GATING_DISABLE);
 
+       /* WaDisablePooledEuLoadBalancingFix:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+               WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
+                                 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+       }
+
        /* WaDisableSbeCacheDispatchPortSharing:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
                WA_SET_BIT_MASKED(
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1164,7 +1163,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
        /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
        /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
        /* WaDisableLSQCROPERFforOCL:bxt */
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
                ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
                if (ret)
                        return ret;
@@ -1174,44 +1173,116 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
                        return ret;
        }
 
+       /* WaProgramL3SqcReg1DefaultForPerf:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+               I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
+                                          L3_HIGH_PRIO_CREDITS(2));
+
+       /* WaInsertDummyPushConstPs:bxt */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+       /* WaInPlaceDecompressionHang:bxt */
+       if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+               WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+                          GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+       return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       int ret;
+
+       ret = gen9_init_workarounds(engine);
+       if (ret)
+               return ret;
+
+       /* WaEnableGapsTsvCreditFix:kbl */
+       I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+                                  GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+       /* WaDisableDynamicCreditSharing:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               WA_SET_BIT(GAMT_CHKN_BIT_REG,
+                          GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+       /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+       if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+               WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                                 HDC_FENCE_DEST_SLM_DISABLE);
+
+       /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+        * involving this register should also be added to WA batch as required.
+        */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+               /* WaDisableLSQCROPERFforOCL:kbl */
+               I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+                          GEN8_LQSC_RO_PERF_DIS);
+
+       /* WaInsertDummyPushConstPs:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+       /* WaDisableGafsUnitClkGating:kbl */
+       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaDisableSbeCacheDispatchPortSharing:kbl */
+       WA_SET_BIT_MASKED(
+               GEN7_HALF_SLICE_CHICKEN1,
+               GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+       /* WaInPlaceDecompressionHang:kbl */
+       WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+                  GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+       /* WaDisableLSQCROPERFforOCL:kbl */
+       ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
 int init_workarounds_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        WARN_ON(engine->id != RCS);
 
        dev_priv->workarounds.count = 0;
        dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
-       if (IS_BROADWELL(dev))
+       if (IS_BROADWELL(dev_priv))
                return bdw_init_workarounds(engine);
 
-       if (IS_CHERRYVIEW(dev))
+       if (IS_CHERRYVIEW(dev_priv))
                return chv_init_workarounds(engine);
 
-       if (IS_SKYLAKE(dev))
+       if (IS_SKYLAKE(dev_priv))
                return skl_init_workarounds(engine);
 
-       if (IS_BROXTON(dev))
+       if (IS_BROXTON(dev_priv))
                return bxt_init_workarounds(engine);
 
+       if (IS_KABYLAKE(dev_priv))
+               return kbl_init_workarounds(engine);
+
        return 0;
 }
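
A note on the structure above: init_workarounds_ring() only dispatches; each
platform handler (skl/bxt/kbl) first applies the shared gen9 set via
gen9_init_workarounds() and then layers its own revision-gated entries on
top. A minimal standalone sketch of that layering, with illustrative names
rather than the driver's API:

#include <stdio.h>

enum platform { SKL, BXT, KBL };

static int gen9_common(void)  { puts("common gen9 workarounds"); return 0; }
static int kbl_specific(void) { puts("kbl-only workarounds");    return 0; }

/* Same shape as the driver code: the dispatcher picks a per-platform
 * handler, and every handler starts from the common gen9 set. */
static int kbl_init(void)
{
	int ret = gen9_common();

	if (ret)
		return ret;
	return kbl_specific();
}

static int init_workarounds(enum platform p)
{
	if (p == KBL)
		return kbl_init();
	return 0;
}

int main(void)
{
	return init_workarounds(KBL);
}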
 
 static int init_render_ring(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret = init_ring_common(engine);
        if (ret)
                return ret;
 
        /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
-       if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
+       if (IS_GEN(dev_priv, 4, 6))
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
        /* We need to disable the AsyncFlip performance optimisations in order
@@ -1220,22 +1291,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
         */
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+       if (IS_GEN(dev_priv, 6, 7))
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
        /* Required for the hardware to program scanline values for waiting */
        /* WaEnableFlushTlbInvalidationMode:snb */
-       if (INTEL_INFO(dev)->gen == 6)
+       if (IS_GEN6(dev_priv))
                I915_WRITE(GFX_MODE,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
 
        /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
-       if (IS_GEN7(dev))
+       if (IS_GEN7(dev_priv))
                I915_WRITE(GFX_MODE_GEN7,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
-       if (IS_GEN6(dev)) {
+       if (IS_GEN6(dev_priv)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
                 *  policy. [...] This bit must be reset.  LRA replacement
@@ -1245,19 +1316,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
        }
 
-       if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
+       if (IS_GEN(dev_priv, 6, 7))
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-       if (HAS_L3_DPF(dev))
-               I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
+       if (INTEL_INFO(dev_priv)->gen >= 6)
+               I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 
        return init_workarounds_ring(engine);
 }
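
The open-coded generation checks in init_render_ring() become the inclusive
IS_GEN(dev_priv, from, to) range test; note the boundary flip, e.g.
"gen >= 4 && gen < 7" is the inclusive range 4..6. A standalone sketch of
the equivalence (plain C; is_gen_range() is an illustrative stand-in for
the macro):

#include <assert.h>

/* Inclusive range test mirroring IS_GEN(dev_priv, from, to). */
static int is_gen_range(int gen, int from, int to)
{
	return gen >= from && gen <= to;
}

int main(void)
{
	int gen;

	/* "gen >= 4 && gen < 7" matches the inclusive range [4, 6],
	 * and "gen >= 6 && gen < 8" matches [6, 7]. */
	for (gen = 2; gen <= 9; gen++) {
		assert((gen >= 4 && gen < 7) == is_gen_range(gen, 4, 6));
		assert((gen >= 6 && gen < 8) == is_gen_range(gen, 6, 7));
	}
	return 0;
}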
 
 static void render_ring_cleanup(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        if (dev_priv->semaphore_obj) {
                i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1273,13 +1343,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 8
        struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *waiter;
        enum intel_engine_id id;
        int ret, num_rings;
 
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1288,19 +1357,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_engine_id(waiter, dev_priv, id) {
-               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
                intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
                                           PIPE_CONTROL_QW_WRITE |
-                                          PIPE_CONTROL_FLUSH_ENABLE);
+                                          PIPE_CONTROL_CS_STALL);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset));
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, 0);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->hw_id));
@@ -1315,13 +1382,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 6
        struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *waiter;
        enum intel_engine_id id;
        int ret, num_rings;
 
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1330,18 +1396,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_engine_id(waiter, dev_priv, id) {
-               u32 seqno;
                u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
-               seqno = i915_gem_request_get_seqno(signaller_req);
                intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
                                           MI_FLUSH_DW_OP_STOREDW);
                intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
                                           MI_FLUSH_DW_USE_GTT);
                intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-               intel_ring_emit(signaller, seqno);
+               intel_ring_emit(signaller, signaller_req->seqno);
                intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
                                           MI_SEMAPHORE_TARGET(waiter->hw_id));
                intel_ring_emit(signaller, 0);
@@ -1354,14 +1418,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                       unsigned int num_dwords)
 {
        struct intel_engine_cs *signaller = signaller_req->engine;
-       struct drm_device *dev = signaller->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *useless;
        enum intel_engine_id id;
        int ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-       num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+       num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
        num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
@@ -1373,11 +1436,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
 
                if (i915_mmio_reg_valid(mbox_reg)) {
-                       u32 seqno = i915_gem_request_get_seqno(signaller_req);
-
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
                        intel_ring_emit_reg(signaller, mbox_reg);
-                       intel_ring_emit(signaller, seqno);
+                       intel_ring_emit(signaller, signaller_req->seqno);
                }
        }
 
@@ -1413,17 +1474,45 @@ gen6_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
        intel_ring_emit(engine,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+       intel_ring_emit(engine, req->seqno);
+       intel_ring_emit(engine, MI_USER_INTERRUPT);
+       __intel_ring_advance(engine);
+
+       return 0;
+}
+
+static int
+gen8_render_add_request(struct drm_i915_gem_request *req)
+{
+       struct intel_engine_cs *engine = req->engine;
+       int ret;
+
+       if (engine->semaphore.signal)
+               ret = engine->semaphore.signal(req, 8);
+       else
+               ret = intel_ring_begin(req, 8);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+       intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+                                PIPE_CONTROL_CS_STALL |
+                                PIPE_CONTROL_QW_WRITE));
+       intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
+       intel_ring_emit(engine, 0);
        intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       /* We're thrashing one dword of HWS. */
+       intel_ring_emit(engine, 0);
        intel_ring_emit(engine, MI_USER_INTERRUPT);
+       intel_ring_emit(engine, MI_NOOP);
        __intel_ring_advance(engine);
 
        return 0;
 }
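
gen8_render_add_request() fills exactly the eight dwords reserved by
intel_ring_begin(req, 8): a six-dword PIPE_CONTROL (whose qword write
clobbers the HWS dword next to the seqno, hence the "thrashing" comment),
the user interrupt, and a NOOP pad. A standalone sketch of that dword
budget; the two command encodings are illustrative:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t ring[8];
	int n = 0;

	ring[n++] = 0x7a000004;	/* GFX_OP_PIPE_CONTROL(6), header */
	ring[n++] = 0;		/* flags: GTT | CS_STALL | QW_WRITE */
	ring[n++] = 0;		/* HWS seqno address, low */
	ring[n++] = 0;		/* address, high */
	ring[n++] = 42;		/* seqno */
	ring[n++] = 0;		/* second half of the qword write */
	ring[n++] = 0x01000000;	/* MI_USER_INTERRUPT */
	ring[n++] = 0;		/* MI_NOOP pad to an even count */

	assert(n == 8);		/* matches intel_ring_begin(req, 8) */
	return 0;
}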
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
                                              u32 seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        return dev_priv->last_seqno < seqno;
 }
 
@@ -1441,7 +1530,9 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
               u32 seqno)
 {
        struct intel_engine_cs *waiter = waiter_req->engine;
-       struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+       struct drm_i915_private *dev_priv = waiter_req->i915;
+       u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+       struct i915_hw_ppgtt *ppgtt;
        int ret;
 
        ret = intel_ring_begin(waiter_req, 4);
@@ -1450,14 +1541,20 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 
        intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
                                MI_SEMAPHORE_GLOBAL_GTT |
-                               MI_SEMAPHORE_POLL |
                                MI_SEMAPHORE_SAD_GTE_SDD);
        intel_ring_emit(waiter, seqno);
-       intel_ring_emit(waiter,
-                       lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
-       intel_ring_emit(waiter,
-                       upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+       intel_ring_emit(waiter, lower_32_bits(offset));
+       intel_ring_emit(waiter, upper_32_bits(offset));
        intel_ring_advance(waiter);
+
+       /* When the !RCS engines idle waiting upon a semaphore, they lose their
+        * pagetables and we must reload them before executing the batch.
+        * We do this on the i915_switch_context() following the wait and
+        * before the dispatch.
+        */
+       ppgtt = waiter_req->ctx->ppgtt;
+       if (ppgtt && waiter_req->engine->id != RCS)
+               ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
        return 0;
 }
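
The pd_dirty_rings update above is plain dirty-bit tracking: set the
waiting engine's bit so the i915_switch_context() that follows the
semaphore wait knows to reload the page directories. A standalone sketch
of the bitmask idiom (ENGINE_FLAG() is an illustrative stand-in for
intel_engine_flag()):

#include <assert.h>
#include <stdint.h>

enum { RCS, VCS, BCS, VECS, VCS2 };
#define ENGINE_FLAG(id) (1u << (id))

int main(void)
{
	uint32_t pd_dirty_rings = 0;

	/* Mark VCS as needing a page-directory reload, as
	 * gen8_ring_sync() does for any !RCS waiter. */
	pd_dirty_rings |= ENGINE_FLAG(VCS);
	assert(pd_dirty_rings & ENGINE_FLAG(VCS));

	/* The later context switch reloads and clears the bit. */
	pd_dirty_rings &= ~ENGINE_FLAG(VCS);
	assert(pd_dirty_rings == 0);
	return 0;
}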
 
@@ -1486,7 +1583,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
                return ret;
 
        /* If seqno wrap happened, omit the wait with no-ops */
-       if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+       if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
                intel_ring_emit(waiter, dw1 | wait_mbox);
                intel_ring_emit(waiter, seqno);
                intel_ring_emit(waiter, 0);
@@ -1502,72 +1599,28 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
        return 0;
 }
 
-#define PIPE_CONTROL_FLUSH(ring__, addr__)                                     \
-do {                                                                   \
-       intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |                \
-                PIPE_CONTROL_DEPTH_STALL);                             \
-       intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);                    \
-       intel_ring_emit(ring__, 0);                                                     \
-       intel_ring_emit(ring__, 0);                                                     \
-} while (0)
-
-static int
-pc_render_add_request(struct drm_i915_gem_request *req)
+static void
+gen5_seqno_barrier(struct intel_engine_cs *ring)
 {
-       struct intel_engine_cs *engine = req->engine;
-       u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
-       int ret;
-
-       /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
-        * incoherent with writes to memory, i.e. completely fubar,
-        * so we need to use PIPE_NOTIFY instead.
+       /* MI_STORE commands are internally buffered by the GPU and are
+        * not flushed by MI_FLUSH, SyncFlush or any other combination
+        * of MI commands.
+        *
+        * "Only the submission of the store operation is guaranteed.
+        * The write result will be complete (coherent) some time later
+        * (this is practically a finite period but there is no guaranteed
+        * latency)."
         *
-        * However, we also need to workaround the qword write
-        * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
-        * memory before requesting an interrupt.
+        * Empirically, we observe that we need a delay of at least 75us to
+        * be sure that the seqno write is visible by the CPU.
         */
-       ret = intel_ring_begin(req, 32);
-       if (ret)
-               return ret;
-
-       intel_ring_emit(engine,
-                       GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
-                       PIPE_CONTROL_WRITE_FLUSH |
-                       PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
-       intel_ring_emit(engine,
-                       engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
-       intel_ring_emit(engine, 0);
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-       scratch_addr += 2 * CACHELINE_BYTES;
-       PIPE_CONTROL_FLUSH(engine, scratch_addr);
-
-       intel_ring_emit(engine,
-                       GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
-                       PIPE_CONTROL_WRITE_FLUSH |
-                       PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
-                       PIPE_CONTROL_NOTIFY);
-       intel_ring_emit(engine,
-                       engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
-       intel_ring_emit(engine, 0);
-       __intel_ring_advance(engine);
-
-       return 0;
+       usleep_range(125, 250);
 }
 
 static void
 gen6_seqno_barrier(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
@@ -1589,133 +1642,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
        spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
-static u32
-ring_get_seqno(struct intel_engine_cs *engine)
-{
-       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
 static void
-ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
+gen5_irq_enable(struct intel_engine_cs *engine)
 {
-       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-}
-
-static u32
-pc_render_get_seqno(struct intel_engine_cs *engine)
-{
-       return engine->scratch.cpu_page[0];
+       gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
-pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-       engine->scratch.cpu_page[0] = seqno;
-}
-
-static bool
-gen5_ring_get_irq(struct intel_engine_cs *engine)
+gen5_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0)
-               gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
-gen5_ring_put_irq(struct intel_engine_cs *engine)
-{
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0)
-               gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-}
-
-static bool
-i9xx_ring_get_irq(struct intel_engine_cs *engine)
+i9xx_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (!intel_irqs_enabled(dev_priv))
-               return false;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               dev_priv->irq_mask &= ~engine->irq_enable_mask;
-               I915_WRITE(IMR, dev_priv->irq_mask);
-               POSTING_READ(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       dev_priv->irq_mask &= ~engine->irq_enable_mask;
+       I915_WRITE(IMR, dev_priv->irq_mask);
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-i9xx_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               dev_priv->irq_mask |= engine->irq_enable_mask;
-               I915_WRITE(IMR, dev_priv->irq_mask);
-               POSTING_READ(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       dev_priv->irq_mask |= engine->irq_enable_mask;
+       I915_WRITE(IMR, dev_priv->irq_mask);
 }
 
-static bool
-i8xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i8xx_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (!intel_irqs_enabled(dev_priv))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               dev_priv->irq_mask &= ~engine->irq_enable_mask;
-               I915_WRITE16(IMR, dev_priv->irq_mask);
-               POSTING_READ16(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       return true;
+       dev_priv->irq_mask &= ~engine->irq_enable_mask;
+       I915_WRITE16(IMR, dev_priv->irq_mask);
+       POSTING_READ16(RING_IMR(engine->mmio_base));
 }
 
 static void
-i8xx_ring_put_irq(struct intel_engine_cs *engine)
+i8xx_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               dev_priv->irq_mask |= engine->irq_enable_mask;
-               I915_WRITE16(IMR, dev_priv->irq_mask);
-               POSTING_READ16(IMR);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       dev_priv->irq_mask |= engine->irq_enable_mask;
+       I915_WRITE16(IMR, dev_priv->irq_mask);
 }
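
Throughout this hunk the get/put handlers lose their refcount, spinlock and
irqs-enabled check because that bookkeeping now lives with a single caller
(the new breadcrumbs code); the per-engine hooks reduce to bare IMR writes.
A standalone sketch of hoisting the refcount out of the callbacks (locking
elided; all names illustrative):

#include <assert.h>

struct engine {
	int irq_refcount;
	int hw_irq_enabled;
	void (*irq_enable)(struct engine *);
	void (*irq_disable)(struct engine *);
};

/* The central caller owns the refcount (and, in the driver, the irq
 * spinlock); the hooks only touch "hardware". */
static void engine_irq_get(struct engine *e)
{
	if (e->irq_refcount++ == 0)
		e->irq_enable(e);
}

static void engine_irq_put(struct engine *e)
{
	if (--e->irq_refcount == 0)
		e->irq_disable(e);
}

static void hw_enable(struct engine *e)  { e->hw_irq_enabled = 1; }
static void hw_disable(struct engine *e) { e->hw_irq_enabled = 0; }

int main(void)
{
	struct engine e = { 0, 0, hw_enable, hw_disable };

	engine_irq_get(&e);
	engine_irq_get(&e);	/* nested get: no second hw write */
	engine_irq_put(&e);
	assert(e.hw_irq_enabled);
	engine_irq_put(&e);	/* last put disables */
	assert(!e.hw_irq_enabled);
	return 0;
}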
 
 static int
@@ -1749,135 +1723,68 @@ i9xx_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
        intel_ring_emit(engine,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+       intel_ring_emit(engine, req->seqno);
        intel_ring_emit(engine, MI_USER_INTERRUPT);
        __intel_ring_advance(engine);
 
        return 0;
 }
 
-static bool
-gen6_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen6_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS)
-                       I915_WRITE_IMR(engine,
-                                      ~(engine->irq_enable_mask |
-                                        GT_PARITY_ERROR(dev)));
-               else
-                       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-               gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       return true;
+       I915_WRITE_IMR(engine,
+                      ~(engine->irq_enable_mask |
+                        engine->irq_keep_mask));
+       gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-gen6_ring_put_irq(struct intel_engine_cs *engine)
+gen6_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS)
-                       I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
-               else
-                       I915_WRITE_IMR(engine, ~0);
-               gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+       gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-hsw_vebox_get_irq(struct intel_engine_cs *engine)
+static void
+hsw_vebox_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-               gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-       return true;
+       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+       gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-hsw_vebox_put_irq(struct intel_engine_cs *engine)
+hsw_vebox_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               I915_WRITE_IMR(engine, ~0);
-               gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~0);
+       gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-gen8_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen8_irq_enable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
-
-       if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-               return false;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (engine->irq_refcount++ == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS) {
-                       I915_WRITE_IMR(engine,
-                                      ~(engine->irq_enable_mask |
-                                        GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
-               } else {
-                       I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-               }
-               POSTING_READ(RING_IMR(engine->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       return true;
+       I915_WRITE_IMR(engine,
+                      ~(engine->irq_enable_mask |
+                        engine->irq_keep_mask));
+       POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-gen8_ring_put_irq(struct intel_engine_cs *engine)
+gen8_irq_disable(struct intel_engine_cs *engine)
 {
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       unsigned long flags;
+       struct drm_i915_private *dev_priv = engine->i915;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--engine->irq_refcount == 0) {
-               if (HAS_L3_DPF(dev) && engine->id == RCS) {
-                       I915_WRITE_IMR(engine,
-                                      ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-               } else {
-                       I915_WRITE_IMR(engine, ~0);
-               }
-               POSTING_READ(RING_IMR(engine->mmio_base));
-       }
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
 static int
@@ -1991,12 +1898,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 
 static void cleanup_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
 
        if (!dev_priv->status_page_dmah)
                return;
 
-       drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+       drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
        engine->status_page.page_addr = NULL;
 }
 
@@ -2022,10 +1929,10 @@ static int init_status_page(struct intel_engine_cs *engine)
                unsigned flags;
                int ret;
 
-               obj = i915_gem_alloc_object(engine->dev, 4096);
-               if (obj == NULL) {
+               obj = i915_gem_object_create(&engine->i915->drm, 4096);
+               if (IS_ERR(obj)) {
                        DRM_ERROR("Failed to allocate status page\n");
-                       return -ENOMEM;
+                       return PTR_ERR(obj);
                }
 
                ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2033,7 +1940,7 @@ static int init_status_page(struct intel_engine_cs *engine)
                        goto err_unref;
 
                flags = 0;
-               if (!HAS_LLC(engine->dev))
+               if (!HAS_LLC(engine->i915))
                        /* On g33, we cannot place HWS above 256MiB, so
                         * restrict its pinning to the low mappable arena.
                         * Though this restriction is not documented for
@@ -2067,11 +1974,11 @@ err_unref:
 
 static int init_phys_status_page(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        if (!dev_priv->status_page_dmah) {
                dev_priv->status_page_dmah =
-                       drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
+                       drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
                if (!dev_priv->status_page_dmah)
                        return -ENOMEM;
        }
@@ -2084,20 +1991,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
+       GEM_BUG_ON(ringbuf->vma == NULL);
+       GEM_BUG_ON(ringbuf->virtual_start == NULL);
+
        if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
                i915_gem_object_unpin_map(ringbuf->obj);
        else
-               iounmap(ringbuf->virtual_start);
+               i915_vma_unpin_iomap(ringbuf->vma);
        ringbuf->virtual_start = NULL;
-       ringbuf->vma = NULL;
+
        i915_gem_object_ggtt_unpin(ringbuf->obj);
+       ringbuf->vma = NULL;
 }
 
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
                                     struct intel_ringbuffer *ringbuf)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_object *obj = ringbuf->obj;
        /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
        unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2131,10 +2040,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                /* Access through the GTT requires the device to be awake. */
                assert_rpm_wakelock_held(dev_priv);
 
-               addr = ioremap_wc(ggtt->mappable_base +
-                                 i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-               if (addr == NULL) {
-                       ret = -ENOMEM;
+               addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
+               if (IS_ERR(addr)) {
+                       ret = PTR_ERR(addr);
                        goto err_unpin;
                }
        }
@@ -2163,9 +2071,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
        if (!HAS_LLC(dev))
                obj = i915_gem_object_create_stolen(dev, ringbuf->size);
        if (obj == NULL)
-               obj = i915_gem_alloc_object(dev, ringbuf->size);
-       if (obj == NULL)
-               return -ENOMEM;
+               obj = i915_gem_object_create(dev, ringbuf->size);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        /* mark ring buffers as read-only from GPU side by default */
        obj->gt_ro = 1;
@@ -2197,13 +2105,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
         * of the buffer.
         */
        ring->effective_size = size;
-       if (IS_I830(engine->dev) || IS_845G(engine->dev))
+       if (IS_I830(engine->i915) || IS_845G(engine->i915))
                ring->effective_size -= 2 * CACHELINE_BYTES;
 
        ring->last_retired_head = -1;
        intel_ring_update_space(ring);
 
-       ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+       ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
                                 engine->name, ret);
@@ -2223,15 +2131,67 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
        kfree(ring);
 }
 
+static int intel_ring_context_pin(struct i915_gem_context *ctx,
+                                 struct intel_engine_cs *engine)
+{
+       struct intel_context *ce = &ctx->engine[engine->id];
+       int ret;
+
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+       if (ce->pin_count++)
+               return 0;
+
+       if (ce->state) {
+               ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+               if (ret)
+                       goto error;
+       }
+
+       /* The kernel context is only used as a placeholder for flushing the
+        * active context. It is never used for submitting user rendering and
+        * as such never requires the golden render context, and so we can skip
+        * emitting it when we switch to the kernel context. This is required
+        * as during eviction we cannot allocate and pin the renderstate in
+        * order to initialise the context.
+        */
+       if (ctx == ctx->i915->kernel_context)
+               ce->initialised = true;
+
+       i915_gem_context_reference(ctx);
+       return 0;
+
+error:
+       ce->pin_count = 0;
+       return ret;
+}
+
+static void intel_ring_context_unpin(struct i915_gem_context *ctx,
+                                    struct intel_engine_cs *engine)
+{
+       struct intel_context *ce = &ctx->engine[engine->id];
+
+       lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+       if (--ce->pin_count)
+               return;
+
+       if (ce->state)
+               i915_gem_object_ggtt_unpin(ce->state);
+
+       i915_gem_context_unreference(ctx);
+}
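
intel_ring_context_pin()/intel_ring_context_unpin() above are a standard
pin-count pair: only the first pin pins the state and takes a context
reference, later pins just increment, and the last unpin releases both.
A standalone sketch of the idiom (illustrative types, not the driver's):

#include <assert.h>

struct ctx {
	int pin_count;
	int resident;	/* stands in for the GGTT-pinned state */
};

static int ctx_pin(struct ctx *c)
{
	if (c->pin_count++)
		return 0;	/* already pinned */
	c->resident = 1;	/* first pin does the real work */
	return 0;
}

static void ctx_unpin(struct ctx *c)
{
	assert(c->pin_count > 0);
	if (--c->pin_count)
		return;
	c->resident = 0;	/* last unpin releases */
}

int main(void)
{
	struct ctx c = { 0, 0 };

	ctx_pin(&c);
	ctx_pin(&c);
	ctx_unpin(&c);
	assert(c.resident);	/* still held by the first pin */
	ctx_unpin(&c);
	assert(!c.resident);
	return 0;
}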
+
 static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_ringbuffer *ringbuf;
        int ret;
 
        WARN_ON(engine->buffer);
 
-       engine->dev = dev;
+       engine->i915 = dev_priv;
        INIT_LIST_HEAD(&engine->active_list);
        INIT_LIST_HEAD(&engine->request_list);
        INIT_LIST_HEAD(&engine->execlist_queue);
@@ -2240,7 +2200,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        memset(engine->semaphore.sync_seqno, 0,
               sizeof(engine->semaphore.sync_seqno));
 
-       init_waitqueue_head(&engine->irq_queue);
+       ret = intel_engine_init_breadcrumbs(engine);
+       if (ret)
+               goto error;
+
+       /* We may need to do things with the shrinker which
+        * require us to immediately switch back to the default
+        * context. This can cause a problem as pinning the
+        * default context also requires GTT space which may not
+        * be available. To avoid this we always pin the default
+        * context.
+        */
+       ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
+       if (ret)
+               goto error;
 
        ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
        if (IS_ERR(ringbuf)) {
@@ -2249,7 +2222,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
        }
        engine->buffer = ringbuf;
 
-       if (I915_NEED_GFX_HWS(dev)) {
+       if (I915_NEED_GFX_HWS(dev_priv)) {
                ret = init_status_page(engine);
                if (ret)
                        goto error;
@@ -2260,7 +2233,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                        goto error;
        }
 
-       ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+       ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
        if (ret) {
                DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
                                engine->name, ret);
@@ -2286,11 +2259,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
        if (!intel_engine_initialized(engine))
                return;
 
-       dev_priv = to_i915(engine->dev);
+       dev_priv = engine->i915;
 
        if (engine->buffer) {
                intel_stop_engine(engine);
-               WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+               WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
                intel_unpin_ringbuffer_obj(engine->buffer);
                intel_ringbuffer_free(engine->buffer);
@@ -2300,7 +2273,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
        if (engine->cleanup)
                engine->cleanup(engine);
 
-       if (I915_NEED_GFX_HWS(engine->dev)) {
+       if (I915_NEED_GFX_HWS(dev_priv)) {
                cleanup_status_page(engine);
        } else {
                WARN_ON(engine->id != RCS);
@@ -2309,7 +2282,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 
        i915_cmd_parser_fini_ring(engine);
        i915_gem_batch_pool_fini(&engine->batch_pool);
-       engine->dev = NULL;
+       intel_engine_fini_breadcrumbs(engine);
+
+       intel_ring_context_unpin(dev_priv->kernel_context, engine);
+
+       engine->i915 = NULL;
 }
 
 int intel_engine_idle(struct intel_engine_cs *engine)
@@ -2332,46 +2309,22 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-       request->ringbuf = request->engine->buffer;
-       return 0;
-}
+       int ret;
 
-int intel_ring_reserve_space(struct drm_i915_gem_request *request)
-{
-       /*
-        * The first call merely notes the reserve request and is common for
-        * all back ends. The subsequent localised _begin() call actually
-        * ensures that the reservation is available. Without the begin, if
-        * the request creator immediately submitted the request without
-        * adding any commands to it then there might not actually be
-        * sufficient room for the submission commands.
+       /* Flush enough space to reduce the likelihood of waiting after
+        * we start building the request - in which case we will just
+        * have to repeat work.
         */
-       intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
-
-       return intel_ring_begin(request, 0);
-}
-
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
-{
-       GEM_BUG_ON(ringbuf->reserved_size);
-       ringbuf->reserved_size = size;
-}
+       request->reserved_space += LEGACY_REQUEST_SIZE;
 
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
-{
-       GEM_BUG_ON(!ringbuf->reserved_size);
-       ringbuf->reserved_size   = 0;
-}
+       request->ringbuf = request->engine->buffer;
 
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
-{
-       GEM_BUG_ON(!ringbuf->reserved_size);
-       ringbuf->reserved_size   = 0;
-}
+       ret = intel_ring_begin(request, 0);
+       if (ret)
+               return ret;
 
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
-{
-       GEM_BUG_ON(ringbuf->reserved_size);
+       request->reserved_space -= LEGACY_REQUEST_SIZE;
+       return 0;
 }
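
The rewrite above drops the reserve/cancel/use bookkeeping in favour of a
simpler trick: temporarily inflate request->reserved_space, let
intel_ring_begin(request, 0) wait for that much room up front, then deflate
it so the space stays earmarked for the request tail. A standalone sketch
of the idea (the ring types are illustrative stand-ins):

#include <assert.h>

#define LEGACY_REQUEST_SIZE 200	/* space held back for the tail */

struct request {
	int reserved_space;
	int space_flushed;	/* stands in for ring space waited for */
};

/* Stand-in for intel_ring_begin(req, 0): guarantees the requested
 * bytes plus the reservation are available before emitting. */
static int ring_begin(struct request *req, int bytes)
{
	int need = bytes + req->reserved_space;

	if (req->space_flushed < need)
		req->space_flushed = need;	/* "wait" for space */
	return 0;
}

static int alloc_request_extras(struct request *req)
{
	int ret;

	req->reserved_space += LEGACY_REQUEST_SIZE;
	ret = ring_begin(req, 0);	/* flush space up front */
	if (ret)
		return ret;
	req->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

int main(void)
{
	struct request req = { 0, 0 };

	assert(alloc_request_extras(&req) == 0);
	assert(req.space_flushed >= LEGACY_REQUEST_SIZE);
	assert(req.reserved_space == 0);
	return 0;
}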
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
@@ -2393,7 +2346,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
         *
         * See also i915_gem_request_alloc() and i915_add_request().
         */
-       GEM_BUG_ON(!ringbuf->reserved_size);
+       GEM_BUG_ON(!req->reserved_space);
 
        list_for_each_entry(target, &engine->request_list, list) {
                unsigned space;
@@ -2428,7 +2381,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
        int total_bytes, wait_bytes;
        bool need_wrap = false;
 
-       total_bytes = bytes + ringbuf->reserved_size;
+       total_bytes = bytes + req->reserved_space;
 
        if (unlikely(bytes > remain_usable)) {
                /*
@@ -2444,7 +2397,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
                 * and only need to effectively wait for the reserved
                 * size space from the start of ringbuffer.
                 */
-               wait_bytes = remain_actual + ringbuf->reserved_size;
+               wait_bytes = remain_actual + req->reserved_space;
        } else {
                /* No wrapping required, just waiting. */
                wait_bytes = total_bytes;
@@ -2501,7 +2454,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
+       struct drm_i915_private *dev_priv = engine->i915;
 
        /* Our semaphore implementation is strictly monotonic (i.e. we proceed
         * so long as the semaphore value in the register/page is greater
@@ -2511,7 +2464,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
         * the semaphore value, then when the seqno moves backwards all
         * future waits will complete instantly (causing rendering corruption).
         */
-       if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
+       if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
                I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
                if (HAS_VEBOX(dev_priv))
@@ -2528,43 +2481,58 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
        memset(engine->semaphore.sync_seqno, 0,
               sizeof(engine->semaphore.sync_seqno));
 
-       engine->set_seqno(engine, seqno);
+       intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+       if (engine->irq_seqno_barrier)
+               engine->irq_seqno_barrier(engine);
        engine->last_submitted_seqno = seqno;
 
        engine->hangcheck.seqno = seqno;
+
+       /* After manually advancing the seqno, fake the interrupt in case
+        * there are any waiters for that seqno.
+        */
+       rcu_read_lock();
+       intel_engine_wakeup(engine);
+       rcu_read_unlock();
 }
 
 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
                                     u32 value)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
+
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        /* Every tail move must follow the sequence below */
 
        /* Disable notification that the ring is IDLE. The GT
         * will then assume that it is busy and bring it out of rc6.
         */
-       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+       I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+                     _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 
        /* Clear the context id. Here be magic! */
-       I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+       I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
 
        /* Wait for the ring not to be idle, i.e. for it to wake up. */
-       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
-                     GEN6_BSD_SLEEP_INDICATOR) == 0,
-                    50))
+       if (intel_wait_for_register_fw(dev_priv,
+                                      GEN6_BSD_SLEEP_PSMI_CONTROL,
+                                      GEN6_BSD_SLEEP_INDICATOR,
+                                      0,
+                                      50))
                DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
        /* Now that the ring is fully powered up, update the tail */
-       I915_WRITE_TAIL(engine, value);
-       POSTING_READ(RING_TAIL(engine->mmio_base));
+       I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
+       POSTING_READ_FW(RING_TAIL(engine->mmio_base));
 
        /* Let the ring send IDLE messages to the GT again,
         * and so let it sleep to conserve power when idle.
         */
-       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-                  _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+       I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
+                     _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
@@ -2579,7 +2547,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (INTEL_INFO(engine->dev)->gen >= 8)
+       if (INTEL_GEN(req->i915) >= 8)
                cmd += 1;
 
        /* We always require a command barrier so that subsequent
@@ -2601,7 +2569,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
        intel_ring_emit(engine, cmd);
        intel_ring_emit(engine,
                        I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-       if (INTEL_INFO(engine->dev)->gen >= 8) {
+       if (INTEL_GEN(req->i915) >= 8) {
                intel_ring_emit(engine, 0); /* upper addr */
                intel_ring_emit(engine, 0); /* value */
        } else  {
@@ -2692,7 +2660,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
                           u32 invalidate, u32 flush)
 {
        struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
        uint32_t cmd;
        int ret;
 
@@ -2701,7 +2668,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (INTEL_INFO(dev)->gen >= 8)
+       if (INTEL_GEN(req->i915) >= 8)
                cmd += 1;
 
        /* We always require a command barrier so that subsequent
@@ -2722,7 +2689,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
        intel_ring_emit(engine, cmd);
        intel_ring_emit(engine,
                        I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_GEN(req->i915) >= 8) {
                intel_ring_emit(engine, 0); /* upper addr */
                intel_ring_emit(engine, 0); /* value */
        } else  {
@@ -2734,11 +2701,159 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
        return 0;
 }
 
+static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
+                                      struct intel_engine_cs *engine)
+{
+       struct drm_i915_gem_object *obj;
+       int ret, i;
+
+       if (!i915_semaphore_is_enabled(dev_priv))
+               return;
+
+       if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
+               obj = i915_gem_object_create(&dev_priv->drm, 4096);
+               if (IS_ERR(obj)) {
+                       DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+                       i915.semaphores = 0;
+               } else {
+                       i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+                       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+                       if (ret != 0) {
+                               drm_gem_object_unreference(&obj->base);
+                               DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+                               i915.semaphores = 0;
+                       } else {
+                               dev_priv->semaphore_obj = obj;
+                       }
+               }
+       }
+
+       if (!i915_semaphore_is_enabled(dev_priv))
+               return;
+
+       if (INTEL_GEN(dev_priv) >= 8) {
+               u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
+
+               engine->semaphore.sync_to = gen8_ring_sync;
+               engine->semaphore.signal = gen8_xcs_signal;
+
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
+                       u64 ring_offset;
+
+                       if (i != engine->id)
+                               ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
+                       else
+                               ring_offset = MI_SEMAPHORE_SYNC_INVALID;
+
+                       engine->semaphore.signal_ggtt[i] = ring_offset;
+               }
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               engine->semaphore.sync_to = gen6_ring_sync;
+               engine->semaphore.signal = gen6_signal;
+
+               /*
+                * These semaphores are only used on pre-gen8 platforms,
+                * and there is no VCS2 ring before gen8, so the
+                * semaphore between RCS and VCS2 is initialized as
+                * INVALID; the gen8 branch above sets up its own
+                * signalling offsets instead.
+                */
+               for (i = 0; i < I915_NUM_ENGINES; i++) {
+                       static const struct {
+                               u32 wait_mbox;
+                               i915_reg_t mbox_reg;
+                       } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
+                               [RCS] = {
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
+                               },
+                               [VCS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
+                               },
+                               [BCS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
+                                       [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
+                               },
+                               [VECS] = {
+                                       [RCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
+                                       [VCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
+                                       [BCS] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
+                               },
+                       };
+                       u32 wait_mbox;
+                       i915_reg_t mbox_reg;
+
+                       if (i == engine->id || i == VCS2) {
+                               wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
+                               mbox_reg = GEN6_NOSYNC;
+                       } else {
+                               wait_mbox = sem_data[engine->id][i].wait_mbox;
+                               mbox_reg = sem_data[engine->id][i].mbox_reg;
+                       }
+
+                       engine->semaphore.mbox.wait[i] = wait_mbox;
+                       engine->semaphore.mbox.signal[i] = mbox_reg;
+               }
+       }
+}
+
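For example, given the table above, the RCS engine's pairing with VCS
resolves to

        wait_mbox = sem_data[RCS][VCS].wait_mbox;       /* MI_SEMAPHORE_SYNC_RV */
        mbox_reg  = sem_data[RCS][VCS].mbox_reg;        /* GEN6_VRSYNC */

while any pairing of an engine with itself or with VCS2 falls back to
MI_SEMAPHORE_SYNC_INVALID / GEN6_NOSYNC, matching the open-coded per-engine
assignments this table replaces.
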
+static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
+                               struct intel_engine_cs *engine)
+{
+       if (INTEL_GEN(dev_priv) >= 8) {
+               engine->irq_enable = gen8_irq_enable;
+               engine->irq_disable = gen8_irq_disable;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 6) {
+               engine->irq_enable = gen6_irq_enable;
+               engine->irq_disable = gen6_irq_disable;
+               engine->irq_seqno_barrier = gen6_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 5) {
+               engine->irq_enable = gen5_irq_enable;
+               engine->irq_disable = gen5_irq_disable;
+               engine->irq_seqno_barrier = gen5_seqno_barrier;
+       } else if (INTEL_GEN(dev_priv) >= 3) {
+               engine->irq_enable = i9xx_irq_enable;
+               engine->irq_disable = i9xx_irq_disable;
+       } else {
+               engine->irq_enable = i8xx_irq_enable;
+               engine->irq_disable = i8xx_irq_disable;
+       }
+}
+
+static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
+                                     struct intel_engine_cs *engine)
+{
+       engine->init_hw = init_ring_common;
+       engine->write_tail = ring_write_tail;
+
+       engine->add_request = i9xx_add_request;
+       if (INTEL_GEN(dev_priv) >= 6)
+               engine->add_request = gen6_add_request;
+
+       if (INTEL_GEN(dev_priv) >= 8)
+               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+       else if (INTEL_GEN(dev_priv) >= 6)
+               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+       else if (INTEL_GEN(dev_priv) >= 4)
+               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
+       else if (IS_I830(dev_priv) || IS_845G(dev_priv))
+               engine->dispatch_execbuffer = i830_dispatch_execbuffer;
+       else
+               engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+
+       intel_ring_init_irq(dev_priv, engine);
+       intel_ring_init_semaphores(dev_priv, engine);
+}
+
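The engine constructors below all follow the same shape: install the common
defaults via intel_ring_default_vfuncs(), then override only the hooks that
differ for that engine. A minimal standalone sketch of that pattern, using
hypothetical names rather than the driver's types:

        #include <stdio.h>

        struct engine {
                const char *name;
                void (*flush)(void);
        };

        static void default_flush(void) { puts("common flush"); }
        static void render_flush(void)  { puts("render-specific flush"); }

        static void set_default_vfuncs(struct engine *e)
        {
                e->flush = default_flush;       /* common default */
        }

        int main(void)
        {
                struct engine rcs = { .name = "render" };

                set_default_vfuncs(&rcs);       /* defaults first... */
                rcs.flush = render_flush;       /* ...then per-engine override */
                rcs.flush();
                return 0;
        }
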
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[RCS];
-       struct drm_i915_gem_object *obj;
        int ret;
 
        engine->name = "render ring";
@@ -2747,140 +2862,49 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        engine->hw_id = 0;
        engine->mmio_base = RENDER_RING_BASE;
 
-       if (INTEL_INFO(dev)->gen >= 8) {
-               if (i915_semaphore_is_enabled(dev)) {
-                       obj = i915_gem_alloc_object(dev, 4096);
-                       if (obj == NULL) {
-                               DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
-                               i915.semaphores = 0;
-                       } else {
-                               i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-                               ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
-                               if (ret != 0) {
-                                       drm_gem_object_unreference(&obj->base);
-                                       DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
-                                       i915.semaphores = 0;
-                               } else
-                                       dev_priv->semaphore_obj = obj;
-                       }
-               }
+       intel_ring_default_vfuncs(dev_priv, engine);
 
+       engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+       if (HAS_L3_DPF(dev_priv))
+               engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
+       if (INTEL_GEN(dev_priv) >= 8) {
                engine->init_context = intel_rcs_ctx_init;
-               engine->add_request = gen6_add_request;
+               engine->add_request = gen8_render_add_request;
                engine->flush = gen8_render_ring_flush;
-               engine->irq_get = gen8_ring_get_irq;
-               engine->irq_put = gen8_ring_put_irq;
-               engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-               engine->irq_seqno_barrier = gen6_seqno_barrier;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (i915_semaphore_is_enabled(dev)) {
-                       WARN_ON(!dev_priv->semaphore_obj);
-                       engine->semaphore.sync_to = gen8_ring_sync;
+               if (i915_semaphore_is_enabled(dev_priv))
                        engine->semaphore.signal = gen8_rcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT(engine);
-               }
-       } else if (INTEL_INFO(dev)->gen >= 6) {
+       } else if (INTEL_GEN(dev_priv) >= 6) {
                engine->init_context = intel_rcs_ctx_init;
-               engine->add_request = gen6_add_request;
                engine->flush = gen7_render_ring_flush;
-               if (INTEL_INFO(dev)->gen == 6)
+               if (IS_GEN6(dev_priv))
                        engine->flush = gen6_render_ring_flush;
-               engine->irq_get = gen6_ring_get_irq;
-               engine->irq_put = gen6_ring_put_irq;
-               engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
-               engine->irq_seqno_barrier = gen6_seqno_barrier;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (i915_semaphore_is_enabled(dev)) {
-                       engine->semaphore.sync_to = gen6_ring_sync;
-                       engine->semaphore.signal = gen6_signal;
-                       /*
-                        * The current semaphore is only applied on pre-gen8
-                        * platform.  And there is no VCS2 ring on the pre-gen8
-                        * platform. So the semaphore between RCS and VCS2 is
-                        * initialized as INVALID.  Gen8 will initialize the
-                        * sema between VCS2 and RCS later.
-                        */
-                       engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
-                       engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
-                       engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
-                       engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
-                       engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
-                       engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
-                       engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
-                       engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
-       } else if (IS_GEN5(dev)) {
-               engine->add_request = pc_render_add_request;
+       } else if (IS_GEN5(dev_priv)) {
                engine->flush = gen4_render_ring_flush;
-               engine->get_seqno = pc_render_get_seqno;
-               engine->set_seqno = pc_render_set_seqno;
-               engine->irq_get = gen5_ring_get_irq;
-               engine->irq_put = gen5_ring_put_irq;
-               engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
-                                       GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
        } else {
-               engine->add_request = i9xx_add_request;
-               if (INTEL_INFO(dev)->gen < 4)
+               if (INTEL_GEN(dev_priv) < 4)
                        engine->flush = gen2_render_ring_flush;
                else
                        engine->flush = gen4_render_ring_flush;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (IS_GEN2(dev)) {
-                       engine->irq_get = i8xx_ring_get_irq;
-                       engine->irq_put = i8xx_ring_put_irq;
-               } else {
-                       engine->irq_get = i9xx_ring_get_irq;
-                       engine->irq_put = i9xx_ring_put_irq;
-               }
                engine->irq_enable_mask = I915_USER_INTERRUPT;
        }
-       engine->write_tail = ring_write_tail;
 
-       if (IS_HASWELL(dev))
+       if (IS_HASWELL(dev_priv))
                engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
-       else if (IS_GEN8(dev))
-               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-       else if (INTEL_INFO(dev)->gen >= 6)
-               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-       else if (INTEL_INFO(dev)->gen >= 4)
-               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
-       else if (IS_I830(dev) || IS_845G(dev))
-               engine->dispatch_execbuffer = i830_dispatch_execbuffer;
-       else
-               engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+
        engine->init_hw = init_render_ring;
        engine->cleanup = render_ring_cleanup;
 
-       /* Workaround batchbuffer to combat CS tlb bug. */
-       if (HAS_BROKEN_CS_TLB(dev)) {
-               obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
-               if (obj == NULL) {
-                       DRM_ERROR("Failed to allocate batch bo\n");
-                       return -ENOMEM;
-               }
-
-               ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
-               if (ret != 0) {
-                       drm_gem_object_unreference(&obj->base);
-                       DRM_ERROR("Failed to ping batch bo\n");
-                       return ret;
-               }
-
-               engine->scratch.obj = obj;
-               engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
-       }
-
        ret = intel_init_ring_buffer(dev, engine);
        if (ret)
                return ret;
 
-       if (INTEL_INFO(dev)->gen >= 5) {
-               ret = intel_init_pipe_control(engine);
+       if (INTEL_GEN(dev_priv) >= 6) {
+               ret = intel_init_pipe_control(engine, 4096);
+               if (ret)
+                       return ret;
+       } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
+               ret = intel_init_pipe_control(engine, I830_WA_SIZE);
                if (ret)
                        return ret;
        }
@@ -2890,7 +2914,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[VCS];
 
        engine->name = "bsd ring";
@@ -2898,68 +2922,27 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
        engine->exec_id = I915_EXEC_BSD;
        engine->hw_id = 1;
 
-       engine->write_tail = ring_write_tail;
-       if (INTEL_INFO(dev)->gen >= 6) {
+       intel_ring_default_vfuncs(dev_priv, engine);
+
+       if (INTEL_GEN(dev_priv) >= 6) {
                engine->mmio_base = GEN6_BSD_RING_BASE;
                /* gen6 bsd needs a special wa for tail updates */
-               if (IS_GEN6(dev))
+               if (IS_GEN6(dev_priv))
                        engine->write_tail = gen6_bsd_ring_write_tail;
                engine->flush = gen6_bsd_ring_flush;
-               engine->add_request = gen6_add_request;
-               engine->irq_seqno_barrier = gen6_seqno_barrier;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (INTEL_INFO(dev)->gen >= 8) {
+               if (INTEL_GEN(dev_priv) >= 8)
                        engine->irq_enable_mask =
                                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-                       engine->irq_get = gen8_ring_get_irq;
-                       engine->irq_put = gen8_ring_put_irq;
-                       engine->dispatch_execbuffer =
-                               gen8_ring_dispatch_execbuffer;
-                       if (i915_semaphore_is_enabled(dev)) {
-                               engine->semaphore.sync_to = gen8_ring_sync;
-                               engine->semaphore.signal = gen8_xcs_signal;
-                               GEN8_RING_SEMAPHORE_INIT(engine);
-                       }
-               } else {
+               else
                        engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-                       engine->irq_get = gen6_ring_get_irq;
-                       engine->irq_put = gen6_ring_put_irq;
-                       engine->dispatch_execbuffer =
-                               gen6_ring_dispatch_execbuffer;
-                       if (i915_semaphore_is_enabled(dev)) {
-                               engine->semaphore.sync_to = gen6_ring_sync;
-                               engine->semaphore.signal = gen6_signal;
-                               engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
-                               engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-                               engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
-                               engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
-                               engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                               engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
-                               engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
-                               engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
-                               engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
-                               engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-                       }
-               }
        } else {
                engine->mmio_base = BSD_RING_BASE;
                engine->flush = bsd_ring_flush;
-               engine->add_request = i9xx_add_request;
-               engine->get_seqno = ring_get_seqno;
-               engine->set_seqno = ring_set_seqno;
-               if (IS_GEN5(dev)) {
+               if (IS_GEN5(dev_priv))
                        engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
-                       engine->irq_get = gen5_ring_get_irq;
-                       engine->irq_put = gen5_ring_put_irq;
-               } else {
+               else
                        engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
-                       engine->irq_get = i9xx_ring_get_irq;
-                       engine->irq_put = i9xx_ring_put_irq;
-               }
-               engine->dispatch_execbuffer = i965_dispatch_execbuffer;
        }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
@@ -2969,147 +2952,70 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
  */
 int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
 
        engine->name = "bsd2 ring";
        engine->id = VCS2;
        engine->exec_id = I915_EXEC_BSD;
        engine->hw_id = 4;
-
-       engine->write_tail = ring_write_tail;
        engine->mmio_base = GEN8_BSD2_RING_BASE;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
        engine->flush = gen6_bsd_ring_flush;
-       engine->add_request = gen6_add_request;
-       engine->irq_seqno_barrier = gen6_seqno_barrier;
-       engine->get_seqno = ring_get_seqno;
-       engine->set_seqno = ring_set_seqno;
        engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-       engine->irq_get = gen8_ring_get_irq;
-       engine->irq_put = gen8_ring_put_irq;
-       engine->dispatch_execbuffer =
-                       gen8_ring_dispatch_execbuffer;
-       if (i915_semaphore_is_enabled(dev)) {
-               engine->semaphore.sync_to = gen8_ring_sync;
-               engine->semaphore.signal = gen8_xcs_signal;
-               GEN8_RING_SEMAPHORE_INIT(engine);
-       }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
 
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[BCS];
 
        engine->name = "blitter ring";
        engine->id = BCS;
        engine->exec_id = I915_EXEC_BLT;
        engine->hw_id = 2;
-
        engine->mmio_base = BLT_RING_BASE;
-       engine->write_tail = ring_write_tail;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
        engine->flush = gen6_ring_flush;
-       engine->add_request = gen6_add_request;
-       engine->irq_seqno_barrier = gen6_seqno_barrier;
-       engine->get_seqno = ring_get_seqno;
-       engine->set_seqno = ring_set_seqno;
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_GEN(dev_priv) >= 8)
                engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-               engine->irq_get = gen8_ring_get_irq;
-               engine->irq_put = gen8_ring_put_irq;
-               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       engine->semaphore.sync_to = gen8_ring_sync;
-                       engine->semaphore.signal = gen8_xcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT(engine);
-               }
-       } else {
+       else
                engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-               engine->irq_get = gen6_ring_get_irq;
-               engine->irq_put = gen6_ring_put_irq;
-               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       engine->semaphore.signal = gen6_signal;
-                       engine->semaphore.sync_to = gen6_ring_sync;
-                       /*
-                        * The current semaphore is only applied on pre-gen8
-                        * platform.  And there is no VCS2 ring on the pre-gen8
-                        * platform. So the semaphore between BCS and VCS2 is
-                        * initialized as INVALID.  Gen8 will initialize the
-                        * sema between BCS and VCS2 later.
-                        */
-                       engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
-                       engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
-                       engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
-                       engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
-                       engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
-                       engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
-                       engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
-                       engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
-       }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
 
 int intel_init_vebox_ring_buffer(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine = &dev_priv->engine[VECS];
 
        engine->name = "video enhancement ring";
        engine->id = VECS;
        engine->exec_id = I915_EXEC_VEBOX;
        engine->hw_id = 3;
-
        engine->mmio_base = VEBOX_RING_BASE;
-       engine->write_tail = ring_write_tail;
+
+       intel_ring_default_vfuncs(dev_priv, engine);
+
        engine->flush = gen6_ring_flush;
-       engine->add_request = gen6_add_request;
-       engine->irq_seqno_barrier = gen6_seqno_barrier;
-       engine->get_seqno = ring_get_seqno;
-       engine->set_seqno = ring_set_seqno;
 
-       if (INTEL_INFO(dev)->gen >= 8) {
+       if (INTEL_GEN(dev_priv) >= 8) {
                engine->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-               engine->irq_get = gen8_ring_get_irq;
-               engine->irq_put = gen8_ring_put_irq;
-               engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       engine->semaphore.sync_to = gen8_ring_sync;
-                       engine->semaphore.signal = gen8_xcs_signal;
-                       GEN8_RING_SEMAPHORE_INIT(engine);
-               }
        } else {
                engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-               engine->irq_get = hsw_vebox_get_irq;
-               engine->irq_put = hsw_vebox_put_irq;
-               engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-               if (i915_semaphore_is_enabled(dev)) {
-                       engine->semaphore.sync_to = gen6_ring_sync;
-                       engine->semaphore.signal = gen6_signal;
-                       engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
-                       engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
-                       engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
-                       engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
-                       engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
-                       engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
-                       engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
-                       engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
-                       engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-               }
+               engine->irq_enable = hsw_vebox_irq_enable;
+               engine->irq_disable = hsw_vebox_irq_disable;
        }
-       engine->init_hw = init_ring_common;
 
        return intel_init_ring_buffer(dev, engine);
 }
index ff126485d398aaa0ffbaa104861a446e23b035b1..12cb7ed90014bd7a1b974a026b73487d0d3e2d1a 100644 (file)
@@ -62,18 +62,6 @@ struct  intel_hw_status_page {
        (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
         GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
 
-#define GEN8_RING_SEMAPHORE_INIT(e) do { \
-       if (!dev_priv->semaphore_obj) { \
-               break; \
-       } \
-       (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
-       (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
-       (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
-       (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
-       (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
-       (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
-       } while(0)
-
 enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
@@ -86,8 +74,8 @@ enum intel_ring_hangcheck_action {
 
 struct intel_ring_hangcheck {
        u64 acthd;
+       unsigned long user_interrupts;
        u32 seqno;
-       unsigned user_interrupts;
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
@@ -107,7 +95,6 @@ struct intel_ringbuffer {
        int space;
        int size;
        int effective_size;
-       int reserved_size;
 
        /** We track the position of the requests in the ring buffer, and
         * when each is retired we increment last_retired_head as the GPU
@@ -120,7 +107,7 @@ struct intel_ringbuffer {
        u32 last_retired_head;
 };
 
-struct intel_context;
+struct i915_gem_context;
 struct drm_i915_reg_table;
 
 /*
@@ -142,7 +129,10 @@ struct  i915_ctx_workarounds {
        struct drm_i915_gem_object *obj;
 };
 
-struct  intel_engine_cs {
+struct drm_i915_gem_request;
+
+struct intel_engine_cs {
+       struct drm_i915_private *i915;
        const char      *name;
        enum intel_engine_id {
                RCS = 0,
@@ -157,10 +147,42 @@ struct  intel_engine_cs {
        unsigned int hw_id;
        unsigned int guc_id; /* XXX same as hw_id? */
        u32             mmio_base;
-       struct          drm_device *dev;
        struct intel_ringbuffer *buffer;
        struct list_head buffers;
 
+       /* Rather than have every client wait upon all user interrupts,
+        * with the herd waking after every interrupt and each doing the
+        * heavyweight seqno dance, we delegate the task (of being the
+        * bottom-half of the user interrupt) to the first client. After
+        * every interrupt, we wake up one client, who does the heavyweight
+        * coherent seqno read and either goes back to sleep (if incomplete),
+        * or wakes up all the completed clients in parallel, before then
+        * transferring the bottom-half status to the next client in the queue.
+        *
+        * Compared to walking the entire list of waiters in a single dedicated
+        * bottom-half, we reduce the latency of the first waiter by avoiding
+        * a context switch, but incur additional coherent seqno reads when
+        * following the chain of request breadcrumbs. Since it is most likely
+        * that we have a single client waiting on each seqno, reducing
+        * the overhead of waking that client is much preferred.
+        */
+       struct intel_breadcrumbs {
+               struct task_struct *irq_seqno_bh; /* bh for user interrupts */
+               unsigned long irq_wakeups;
+               bool irq_posted;
+
+               spinlock_t lock; /* protects the lists of requests */
+               struct rb_root waiters; /* sorted by retirement, priority */
+               struct rb_root signals; /* sorted by retirement */
+               struct intel_wait *first_wait; /* oldest waiter by retirement */
+               struct task_struct *signaler; /* used for fence signalling */
+               struct drm_i915_gem_request *first_signal;
+               struct timer_list fake_irq; /* used after a missed interrupt */
+
+               bool irq_enabled : 1;
+               bool rpm_wakelock : 1;
+       } breadcrumbs;
+
        /*
         * A pool of objects to use as shadow copies of client batch buffers
         * when the command parser is enabled. Prevents the client from
@@ -171,11 +193,10 @@ struct  intel_engine_cs {
        struct intel_hw_status_page status_page;
        struct i915_ctx_workarounds wa_ctx;
 
-       unsigned irq_refcount; /* protected by dev_priv->irq_lock */
-       u32             irq_enable_mask;        /* bitmask to enable ring interrupt */
-       struct drm_i915_gem_request *trace_irq_req;
-       bool __must_check (*irq_get)(struct intel_engine_cs *ring);
-       void            (*irq_put)(struct intel_engine_cs *ring);
+       u32             irq_keep_mask; /* always keep these interrupts */
+       u32             irq_enable_mask; /* bitmask to enable ring interrupt */
+       void            (*irq_enable)(struct intel_engine_cs *ring);
+       void            (*irq_disable)(struct intel_engine_cs *ring);
 
        int             (*init_hw)(struct intel_engine_cs *ring);
 
@@ -194,9 +215,6 @@ struct  intel_engine_cs {
         * monotonic, even if not coherent.
         */
        void            (*irq_seqno_barrier)(struct intel_engine_cs *ring);
-       u32             (*get_seqno)(struct intel_engine_cs *ring);
-       void            (*set_seqno)(struct intel_engine_cs *ring,
-                                    u32 seqno);
        int             (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
                                               u64 offset, u32 length,
                                               unsigned dispatch_flags);
@@ -268,13 +286,11 @@ struct  intel_engine_cs {
        struct tasklet_struct irq_tasklet;
        spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
        struct list_head execlist_queue;
-       struct list_head execlist_retired_req_list;
        unsigned int fw_domains;
        unsigned int next_context_status_buffer;
        unsigned int idle_lite_restore_wa;
        bool disable_lite_restore_wa;
        u32 ctx_desc_template;
-       u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int             (*emit_request)(struct drm_i915_gem_request *request);
        int             (*emit_flush)(struct drm_i915_gem_request *request,
                                      u32 invalidate_domains,
@@ -306,20 +322,16 @@ struct  intel_engine_cs {
         * inspecting request list.
         */
        u32 last_submitted_seqno;
-       unsigned user_interrupts;
 
        bool gpu_caches_dirty;
 
-       wait_queue_head_t irq_queue;
-
-       struct intel_context *last_context;
+       struct i915_gem_context *last_context;
 
        struct intel_ring_hangcheck hangcheck;
 
        struct {
                struct drm_i915_gem_object *obj;
                u32 gtt_offset;
-               volatile u32 *cpu_page;
        } scratch;
 
        bool needs_cmd_parser;
@@ -350,13 +362,13 @@ struct  intel_engine_cs {
 };
 
 static inline bool
-intel_engine_initialized(struct intel_engine_cs *engine)
+intel_engine_initialized(const struct intel_engine_cs *engine)
 {
-       return engine->dev != NULL;
+       return engine->i915 != NULL;
 }
 
 static inline unsigned
-intel_engine_flag(struct intel_engine_cs *engine)
+intel_engine_flag(const struct intel_engine_cs *engine)
 {
        return 1 << engine->id;
 }
@@ -427,7 +439,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
 
 struct intel_ringbuffer *
 intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
+int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
                                     struct intel_ringbuffer *ringbuf);
 void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
@@ -458,15 +470,14 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
 }
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-bool intel_engine_stopped(struct intel_engine_cs *engine);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
 int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
 int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
 
+int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
-int intel_init_pipe_control(struct intel_engine_cs *engine);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -475,6 +486,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
 int intel_init_vebox_ring_buffer(struct drm_device *dev);
 
 u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
+{
+       return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
+}
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
@@ -486,26 +501,73 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
 /*
  * Arbitrary size for largest possible 'add request' sequence. The code paths
  * are complex and variable. Empirical measurement shows that the worst case
- * is ILK at 136 words. Reserving too much is better than reserving too little
- * as that allows for corner cases that might have been missed. So the figure
- * has been rounded up to 160 words.
+ * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
+ * we need to allocate double the largest single packet within that emission
+ * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
  */
-#define MIN_SPACE_FOR_ADD_REQUEST      160
+#define MIN_SPACE_FOR_ADD_REQUEST 336
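The new figure follows from the comment's numbers, assuming 4-byte dwords:
doubling the largest packet gives 6 + 6 + 2 * 36 = 84 dwords, and
84 * 4 = 336 bytes. A compile-time restatement of that arithmetic (not
driver code, just the sum spelled out):

        #define BDW_ADD_REQUEST_DWORDS (6 + 6 + 2 * 36)         /* 84 dwords */
        _Static_assert(BDW_ADD_REQUEST_DWORDS * 4 == 336,
                       "MIN_SPACE_FOR_ADD_REQUEST arithmetic");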
 
-/*
- * Reserve space in the ring to guarantee that the i915_add_request() call
- * will always have sufficient room to do its stuff. The request creation
- * code calls this automatically.
- */
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
-/* Cancel the reservation, e.g. because the request is being discarded. */
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
-/* Use the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
-/* Finish with the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
-
-/* Legacy ringbuffer specific portion of reservation code: */
-int intel_ring_reserve_space(struct drm_i915_gem_request *request);
+static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
+{
+       return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
+
+/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
+struct intel_wait {
+       struct rb_node node;
+       struct task_struct *tsk;
+       u32 seqno;
+};
+
+struct intel_signal_node {
+       struct rb_node node;
+       struct intel_wait wait;
+};
+
+int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
+
+static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
+{
+       wait->tsk = current;
+       wait->seqno = seqno;
+}
+
+static inline bool intel_wait_complete(const struct intel_wait *wait)
+{
+       return RB_EMPTY_NODE(&wait->node);
+}
+
+bool intel_engine_add_wait(struct intel_engine_cs *engine,
+                          struct intel_wait *wait);
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+                             struct intel_wait *wait);
+void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
+
+static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
+{
+       return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+}
+
+static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
+{
+       bool wakeup = false;
+       struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+       /* Note that for this not to dangerously chase a dangling pointer,
+        * the caller is responsible for ensuring that the task remains
+        * valid for wake_up_process(), i.e. that the RCU grace period
+        * cannot expire.
+        *
+        * Also note that tsk is likely to be in !TASK_RUNNING state, so an
+        * early test for tsk->state != TASK_RUNNING before wake_up_process()
+        * is unlikely to be beneficial.
+        */
+       if (tsk)
+               wakeup = wake_up_process(tsk);
+       return wakeup;
+}
+
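A simplified sketch of how a waiter might drive this API, assuming (from the
declarations here, not verified against the implementation) that the
add/remove pair brackets a standard sleep loop and that i915_seqno_passed()
is available from the driver:

        struct intel_wait wait;

        intel_wait_init(&wait, seqno);
        intel_engine_add_wait(engine, &wait);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
                        break;          /* our request completed */
                schedule();             /* woken by the bottom-half chain */
        }
        __set_current_state(TASK_RUNNING);
        intel_engine_remove_wait(engine, &wait);

Signal handling, timeouts and the bottom-half handover are elided; the
driver's real wait loop is considerably more careful.
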
+void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+unsigned int intel_kick_waiters(struct drm_i915_private *i915);
+unsigned int intel_kick_signalers(struct drm_i915_private *i915);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
index 7fb1da4e7fc349dfdfe26bb32dd7e97192b2ec04..1c603bbe5784fa7e21049e267eab52af25a60360 100644 (file)
@@ -65,6 +65,9 @@
 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
                                    int power_well_id);
 
+static struct i915_power_well *
+lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
+
 const char *
 intel_display_power_domain_str(enum intel_display_power_domain domain)
 {
@@ -151,6 +154,23 @@ static void intel_power_well_disable(struct drm_i915_private *dev_priv,
        power_well->ops->disable(dev_priv, power_well);
 }
 
+static void intel_power_well_get(struct drm_i915_private *dev_priv,
+                                struct i915_power_well *power_well)
+{
+       if (!power_well->count++)
+               intel_power_well_enable(dev_priv, power_well);
+}
+
+static void intel_power_well_put(struct drm_i915_private *dev_priv,
+                                struct i915_power_well *power_well)
+{
+       WARN(!power_well->count, "Use count on power well %s is already zero",
+            power_well->name);
+
+       if (!--power_well->count)
+               intel_power_well_disable(dev_priv, power_well);
+}
+
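These helpers make the refcounting explicit: the first get powers the well
up, the last put powers it down, and intermediate calls touch no hardware.
Starting from count == 0:

        intel_power_well_get(dev_priv, well);   /* 0 -> 1: ops->enable() runs */
        intel_power_well_get(dev_priv, well);   /* 1 -> 2: no hardware access */
        intel_power_well_put(dev_priv, well);   /* 2 -> 1: no hardware access */
        intel_power_well_put(dev_priv, well);   /* 1 -> 0: ops->disable() runs */

A further put at this point would trip the WARN on underflow.
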
 /*
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -267,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
  */
 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        /*
         * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -298,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
 static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
                                       struct i915_power_well *power_well)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        /*
         * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -345,8 +365,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
 
                if (!is_enabled) {
                        DRM_DEBUG_KMS("Enabling power well\n");
-                       if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
-                                     HSW_PWR_WELL_STATE_ENABLED), 20))
+                       if (intel_wait_for_register(dev_priv,
+                                                   HSW_PWR_WELL_DRIVER,
+                                                   HSW_PWR_WELL_STATE_ENABLED,
+                                                   HSW_PWR_WELL_STATE_ENABLED,
+                                                   20))
                                DRM_ERROR("Timeout enabling power well\n");
                        hsw_power_well_post_enable(dev_priv);
                }
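This and the following hunks replace open-coded wait_for() polls with
intel_wait_for_register(dev_priv, reg, mask, value, timeout). Judging from
the call sites alone, the assumed contract is: poll until
(read(reg) & mask) == value or the timeout expires, returning nonzero on
timeout. A standalone model of that contract (an assumption, not the
helper's actual implementation; msleep() and ETIMEDOUT come from the kernel):

        static int wait_for_register_model(u32 (*read_reg)(u32 reg), u32 reg,
                                           u32 mask, u32 value,
                                           unsigned int timeout_ms)
        {
                unsigned int waited;

                for (waited = 0; waited <= timeout_ms; waited++) {
                        if ((read_reg(reg) & mask) == value)
                                return 0;       /* condition met in time */
                        msleep(1);
                }
                return -ETIMEDOUT;      /* callers treat nonzero as failure */
        }
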
@@ -419,6 +442,16 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
        BIT(POWER_DOMAIN_MODESET) |                     \
        BIT(POWER_DOMAIN_AUX_A) |                       \
        BIT(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_A_POWER_DOMAINS (                 \
+       BIT(POWER_DOMAIN_PORT_DDI_A_LANES) |            \
+       BIT(POWER_DOMAIN_AUX_A) |                       \
+       BIT(POWER_DOMAIN_INIT))
+#define BXT_DPIO_CMN_BC_POWER_DOMAINS (                        \
+       BIT(POWER_DOMAIN_PORT_DDI_B_LANES) |            \
+       BIT(POWER_DOMAIN_PORT_DDI_C_LANES) |            \
+       BIT(POWER_DOMAIN_AUX_B) |                       \
+       BIT(POWER_DOMAIN_AUX_C) |                       \
+       BIT(POWER_DOMAIN_INIT))
 
 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
 {
@@ -548,6 +581,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
 
        DRM_DEBUG_KMS("Enabling DC9\n");
 
+       intel_power_sequencer_reset(dev_priv);
        gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
 }
 
@@ -669,8 +703,11 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 
        switch (power_well->data) {
        case SKL_DISP_PW_1:
-               if (wait_for((I915_READ(SKL_FUSE_STATUS) &
-                       SKL_FUSE_PG0_DIST_STATUS), 1)) {
+               if (intel_wait_for_register(dev_priv,
+                                           SKL_FUSE_STATUS,
+                                           SKL_FUSE_PG0_DIST_STATUS,
+                                           SKL_FUSE_PG0_DIST_STATUS,
+                                           1)) {
                        DRM_ERROR("PG0 not enabled\n");
                        return;
                }
@@ -731,12 +768,18 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
 
        if (check_fuse_status) {
                if (power_well->data == SKL_DISP_PW_1) {
-                       if (wait_for((I915_READ(SKL_FUSE_STATUS) &
-                               SKL_FUSE_PG1_DIST_STATUS), 1))
+                       if (intel_wait_for_register(dev_priv,
+                                                   SKL_FUSE_STATUS,
+                                                   SKL_FUSE_PG1_DIST_STATUS,
+                                                   SKL_FUSE_PG1_DIST_STATUS,
+                                                   1))
                                DRM_ERROR("PG1 distributing status timeout\n");
                } else if (power_well->data == SKL_DISP_PW_2) {
-                       if (wait_for((I915_READ(SKL_FUSE_STATUS) &
-                               SKL_FUSE_PG2_DIST_STATUS), 1))
+                       if (intel_wait_for_register(dev_priv,
+                                                   SKL_FUSE_STATUS,
+                                                   SKL_FUSE_PG2_DIST_STATUS,
+                                                   SKL_FUSE_PG2_DIST_STATUS,
+                                                   1))
                                DRM_ERROR("PG2 distributing status timeout\n");
                }
        }
@@ -800,21 +843,99 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
        skl_set_power_well(dev_priv, power_well, false);
 }
 
+static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
+{
+       enum skl_disp_power_wells power_well_id = power_well->data;
+
+       return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
+}
+
+static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+{
+       enum skl_disp_power_wells power_well_id = power_well->data;
+       struct i915_power_well *cmn_a_well;
+
+       if (power_well_id == BXT_DPIO_CMN_BC) {
+               /*
+                * We need to copy the GRC calibration value from the eDP PHY,
+                * so make sure it's powered up.
+                */
+               cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
+               intel_power_well_get(dev_priv, cmn_a_well);
+       }
+
+       bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
+
+       if (power_well_id == BXT_DPIO_CMN_BC)
+               intel_power_well_put(dev_priv, cmn_a_well);
+}
+
+static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
+}
+
+static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       return bxt_ddi_phy_is_enabled(dev_priv,
+                                     bxt_power_well_to_phy(power_well));
+}
+
+static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+{
+       if (power_well->count > 0)
+               bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
+       else
+               bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
+}
+
+static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
+{
+       struct i915_power_well *power_well;
+
+       power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
+       if (power_well->count > 0)
+               bxt_ddi_phy_verify_state(dev_priv,
+                                        bxt_power_well_to_phy(power_well));
+
+       power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
+       if (power_well->count > 0)
+               bxt_ddi_phy_verify_state(dev_priv,
+                                        bxt_power_well_to_phy(power_well));
+}
+
 static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
        return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
 }
 
+static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
+{
+       u32 tmp = I915_READ(DBUF_CTL);
+
+       WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
+            (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
+            "Unexpected DBuf power state (0x%08x)\n", tmp);
+}
+
 static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
 {
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
-       if (IS_BROXTON(dev_priv)) {
-               broxton_cdclk_verify_state(dev_priv);
-               broxton_ddi_phy_verify_state(dev_priv);
-       }
+       WARN_ON(dev_priv->cdclk_freq !=
+               dev_priv->display.get_display_clock_speed(&dev_priv->drm));
+
+       gen9_assert_dbuf_enabled(dev_priv);
+
+       if (IS_BROXTON(dev_priv))
+               bxt_verify_ddi_phy_power_wells(dev_priv);
 }
 
 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -948,10 +1069,16 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
         */
        I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
        I915_WRITE(CBR1_VLV, 0);
+
+       WARN_ON(dev_priv->rawclk_freq == 0);
+
+       I915_WRITE(RAWCLK_FREQ_VLV,
+                  DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
 }
 
 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 {
+       struct intel_encoder *encoder;
        enum pipe pipe;
 
        /*
@@ -962,7 +1089,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
         *
         * CHV DPLL B/C have some issues if VGA mode is enabled.
         */
-       for_each_pipe(dev_priv->dev, pipe) {
+       for_each_pipe(&dev_priv->drm, pipe) {
                u32 val = I915_READ(DPLL(pipe));
 
                val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -987,7 +1114,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 
        intel_hpd_init(dev_priv);
 
-       i915_redisable_vga_power_on(dev_priv->dev);
+       /* Re-enable the ADPA, if we have one */
+       for_each_intel_encoder(&dev_priv->drm, encoder) {
+               if (encoder->type == INTEL_OUTPUT_ANALOG)
+                       intel_crt_reset(&encoder->base);
+       }
+
+       i915_redisable_vga_power_on(&dev_priv->drm);
 }
 
 static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -997,9 +1130,11 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
        spin_unlock_irq(&dev_priv->irq_lock);
 
        /* make sure we're done processing display irqs */
-       synchronize_irq(dev_priv->dev->irq);
+       synchronize_irq(dev_priv->drm.irq);
+
+       intel_power_sequencer_reset(dev_priv);
 
-       vlv_power_sequencer_reset(dev_priv);
+       intel_hpd_poll_init(dev_priv);
 }
 
 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1092,7 +1227,6 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
        u32 phy_control = dev_priv->chv_phy_control;
        u32 phy_status = 0;
        u32 phy_status_mask = 0xffffffff;
-       u32 tmp;
 
        /*
         * The BIOS can leave the PHY in some weird state
@@ -1180,10 +1314,14 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
         * The PHY may be busy with some initial calibration and whatnot,
         * so the power state can take a while to actually change.
         */
-       if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
-               WARN(phy_status != tmp,
-                    "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
-                    tmp, phy_status, dev_priv->chv_phy_control);
+       if (intel_wait_for_register(dev_priv,
+                                   DISPLAY_PHY_STATUS,
+                                   phy_status_mask,
+                                   phy_status,
+                                   10))
+               DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+                         I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
+                         phy_status, dev_priv->chv_phy_control);
 }
 
 #undef BITS_SET
@@ -1211,7 +1349,11 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
        vlv_set_power_well(dev_priv, power_well, true);
 
        /* Poll for phypwrgood signal */
-       if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
+       if (intel_wait_for_register(dev_priv,
+                                   DISPLAY_PHY_STATUS,
+                                   PHY_POWERGOOD(phy),
+                                   PHY_POWERGOOD(phy),
+                                   1))
                DRM_ERROR("Display PHY %d is not powered up\n", phy);
 
        mutex_lock(&dev_priv->sb_lock);
@@ -1501,10 +1643,8 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
        struct i915_power_well *power_well;
        int i;
 
-       for_each_power_well(i, power_well, BIT(domain), power_domains) {
-               if (!power_well->count++)
-                       intel_power_well_enable(dev_priv, power_well);
-       }
+       for_each_power_well(i, power_well, BIT(domain), power_domains)
+               intel_power_well_get(dev_priv, power_well);
 
        power_domains->domain_use_count[domain]++;
 }
@@ -1598,14 +1738,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
             intel_display_power_domain_str(domain));
        power_domains->domain_use_count[domain]--;
 
-       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-               WARN(!power_well->count,
-                    "Use count on power well %s is already zero",
-                    power_well->name);
-
-               if (!--power_well->count)
-                       intel_power_well_disable(dev_priv, power_well);
-       }
+       for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+               intel_power_well_put(dev_priv, power_well);
 
        mutex_unlock(&power_domains->lock);
 
@@ -1776,6 +1910,13 @@ static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
        .is_enabled = gen9_dc_off_power_well_enabled,
 };
 
+static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
+       .sync_hw = bxt_dpio_cmn_power_well_sync_hw,
+       .enable = bxt_dpio_cmn_power_well_enable,
+       .disable = bxt_dpio_cmn_power_well_disable,
+       .is_enabled = bxt_dpio_cmn_power_well_enabled,
+};
+
 static struct i915_power_well hsw_power_wells[] = {
        {
                .name = "always-on",
@@ -2012,6 +2153,18 @@ static struct i915_power_well bxt_power_wells[] = {
                .ops = &skl_power_well_ops,
                .data = SKL_DISP_PW_2,
        },
+       {
+               .name = "dpio-common-a",
+               .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
+               .ops = &bxt_dpio_cmn_power_well_ops,
+               .data = BXT_DPIO_CMN_A,
+       },
+       {
+               .name = "dpio-common-bc",
+               .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
+               .ops = &bxt_dpio_cmn_power_well_ops,
+               .data = BXT_DPIO_CMN_BC,
+       },
 };
 
 static int
@@ -2131,7 +2284,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 {
-       struct device *device = &dev_priv->dev->pdev->dev;
+       struct device *device = &dev_priv->drm.pdev->dev;
 
        /*
         * The i915.ko module is still not prepared to be loaded when
@@ -2171,6 +2324,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
        mutex_unlock(&power_domains->lock);
 }
 
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+       POSTING_READ(DBUF_CTL);
+
+       udelay(10);
+
+       if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
+               DRM_ERROR("DBuf power enable timeout\n");
+}
+
+static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+       POSTING_READ(DBUF_CTL);
+
+       udelay(10);
+
+       if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
+               DRM_ERROR("DBuf power disable timeout!\n");
+}
+
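Read together with gen9_assert_dbuf_enabled() earlier in this patch, the
DBuf handshake is: toggle DBUF_POWER_REQUEST, post the write, give the
hardware ~10us, then expect DBUF_POWER_STATE to track the request. A caller
checking the enabled state therefore tests both bits at once, as the assert
helper does:

        u32 tmp = I915_READ(DBUF_CTL);
        bool enabled = (tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) ==
                       (DBUF_POWER_STATE | DBUF_POWER_REQUEST);
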
 static void skl_display_core_init(struct drm_i915_private *dev_priv,
                                   bool resume)
 {
@@ -2195,12 +2370,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
 
        mutex_unlock(&power_domains->lock);
 
-       if (!resume)
-               return;
-
        skl_init_cdclk(dev_priv);
 
-       if (dev_priv->csr.dmc_payload)
+       gen9_dbuf_enable(dev_priv);
+
+       if (resume && dev_priv->csr.dmc_payload)
                intel_csr_load_program(dev_priv);
 }
 
@@ -2211,6 +2385,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
+       gen9_dbuf_disable(dev_priv);
+
        skl_uninit_cdclk(dev_priv);
 
        /* The spec doesn't call for removing the reset handshake flag */
@@ -2254,11 +2430,9 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
 
        mutex_unlock(&power_domains->lock);
 
-       broxton_init_cdclk(dev_priv);
-       broxton_ddi_phy_init(dev_priv);
+       bxt_init_cdclk(dev_priv);
 
-       broxton_cdclk_verify_state(dev_priv);
-       broxton_ddi_phy_verify_state(dev_priv);
+       gen9_dbuf_enable(dev_priv);
 
        if (resume && dev_priv->csr.dmc_payload)
                intel_csr_load_program(dev_priv);
@@ -2271,8 +2445,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 
-       broxton_ddi_phy_uninit(dev_priv);
-       broxton_uninit_cdclk(dev_priv);
+       gen9_dbuf_disable(dev_priv);
+
+       bxt_uninit_cdclk(dev_priv);
 
        /* The spec doesn't call for removing the reset handshake flag */
 
@@ -2403,13 +2578,14 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
 /**
  * intel_power_domains_init_hw - initialize hardware power domain state
  * @dev_priv: i915 device instance
+ * @resume: true when called from a system resume path
  *
  * This function initializes the hardware power domain state and enables all
  * power domains using intel_display_set_init_power().
  */
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
        power_domains->initializing = true;
@@ -2471,7 +2647,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct device *device = &dev->pdev->dev;
 
        pm_runtime_get_sync(device);
@@ -2492,7 +2668,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  */
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct device *device = &dev->pdev->dev;
 
        if (IS_ENABLED(CONFIG_PM)) {
@@ -2534,7 +2710,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct device *device = &dev->pdev->dev;
 
        assert_rpm_wakelock_held(dev_priv);
@@ -2553,7 +2729,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct device *device = &dev->pdev->dev;
 
        assert_rpm_wakelock_held(dev_priv);
@@ -2576,7 +2752,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
        struct device *device = &dev->pdev->dev;
 
        pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
index 2128fae5687d12a59c8d06ecc4003113abcc7944..e378f35365a2e5b36dd3c193d0971e0db6f7e1f5 100644 (file)
@@ -240,7 +240,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 {
        struct drm_device *dev = intel_sdvo->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 bval = val, cval = val;
        int i;
 
@@ -1195,7 +1195,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 {
        struct drm_device *dev = intel_encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        struct drm_display_mode *mode = &crtc->config->base.mode;
@@ -1330,7 +1330,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
                                    enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        u16 active_outputs = 0;
        u32 tmp;
@@ -1353,7 +1353,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
                                  struct intel_crtc_state *pipe_config)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_sdvo_dtd dtd;
        int encoder_pixel_multiplier = 0;
@@ -1436,7 +1436,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 
 static void intel_disable_sdvo(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        u32 temp;
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
                temp &= ~SDVO_ENABLE;
                intel_sdvo_write_sdvox(intel_sdvo, temp);
 
-               intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+               intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
                intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
                intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
        }
@@ -1489,7 +1489,7 @@ static void pch_post_disable_sdvo(struct intel_encoder *encoder)
 static void intel_enable_sdvo(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        u32 temp;
@@ -1633,7 +1633,7 @@ intel_sdvo_get_edid(struct drm_connector *connector)
 static struct edid *
 intel_sdvo_get_analog_edid(struct drm_connector *connector)
 {
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
 
        return drm_get_edid(connector,
                            intel_gmbus_get_adapter(dev_priv,
@@ -1916,7 +1916,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct drm_display_mode *newmode;
 
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2001,7 +2001,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 {
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-       struct drm_i915_private *dev_priv = connector->dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(connector->dev);
        uint16_t temp_value;
        uint8_t cmd;
        int ret;
@@ -2177,12 +2177,39 @@ done:
 #undef CHECK_PROPERTY
 }
 
+static int
+intel_sdvo_connector_register(struct drm_connector *connector)
+{
+       struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+       int ret;
+
+       ret = intel_connector_register(connector);
+       if (ret)
+               return ret;
+
+       return sysfs_create_link(&connector->kdev->kobj,
+                                &sdvo->ddc.dev.kobj,
+                                sdvo->ddc.dev.kobj.name);
+}
+
+static void
+intel_sdvo_connector_unregister(struct drm_connector *connector)
+{
+       struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+
+       sysfs_remove_link(&connector->kdev->kobj,
+                         sdvo->ddc.dev.kobj.name);
+       intel_connector_unregister(connector);
+}
+
 static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_sdvo_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = intel_sdvo_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
+       .late_register = intel_sdvo_connector_register,
+       .early_unregister = intel_sdvo_connector_unregister,
        .destroy = intel_sdvo_destroy,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
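These hooks replace the hand-rolled registration and err1/err2 unwinding deleted from intel_sdvo_connector_init() further down: the DRM core calls ->late_register() from drm_connector_register() once the connector's kdev exists, and ->early_unregister() before it goes away. A simplified, hedged model of the core's side of the contract:

    /* drm_connector_register(), reduced to the relevant step (the real
     * function also creates the sysfs/debugfs entries first) */
    if (connector->funcs->late_register) {
            ret = connector->funcs->late_register(connector);
            if (ret)
                    goto err_debugfs;       /* core unwinds what it created */
    }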
@@ -2191,7 +2218,6 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
        .get_modes = intel_sdvo_get_modes,
        .mode_valid = intel_sdvo_mode_valid,
-       .best_encoder = intel_best_encoder,
 };
 
 static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -2312,7 +2338,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 static u8
 intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct sdvo_device_mapping *my_mapping, *other_mapping;
 
        if (sdvo->port == PORT_B) {
@@ -2346,20 +2372,6 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
                return 0x72;
 }
 
-static void
-intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
-{
-       struct drm_connector *drm_connector;
-       struct intel_sdvo *sdvo_encoder;
-
-       drm_connector = &intel_connector->base;
-       sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
-
-       sysfs_remove_link(&drm_connector->kdev->kobj,
-                         sdvo_encoder->ddc.dev.kobj.name);
-       intel_connector_unregister(intel_connector);
-}
-
 static int
 intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
                          struct intel_sdvo *encoder)
@@ -2382,27 +2394,10 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
        connector->base.base.doublescan_allowed = 0;
        connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
        connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
-       connector->base.unregister = intel_sdvo_connector_unregister;
 
        intel_connector_attach_encoder(&connector->base, &encoder->base);
-       ret = drm_connector_register(drm_connector);
-       if (ret < 0)
-               goto err1;
-
-       ret = sysfs_create_link(&drm_connector->kdev->kobj,
-                               &encoder->ddc.dev.kobj,
-                               encoder->ddc.dev.kobj.name);
-       if (ret < 0)
-               goto err2;
 
        return 0;
-
-err2:
-       drm_connector_unregister(drm_connector);
-err1:
-       drm_connector_cleanup(drm_connector);
-
-       return ret;
 }
 
 static void
@@ -2529,7 +2524,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
        return true;
 
 err:
-       drm_connector_unregister(connector);
        intel_sdvo_destroy(connector);
        return false;
 }
@@ -2608,7 +2602,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
        return true;
 
 err:
-       drm_connector_unregister(connector);
        intel_sdvo_destroy(connector);
        return false;
 }
@@ -2959,7 +2952,7 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
 bool intel_sdvo_init(struct drm_device *dev,
                     i915_reg_t sdvo_reg, enum port port)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_encoder *intel_encoder;
        struct intel_sdvo *intel_sdvo;
        int i;
@@ -2981,7 +2974,7 @@ bool intel_sdvo_init(struct drm_device *dev,
        intel_encoder = &intel_sdvo->base;
        intel_encoder->type = INTEL_OUTPUT_SDVO;
        drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
-                        NULL);
+                        "SDVO %c", port_name(port));
 
        /* Read the regs to test if we can talk to the device */
        for (i = 0; i < 0x40; i++) {
index c3998188cf35de5111c735282106da8f59c9c5c9..1a840bf92eeac888f2baadae0ef257b8d3f8bdb0 100644 (file)
@@ -51,7 +51,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
 
        WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-       if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+       if (intel_wait_for_register(dev_priv,
+                                   VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+                                   5)) {
                DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
                                 is_read ? "read" : "write");
                return -EAGAIN;
@@ -62,7 +64,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
                I915_WRITE(VLV_IOSF_DATA, *val);
        I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
 
-       if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+       if (intel_wait_for_register(dev_priv,
+                                   VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+                                   5)) {
                DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
                                 is_read ? "read" : "write");
                return -ETIMEDOUT;
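Every conversion in this file is the same mechanical rewrite; the two forms below are equivalent, with intel_wait_for_register() (introduced later in this series, in intel_uncore.c) waiting until (read(reg) & mask) == value within the given millisecond budget:

    /* before */
    if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5))
            return -EAGAIN;

    /* after */
    if (intel_wait_for_register(dev_priv,
                                VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
                                5))
            return -EAGAIN;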
@@ -202,8 +206,9 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
        u32 value = 0;
        WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-                               100)) {
+       if (intel_wait_for_register(dev_priv,
+                                   SBI_CTL_STAT, SBI_BUSY, 0,
+                                   100)) {
                DRM_ERROR("timeout waiting for SBI to become ready\n");
                return 0;
        }
@@ -216,8 +221,11 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
                value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
        I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
-       if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
-                               100)) {
+       if (intel_wait_for_register(dev_priv,
+                                   SBI_CTL_STAT,
+                                   SBI_BUSY | SBI_RESPONSE_FAIL,
+                                   0,
+                                   100)) {
                DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
                return 0;
        }
@@ -232,8 +240,9 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 
        WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-       if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-                               100)) {
+       if (intel_wait_for_register(dev_priv,
+                                   SBI_CTL_STAT, SBI_BUSY, 0,
+                                   100)) {
                DRM_ERROR("timeout waiting for SBI to become ready\n");
                return;
        }
@@ -247,8 +256,11 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
                tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
        I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
-       if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
-                               100)) {
+       if (intel_wait_for_register(dev_priv,
+                                   SBI_CTL_STAT,
+                                   SBI_BUSY | SBI_RESPONSE_FAIL,
+                                   0,
+                                   100)) {
                DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
                return;
        }
index 0f3e2303e0e9b08229d29a7cc47172bf0efbeef5..0de935ad01c2ae78f4e6c0bbfc77860e191b876d 100644 (file)
@@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  */
 void intel_pipe_update_start(struct intel_crtc *crtc)
 {
-       struct drm_device *dev = crtc->base.dev;
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
-       enum pipe pipe = crtc->pipe;
        long timeout = msecs_to_jiffies_timeout(1);
        int scanline, min, max, vblank_start;
        wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
 
        crtc->debug.scanline_start = scanline;
        crtc->debug.start_vbl_time = ktime_get();
-       crtc->debug.start_vbl_count =
-               dev->driver->get_vblank_counter(dev, pipe);
+       crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
 
        trace_i915_pipe_update_vblank_evaded(crtc);
 }
@@ -154,16 +151,35 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
  * re-enables interrupts and verifies the update was actually completed
  * before a vblank using the value of @start_vbl_count.
  */
-void intel_pipe_update_end(struct intel_crtc *crtc)
+void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
 {
-       struct drm_device *dev = crtc->base.dev;
        enum pipe pipe = crtc->pipe;
        int scanline_end = intel_get_crtc_scanline(crtc);
-       u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
+       u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
        ktime_t end_vbl_time = ktime_get();
 
+       if (work) {
+               work->flip_queued_vblank = end_vbl_count;
+               smp_mb__before_atomic();
+               atomic_set(&work->pending, 1);
+       }
+
        trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
 
+       /* We're still in the vblank-evade critical section, so this can't
+        * race. It would be nicer to grab the vblank count and arm the
+        * event outside of the critical section - the spinlock might spin
+        * for a while ... */
+       if (crtc->base.state->event) {
+               WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
+
+               spin_lock(&crtc->base.dev->event_lock);
+               drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event);
+               spin_unlock(&crtc->base.dev->event_lock);
+
+               crtc->base.state->event = NULL;
+       }
+
        local_irq_enable();
 
        if (crtc->debug.start_vbl_count &&
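The smp_mb__before_atomic() above orders the flip_queued_vblank store before the pending store; the consumer on the flip-completion side (not visible in this hunk) is presumably an atomic_read() of work->pending followed by a read barrier before flip_queued_vblank is used. A hedged sketch of that consuming side, with process_queued_flip() a purely illustrative name:

    if (atomic_read(&work->pending)) {
            smp_rmb();      /* pairs with smp_mb__before_atomic() above */
            process_queued_flip(work->flip_queued_vblank);
    }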
@@ -183,7 +199,7 @@ skl_update_plane(struct drm_plane *drm_plane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = drm_plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(drm_plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -203,8 +219,6 @@ skl_update_plane(struct drm_plane *drm_plane,
        uint32_t y = plane_state->src.y1 >> 16;
        uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
        uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
-       const struct intel_scaler *scaler =
-               &crtc_state->scaler_state.scalers[plane_state->scaler_id];
 
        plane_ctl = PLANE_CTL_ENABLE |
                PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -260,13 +274,16 @@ skl_update_plane(struct drm_plane *drm_plane,
 
        /* program plane scaler */
        if (plane_state->scaler_id >= 0) {
-               uint32_t ps_ctrl = 0;
                int scaler_id = plane_state->scaler_id;
+               const struct intel_scaler *scaler;
 
                DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
                        PS_PLANE_SEL(plane));
-               ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
-               I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+
+               scaler = &crtc_state->scaler_state.scalers[scaler_id];
+
+               I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
+                          PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
                I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
                I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
@@ -286,7 +303,7 @@ static void
 skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = dplane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(dplane);
        const int pipe = intel_plane->pipe;
        const int plane = intel_plane->plane + 1;
@@ -300,7 +317,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 static void
 chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
 {
-       struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
        int plane = intel_plane->plane;
 
        /* Seems RGB data bypasses the CSC always */
@@ -342,7 +359,7 @@ vlv_update_plane(struct drm_plane *dplane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = dplane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(dplane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -468,7 +485,7 @@ static void
 vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = dplane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(dplane);
        int pipe = intel_plane->pipe;
        int plane = intel_plane->plane;
@@ -485,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -607,7 +624,7 @@ static void
 ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        int pipe = intel_plane->pipe;
 
@@ -626,7 +643,7 @@ ilk_update_plane(struct drm_plane *plane,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -736,7 +753,7 @@ static void
 ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *intel_plane = to_intel_plane(plane);
        int pipe = intel_plane->pipe;
 
@@ -1111,10 +1128,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
 
        possible_crtcs = (1 << pipe);
 
-       ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
-                                      &intel_plane_funcs,
-                                      plane_formats, num_plane_formats,
-                                      DRM_PLANE_TYPE_OVERLAY, NULL);
+       if (INTEL_INFO(dev)->gen >= 9)
+               ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+                                              &intel_plane_funcs,
+                                              plane_formats, num_plane_formats,
+                                              DRM_PLANE_TYPE_OVERLAY,
+                                              "plane %d%c", plane + 2, pipe_name(pipe));
+       else
+               ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
+                                              &intel_plane_funcs,
+                                              plane_formats, num_plane_formats,
+                                              DRM_PLANE_TYPE_OVERLAY,
+                                              "sprite %c", sprite_name(pipe, plane));
        if (ret)
                goto fail;
 
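The only behavioural change here is the human-readable name: on gen9+ the sprite registers as a universal "plane" with a 1-based index (the primary is plane 1, so plane 0 becomes "plane 2A" on pipe A), while older platforms keep the legacy per-pipe sprite letters. Assuming sprite_name() enumerates sprites across pipes as in this era of the driver:

    /* gen9, pipe A, plane 0                 ->  "plane 2A" */
    /* vlv (2 sprites/pipe), pipe B, plane 1 ->  "sprite D" */

The encoder inits touched elsewhere in this patch get the same treatment, e.g. "SDVO %c" with port_name(port), and the plain "TV" name below.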
index 223129d3c765afd836118121416a9a9650813c64..49136ad5473ede52222ac7ec383d766a377bab5c 100644 (file)
@@ -826,7 +826,7 @@ static bool
 intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp = I915_READ(TV_CTL);
 
        if (!(tmp & TV_ENC_ENABLE))
@@ -841,7 +841,7 @@ static void
 intel_enable_tv(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Prevents vblank waits from timing out in intel_tv_detect_type() */
        intel_wait_for_vblank(encoder->base.dev,
@@ -854,7 +854,7 @@ static void
 intel_disable_tv(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
 }
@@ -1013,7 +1013,7 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
 static void intel_tv_pre_enable(struct intel_encoder *encoder)
 {
        struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        struct intel_tv *intel_tv = enc_to_tv(encoder);
        const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1173,7 +1173,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
        struct drm_crtc *crtc = connector->state->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = connector->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tv_ctl, save_tv_ctl;
        u32 tv_dac, save_tv_dac;
        int type;
@@ -1501,6 +1501,8 @@ out:
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_tv_detect,
+       .late_register = intel_connector_register,
+       .early_unregister = intel_connector_unregister,
        .destroy = intel_tv_destroy,
        .set_property = intel_tv_set_property,
        .atomic_get_property = intel_connector_atomic_get_property,
@@ -1512,7 +1514,6 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
        .mode_valid = intel_tv_mode_valid,
        .get_modes = intel_tv_get_modes,
-       .best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1522,7 +1523,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
 void
 intel_tv_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_connector *connector;
        struct intel_tv *intel_tv;
        struct intel_encoder *intel_encoder;
@@ -1591,7 +1592,7 @@ intel_tv_init(struct drm_device *dev)
                           DRM_MODE_CONNECTOR_SVIDEO);
 
        drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
-                        DRM_MODE_ENCODER_TVDAC, NULL);
+                        DRM_MODE_ENCODER_TVDAC, "TV");
 
        intel_encoder->compute_config = intel_tv_compute_config;
        intel_encoder->get_config = intel_tv_get_config;
@@ -1600,7 +1601,6 @@ intel_tv_init(struct drm_device *dev)
        intel_encoder->disable = intel_disable_tv;
        intel_encoder->get_hw_state = intel_tv_get_hw_state;
        intel_connector->get_hw_state = intel_connector_get_hw_state;
-       intel_connector->unregister = intel_connector_unregister;
 
        intel_connector_attach_encoder(intel_connector, intel_encoder);
        intel_encoder->type = INTEL_OUTPUT_TVOUT;
@@ -1642,5 +1642,4 @@ intel_tv_init(struct drm_device *dev)
        drm_object_attach_property(&connector->base,
                                   dev->mode_config.tv_bottom_margin_property,
                                   intel_tv->margin[TV_MARGIN_BOTTOM]);
-       drm_connector_register(connector);
 }
index 4f1dfe616856ffc748e54a1826465df5950c10f6..ff80a81b1a84f929cf9a7aacda59608a23e0dc8a 100644 (file)
@@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
        return HRTIMER_NORESTART;
 }
 
-void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+                                 bool restore)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
        struct intel_uncore_forcewake_domain *domain;
        int retry_count = 100;
@@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
                if (fw)
                        dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
 
-               if (IS_GEN6(dev) || IS_GEN7(dev))
+               if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
                        dev_priv->uncore.fifo_count =
                                fifo_free_entries(dev_priv);
        }
@@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
        return false;
 }
 
-static void __intel_uncore_early_sanitize(struct drm_device *dev,
+static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
                                          bool restore_forcewake)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        /* clear out unclaimed reg detection bit */
        if (check_for_unclaimed_mmio(dev_priv))
                DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
        /* clear out old GT FIFO errors */
-       if (IS_GEN6(dev) || IS_GEN7(dev))
+       if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
                __raw_i915_write32(dev_priv, GTFIFODBG,
                                   __raw_i915_read32(dev_priv, GTFIFODBG));
 
        /* WaDisableShadowRegForCpd:chv */
-       if (IS_CHERRYVIEW(dev)) {
+       if (IS_CHERRYVIEW(dev_priv)) {
                __raw_i915_write32(dev_priv, GTFIFOCTL,
                                   __raw_i915_read32(dev_priv, GTFIFOCTL) |
                                   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
                                   GT_FIFO_CTL_RC6_POLICY_STALL);
        }
 
-       intel_uncore_forcewake_reset(dev, restore_forcewake);
+       intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
 }
 
-void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
+void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+                                bool restore_forcewake)
 {
-       __intel_uncore_early_sanitize(dev, restore_forcewake);
-       i915_check_and_clear_faults(dev);
+       __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+       i915_check_and_clear_faults(dev_priv);
 }
 
-void intel_uncore_sanitize(struct drm_device *dev)
+void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
 {
-       i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+       i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
 
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
-       intel_disable_gt_powersave(dev);
+       intel_disable_gt_powersave(dev_priv);
 }
 
 static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
        fw_domain_reset(d);
 }
 
-static void intel_uncore_fw_domains_init(struct drm_device *dev)
+static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        if (INTEL_INFO(dev_priv)->gen <= 5)
                return;
 
-       if (IS_GEN9(dev)) {
+       if (IS_GEN9(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
                dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
                               FORCEWAKE_ACK_BLITTER_GEN9);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
-       } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
-               if (!IS_CHERRYVIEW(dev))
+               if (!IS_CHERRYVIEW(dev_priv))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
@@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
                               FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
                fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
                               FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
-       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+       } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
-               if (IS_HASWELL(dev))
+               if (IS_HASWELL(dev_priv))
                        dev_priv->uncore.funcs.force_wake_put =
                                fw_domains_put_with_fifo;
                else
                        dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
-       } else if (IS_IVYBRIDGE(dev)) {
+       } else if (IS_IVYBRIDGE(dev_priv)) {
                u32 ecobus;
 
                /* IVB configs may use multi-threaded forcewake */
@@ -1302,11 +1299,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
-               mutex_lock(&dev->struct_mutex);
+               spin_lock_irq(&dev_priv->uncore.lock);
                fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
                ecobus = __raw_i915_read32(dev_priv, ECOBUS);
                fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
-               mutex_unlock(&dev->struct_mutex);
+               spin_unlock_irq(&dev_priv->uncore.lock);
 
                if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
                        DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1314,7 +1311,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
                        fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                                       FORCEWAKE, FORCEWAKE_ACK);
                }
-       } else if (IS_GEN6(dev)) {
+       } else if (IS_GEN6(dev_priv)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
                dev_priv->uncore.funcs.force_wake_put =
@@ -1327,26 +1324,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
        WARN_ON(dev_priv->uncore.fw_domains == 0);
 }
 
-void intel_uncore_init(struct drm_device *dev)
+void intel_uncore_init(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       i915_check_vgpu(dev);
+       i915_check_vgpu(dev_priv);
 
        intel_uncore_edram_detect(dev_priv);
-       intel_uncore_fw_domains_init(dev);
-       __intel_uncore_early_sanitize(dev, false);
+       intel_uncore_fw_domains_init(dev_priv);
+       __intel_uncore_early_sanitize(dev_priv, false);
 
        dev_priv->uncore.unclaimed_mmio_check = 1;
 
-       switch (INTEL_INFO(dev)->gen) {
+       switch (INTEL_INFO(dev_priv)->gen) {
        default:
        case 9:
                ASSIGN_WRITE_MMIO_VFUNCS(gen9);
                ASSIGN_READ_MMIO_VFUNCS(gen9);
                break;
        case 8:
-               if (IS_CHERRYVIEW(dev)) {
+               if (IS_CHERRYVIEW(dev_priv)) {
                        ASSIGN_WRITE_MMIO_VFUNCS(chv);
                        ASSIGN_READ_MMIO_VFUNCS(chv);
 
@@ -1357,13 +1352,13 @@ void intel_uncore_init(struct drm_device *dev)
                break;
        case 7:
        case 6:
-               if (IS_HASWELL(dev)) {
+               if (IS_HASWELL(dev_priv)) {
                        ASSIGN_WRITE_MMIO_VFUNCS(hsw);
                } else {
                        ASSIGN_WRITE_MMIO_VFUNCS(gen6);
                }
 
-               if (IS_VALLEYVIEW(dev)) {
+               if (IS_VALLEYVIEW(dev_priv)) {
                        ASSIGN_READ_MMIO_VFUNCS(vlv);
                } else {
                        ASSIGN_READ_MMIO_VFUNCS(gen6);
@@ -1381,24 +1376,24 @@ void intel_uncore_init(struct drm_device *dev)
                break;
        }
 
-       if (intel_vgpu_active(dev)) {
+       if (intel_vgpu_active(dev_priv)) {
                ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
                ASSIGN_READ_MMIO_VFUNCS(vgpu);
        }
 
-       i915_check_and_clear_faults(dev);
+       i915_check_and_clear_faults(dev_priv);
 }
 #undef ASSIGN_WRITE_MMIO_VFUNCS
 #undef ASSIGN_READ_MMIO_VFUNCS
 
-void intel_uncore_fini(struct drm_device *dev)
+void intel_uncore_fini(struct drm_i915_private *dev_priv)
 {
        /* Paranoia: make sure we have disabled everything before we exit. */
-       intel_uncore_sanitize(dev);
-       intel_uncore_forcewake_reset(dev, false);
+       intel_uncore_sanitize(dev_priv);
+       intel_uncore_forcewake_reset(dev_priv, false);
 }
 
-#define GEN_RANGE(l, h) GENMASK(h, l)
+#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
 
 static const struct register_whitelist {
        i915_reg_t offset_ldw, offset_udw;
@@ -1414,7 +1409,7 @@ static const struct register_whitelist {
 int i915_reg_read_ioctl(struct drm_device *dev,
                        void *data, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
        unsigned size;
@@ -1423,7 +1418,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
 
        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
                if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
-                   (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+                   (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
                        break;
        }
 
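The whitelist test changes meaning together with GEN_RANGE: entries used to hold a mask of literal gen numbers tested against 1 << gen, while gen_mask is assumed (matching the adjusted macro) to store gen n in bit n-1. Worked expansion for a register valid on gens 4-5:

    /* old: GEN_RANGE(4, 5) == GENMASK(5, 4) == 0x30,
     *      tested against 1 << gen (gen 4 -> 0x10, gen 5 -> 0x20) */
    /* new: GEN_RANGE(4, 5) == GENMASK(4, 3) == 0x18,
     *      tested against gen_mask (gen 4 -> BIT(3), gen 5 -> BIT(4)) */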
@@ -1467,83 +1462,47 @@ out:
        return ret;
 }
 
-int i915_get_reset_stats_ioctl(struct drm_device *dev,
-                              void *data, struct drm_file *file)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_reset_stats *args = data;
-       struct i915_ctx_hang_stats *hs;
-       struct intel_context *ctx;
-       int ret;
-
-       if (args->flags || args->pad)
-               return -EINVAL;
-
-       if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-
-       ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
-       if (IS_ERR(ctx)) {
-               mutex_unlock(&dev->struct_mutex);
-               return PTR_ERR(ctx);
-       }
-       hs = &ctx->hang_stats;
-
-       if (capable(CAP_SYS_ADMIN))
-               args->reset_count = i915_reset_count(&dev_priv->gpu_error);
-       else
-               args->reset_count = 0;
-
-       args->batch_active = hs->batch_active;
-       args->batch_pending = hs->batch_pending;
-
-       mutex_unlock(&dev->struct_mutex);
-
-       return 0;
-}
-
-static int i915_reset_complete(struct drm_device *dev)
+static int i915_reset_complete(struct pci_dev *pdev)
 {
        u8 gdrst;
-       pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+       pci_read_config_byte(pdev, I915_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_STATUS) == 0;
 }
 
-static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+
        /* assert reset for at least 20 usec */
-       pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+       pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
        udelay(20);
-       pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+       pci_write_config_byte(pdev, I915_GDRST, 0);
 
-       return wait_for(i915_reset_complete(dev), 500);
+       return wait_for(i915_reset_complete(pdev), 500);
 }
 
-static int g4x_reset_complete(struct drm_device *dev)
+static int g4x_reset_complete(struct pci_dev *pdev)
 {
        u8 gdrst;
-       pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+       pci_read_config_byte(pdev, I915_GDRST, &gdrst);
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
 }
 
-static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-       pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
-       return wait_for(g4x_reset_complete(dev), 500);
+       struct pci_dev *pdev = dev_priv->drm.pdev;
+       pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+       return wait_for(g4x_reset_complete(pdev), 500);
 }
 
-static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;
 
-       pci_write_config_byte(dev->pdev, I915_GDRST,
+       pci_write_config_byte(pdev, I915_GDRST,
                              GRDOM_RENDER | GRDOM_RESET_ENABLE);
-       ret =  wait_for(g4x_reset_complete(dev), 500);
+       ret = wait_for(g4x_reset_complete(pdev), 500);
        if (ret)
                return ret;
 
@@ -1551,9 +1510,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);
 
-       pci_write_config_byte(dev->pdev, I915_GDRST,
+       pci_write_config_byte(pdev, I915_GDRST,
                              GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-       ret =  wait_for(g4x_reset_complete(dev), 500);
+       ret = wait_for(g4x_reset_complete(pdev), 500);
        if (ret)
                return ret;
 
@@ -1561,27 +1520,29 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
        I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
        POSTING_READ(VDECCLK_GATE_D);
 
-       pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+       pci_write_config_byte(pdev, I915_GDRST, 0);
 
        return 0;
 }
 
-static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
+static int ironlake_do_reset(struct drm_i915_private *dev_priv,
+                            unsigned engine_mask)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
        I915_WRITE(ILK_GDSR,
                   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
-       ret = wait_for((I915_READ(ILK_GDSR) &
-                       ILK_GRDOM_RESET_ENABLE) == 0, 500);
+       ret = intel_wait_for_register(dev_priv,
+                                     ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+                                     500);
        if (ret)
                return ret;
 
        I915_WRITE(ILK_GDSR,
                   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
-       ret = wait_for((I915_READ(ILK_GDSR) &
-                       ILK_GRDOM_RESET_ENABLE) == 0, 500);
+       ret = intel_wait_for_register(dev_priv,
+                                     ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+                                     500);
        if (ret)
                return ret;
 
@@ -1594,25 +1555,21 @@ static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
                                u32 hw_domain_mask)
 {
-       int ret;
-
        /* GEN6_GDRST is not in the gt power well, no need to check
         * for fifo space for the write or forcewake the chip for
         * the read
         */
        __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
 
-#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
        /* Spin waiting for the device to ack the reset requests */
-       ret = wait_for(ACKED, 500);
-#undef ACKED
-
-       return ret;
+       return intel_wait_for_register_fw(dev_priv,
+                                         GEN6_GDRST, hw_domain_mask, 0,
+                                         500);
 }
 
 /**
  * gen6_reset_engines - reset individual engines
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
  *
  * This function will reset the individual engines that are set in engine_mask.
@@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
  *
  * Returns 0 on success, nonzero on error.
  */
-static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
+static int gen6_reset_engines(struct drm_i915_private *dev_priv,
+                             unsigned engine_mask)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        const u32 hw_engine_mask[I915_NUM_ENGINES] = {
                [RCS] = GEN6_GRDOM_RENDER,
@@ -1647,33 +1604,94 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
 
        ret = gen6_hw_domain_reset(dev_priv, hw_mask);
 
-       intel_uncore_forcewake_reset(dev, true);
+       intel_uncore_forcewake_reset(dev_priv, true);
 
        return ret;
 }
 
-static int wait_for_register_fw(struct drm_i915_private *dev_priv,
-                               i915_reg_t reg,
-                               const u32 mask,
-                               const u32 value,
-                               const unsigned long timeout_ms)
+/**
+ * intel_wait_for_register_fw - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ *   (I915_READ_FW(@reg) & @mask) == @value
+ * Otherwise, the wait will time out after @timeout_ms milliseconds.
+ *
+ * Note that this routine assumes the caller holds forcewake asserted; it is
+ * not suitable for very long waits. See intel_wait_for_register() if you
+ * wish to wait without holding forcewake for the duration (i.e. you expect
+ * the wait to be slow).
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
+ */
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+                              i915_reg_t reg,
+                              const u32 mask,
+                              const u32 value,
+                              const unsigned long timeout_ms)
+{
+#define done ((I915_READ_FW(reg) & mask) == value)
+       int ret = wait_for_us(done, 2);
+       if (ret)
+               ret = wait_for(done, timeout_ms);
+       return ret;
+#undef done
+}
+
+/**
+ * intel_wait_for_register - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ *   (I915_READ(@reg) & @mask) == @value
+ * Otherwise, the wait will time out after @timeout_ms milliseconds.
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
+ */
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+                           i915_reg_t reg,
+                           const u32 mask,
+                           const u32 value,
+                           const unsigned long timeout_ms)
 {
-       return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
+       unsigned fw =
+               intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+       int ret;
+
+       intel_uncore_forcewake_get(dev_priv, fw);
+       ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
+       intel_uncore_forcewake_put(dev_priv, fw);
+       if (ret)
+               ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
+                              timeout_ms);
+
+       return ret;
 }
 
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *dev_priv = engine->i915;
        int ret;
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
 
        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
                      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
 
-       ret = wait_for_register_fw(dev_priv,
-                                  RING_RESET_CTL(engine->mmio_base),
-                                  RESET_CTL_READY_TO_RESET,
-                                  RESET_CTL_READY_TO_RESET,
-                                  700);
+       ret = intel_wait_for_register_fw(dev_priv,
+                                        RING_RESET_CTL(engine->mmio_base),
+                                        RESET_CTL_READY_TO_RESET,
+                                        RESET_CTL_READY_TO_RESET,
+                                        700);
        if (ret)
                DRM_ERROR("%s: reset request timeout\n", engine->name);
 
@@ -1682,22 +1700,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 
 static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       struct drm_i915_private *dev_priv = engine->i915;
 
        I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
                      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
 }
 
-static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
+static int gen8_reset_engines(struct drm_i915_private *dev_priv,
+                             unsigned engine_mask)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
 
        for_each_engine_masked(engine, dev_priv, engine_mask)
                if (gen8_request_engine_reset(engine))
                        goto not_ready;
 
-       return gen6_reset_engines(dev, engine_mask);
+       return gen6_reset_engines(dev_priv, engine_mask);
 
 not_ready:
        for_each_engine_masked(engine, dev_priv, engine_mask)
@@ -1706,35 +1724,35 @@ not_ready:
        return -EIO;
 }
 
-static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
-                                                         unsigned engine_mask)
+typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
+
+static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
 {
        if (!i915.reset)
                return NULL;
 
-       if (INTEL_INFO(dev)->gen >= 8)
+       if (INTEL_INFO(dev_priv)->gen >= 8)
                return gen8_reset_engines;
-       else if (INTEL_INFO(dev)->gen >= 6)
+       else if (INTEL_INFO(dev_priv)->gen >= 6)
                return gen6_reset_engines;
-       else if (IS_GEN5(dev))
+       else if (IS_GEN5(dev_priv))
                return ironlake_do_reset;
-       else if (IS_G4X(dev))
+       else if (IS_G4X(dev_priv))
                return g4x_do_reset;
-       else if (IS_G33(dev))
+       else if (IS_G33(dev_priv))
                return g33_do_reset;
-       else if (INTEL_INFO(dev)->gen >= 3)
+       else if (INTEL_INFO(dev_priv)->gen >= 3)
                return i915_do_reset;
        else
                return NULL;
 }
 
-int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
+int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int (*reset)(struct drm_device *, unsigned);
+       reset_func reset;
        int ret;
 
-       reset = intel_get_gpu_reset(dev);
+       reset = intel_get_gpu_reset(dev_priv);
        if (reset == NULL)
                return -ENODEV;
 
@@ -1742,15 +1760,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
         * request may be dropped and never completes (causing -EIO).
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-       ret = reset(dev, engine_mask);
+       ret = reset(dev_priv, engine_mask);
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
        return ret;
 }
 
-bool intel_has_gpu_reset(struct drm_device *dev)
+bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
 {
-       return intel_get_gpu_reset(dev) != NULL;
+       return intel_get_gpu_reset(dev_priv) != NULL;
 }
 
 int intel_guc_reset(struct drm_i915_private *dev_priv)
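With the reset_func typedef the awkward function-returning-function-pointer declaration goes away, and every backend takes dev_priv plus an engine mask. The top-level call pattern, following the signatures above:

    /* full GPU reset; ALL_ENGINES asks gen6+ backends to reset everything */
    if (intel_has_gpu_reset(dev_priv))
            ret = intel_gpu_reset(dev_priv, ALL_ENGINES);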
@@ -1758,7 +1776,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv)
        int ret;
        unsigned long irqflags;
 
-       if (!i915.enable_guc_submission)
+       if (!HAS_GUC(dev_priv))
                return -EINVAL;
 
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -1802,10 +1820,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
 {
        enum forcewake_domains fw_domains;
 
-       if (intel_vgpu_active(dev_priv->dev))
+       if (intel_vgpu_active(dev_priv))
                return 0;
 
-       switch (INTEL_INFO(dev_priv)->gen) {
+       switch (INTEL_GEN(dev_priv)) {
        case 9:
                fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
                break;
@@ -1842,10 +1860,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
 {
        enum forcewake_domains fw_domains;
 
-       if (intel_vgpu_active(dev_priv->dev))
+       if (intel_vgpu_active(dev_priv))
                return 0;
 
-       switch (INTEL_INFO(dev_priv)->gen) {
+       switch (INTEL_GEN(dev_priv)) {
        case 9:
                fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
                break;
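
A minimal standalone sketch, not part of the patch, of the reset_func
dispatch the hunks above introduce; the struct, generation numbers and
handler names are illustrative stand-ins for the i915 ones:

	#include <errno.h>

	struct dev_info { int gen; };

	typedef int (*reset_func)(struct dev_info *dev, unsigned engine_mask);

	static int gen8_reset(struct dev_info *dev, unsigned engine_mask) { return 0; }
	static int gen6_reset(struct dev_info *dev, unsigned engine_mask) { return 0; }

	/* Pick the newest handler the hardware generation supports. */
	static reset_func get_gpu_reset(struct dev_info *dev)
	{
		if (dev->gen >= 8)
			return gen8_reset;
		if (dev->gen >= 6)
			return gen6_reset;
		return NULL;	/* no reset support: caller returns -ENODEV */
	}

	static int gpu_reset(struct dev_info *dev, unsigned engine_mask)
	{
		reset_func reset = get_gpu_reset(dev);

		return reset ? reset(dev, engine_mask) : -ENODEV;
	}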
index c15051de80233a79e5e7a8295a615793345c789f..68db9621f1f0db0dcdbd715d14c02715c19b5b95 100644 (file)
@@ -403,9 +403,10 @@ struct lvds_dvo_timing {
        u8 vsync_off:4;
        u8 rsvd0:6;
        u8 hsync_off_hi:2;
-       u8 h_image;
-       u8 v_image;
-       u8 max_hv;
+       u8 himage_lo;
+       u8 vimage_lo;
+       u8 vimage_hi:4;
+       u8 himage_hi:4;
        u8 h_border;
        u8 v_border;
        u8 rsvd1:3;
@@ -446,10 +447,16 @@ struct bdb_lfp_backlight_data_entry {
        u8 obsolete3;
 } __packed;
 
+struct bdb_lfp_backlight_control_method {
+       u8 type:4;
+       u8 controller:4;
+} __packed;
+
 struct bdb_lfp_backlight_data {
        u8 entry_size;
        struct bdb_lfp_backlight_data_entry data[16];
        u8 level[16];
+       struct bdb_lfp_backlight_control_method backlight_control[16];
 } __packed;
 
 struct aimdb_header {
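
The lvds_dvo_timing change above splits each image dimension into a low
byte plus a shared pair of 4-bit high nibbles, following the DTD layout.
A hedged sketch of reassembling one 12-bit dimension from those fields
(the helper name is illustrative, not from the patch):

	/* Recombine the 12-bit horizontal image size from the split fields. */
	static inline unsigned int dvo_timing_h_image(const struct lvds_dvo_timing *t)
	{
		return ((unsigned int)t->himage_hi << 8) | t->himage_lo;
	}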
index a1844b50546c1529cb1ec653553a74c2a2afc059..f2c9ae822149c715bfb99a18f4a1218935cf2d8c 100644 (file)
@@ -1,7 +1,6 @@
 config DRM_IMX
        tristate "DRM Support for Freescale i.MX"
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select VIDEOMODE_HELPERS
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
index a24631fdf4add310de0eeadc7001c6a12a711866..359cd276555264fcc5346a1dfc1e54da85192f61 100644 (file)
@@ -28,6 +28,11 @@ struct imx_hdmi {
        struct regmap *regmap;
 };
 
+static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
+{
+       return container_of(e, struct imx_hdmi, encoder);
+}
+
 static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
        {
                45250000, {
@@ -109,15 +114,9 @@ static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
 {
 }
 
-static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder,
-                                        struct drm_display_mode *mode,
-                                        struct drm_display_mode *adj_mode)
+static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
 {
-}
-
-static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
-{
-       struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
+       struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
        int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder);
 
        regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
@@ -125,16 +124,23 @@ static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
                           mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
 }
 
-static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder)
+static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,
+                                   struct drm_crtc_state *crtc_state,
+                                   struct drm_connector_state *conn_state)
 {
-       imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24);
+       struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+
+       imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+       imx_crtc_state->di_hsync_pin = 2;
+       imx_crtc_state->di_vsync_pin = 3;
+
+       return 0;
 }
 
 static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
-       .mode_set   = dw_hdmi_imx_encoder_mode_set,
-       .prepare    = dw_hdmi_imx_encoder_prepare,
-       .commit     = dw_hdmi_imx_encoder_commit,
+       .enable     = dw_hdmi_imx_encoder_enable,
        .disable    = dw_hdmi_imx_encoder_disable,
+       .atomic_check = dw_hdmi_imx_atomic_check,
 };
 
 static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
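
enc_to_imx_hdmi() above replaces an open-coded container_of() cast with a
typed helper. A self-contained illustration of the pattern, with generic
names rather than the driver's:

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct encoder { int id; };

	struct my_hdmi {
		int mux;
		struct encoder encoder;	/* embedded member, not a pointer */
	};

	static struct my_hdmi *enc_to_my_hdmi(struct encoder *e)
	{
		/* Walk back from the embedded member to the enclosing struct. */
		return container_of(e, struct my_hdmi, encoder);
	}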
index 1f14b602882b539500203c2c5b1573d82a0128f5..9f7dafce3a4cf8aa94e5ac8c64ca210124609fbd 100644 (file)
  */
 #include <linux/component.h>
 #include <linux/device.h>
+#include <linux/dma-buf.h>
 #include <linux/fb.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/reservation.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -41,6 +45,7 @@ struct imx_drm_device {
        struct imx_drm_crtc                     *crtc[MAX_CRTC];
        unsigned int                            pipes;
        struct drm_fbdev_cma                    *fbhelper;
+       struct drm_atomic_state                 *state;
 };
 
 struct imx_drm_crtc {
@@ -85,42 +90,6 @@ static int imx_drm_driver_unload(struct drm_device *drm)
        return 0;
 }
 
-static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
-{
-       struct imx_drm_device *imxdrm = crtc->dev->dev_private;
-       unsigned i;
-
-       for (i = 0; i < MAX_CRTC; i++)
-               if (imxdrm->crtc[i] && imxdrm->crtc[i]->crtc == crtc)
-                       return imxdrm->crtc[i];
-
-       return NULL;
-}
-
-int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
-               int hsync_pin, int vsync_pin)
-{
-       struct imx_drm_crtc_helper_funcs *helper;
-       struct imx_drm_crtc *imx_crtc;
-
-       imx_crtc = imx_drm_find_crtc(encoder->crtc);
-       if (!imx_crtc)
-               return -EINVAL;
-
-       helper = &imx_crtc->imx_drm_helper_funcs;
-       if (helper->set_interface_pix_fmt)
-               return helper->set_interface_pix_fmt(encoder->crtc,
-                                       bus_format, hsync_pin, vsync_pin);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins);
-
-int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
-{
-       return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3);
-}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
-
 int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
 {
        return drm_crtc_vblank_get(imx_drm_crtc->crtc);
@@ -205,6 +174,63 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
 static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
        .fb_create = drm_fb_cma_create,
        .output_poll_changed = imx_drm_output_poll_changed,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_plane_state *plane_state;
+       struct drm_gem_cma_object *cma_obj;
+       struct fence *excl;
+       unsigned shared_count;
+       struct fence **shared;
+       unsigned int i, j;
+       int ret;
+
+       /* Wait for fences. */
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               plane_state = crtc->primary->state;
+               if (plane_state->fb) {
+                       cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
+                       if (cma_obj->base.dma_buf) {
+                               ret = reservation_object_get_fences_rcu(
+                                       cma_obj->base.dma_buf->resv, &excl,
+                                       &shared_count, &shared);
+                               if (unlikely(ret)) {
+                                       DRM_ERROR("failed to get fences for buffer\n");
+                                       continue;
+                               }
+
+                               if (excl) {
+                                       fence_wait(excl, false);
+                                       fence_put(excl);
+                               }
+                               for (j = 0; j < shared_count; j++) {
+                                       fence_wait(shared[j], false);
+                                       fence_put(shared[j]);
+                               }
+                       }
+               }
+       }
+
+       drm_atomic_helper_commit_modeset_disables(dev, state);
+
+       drm_atomic_helper_commit_planes(dev, state, true);
+
+       drm_atomic_helper_commit_modeset_enables(dev, state);
+
+       drm_atomic_helper_commit_hw_done(state);
+
+       drm_atomic_helper_wait_for_vblanks(dev, state);
+
+       drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
+       .atomic_commit_tail = imx_drm_atomic_commit_tail,
 };
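
imx_drm_atomic_commit_tail() above prepends an explicit fence wait to
what is otherwise roughly the stock helper sequence; for reference, a
sketch of that default ordering, assuming only the drm_atomic_helper
calls already used above:

	static void sketch_commit_tail(struct drm_atomic_state *state)
	{
		struct drm_device *dev = state->dev;

		drm_atomic_helper_commit_modeset_disables(dev, state);
		drm_atomic_helper_commit_planes(dev, state, false);
		drm_atomic_helper_commit_modeset_enables(dev, state);
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_wait_for_vblanks(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);
	}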
 
 /*
@@ -246,6 +272,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
        drm->mode_config.max_width = 4096;
        drm->mode_config.max_height = 4096;
        drm->mode_config.funcs = &imx_drm_mode_config_funcs;
+       drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
 
        drm_mode_config_init(drm);
 
@@ -276,6 +303,8 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
                }
        }
 
+       drm_mode_config_reset(drm);
+
        /*
         * All components are now initialised, so setup the fb helper.
         * The fb helper takes copies of key hardware information, so the
@@ -286,7 +315,6 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
                dev_warn(drm->dev, "Invalid legacyfb_depth.  Defaulting to 16bpp\n");
                legacyfb_depth = 16;
        }
-       drm_helper_disable_unused_functions(drm);
        imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
                                drm->mode_config.num_crtc, MAX_CRTC);
        if (IS_ERR(imxdrm->fbhelper)) {
@@ -400,11 +428,11 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
 };
 
 static struct drm_driver imx_drm_driver = {
-       .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+       .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+                                 DRIVER_ATOMIC,
        .load                   = imx_drm_driver_load,
        .unload                 = imx_drm_driver_unload,
        .lastclose              = imx_drm_driver_lastclose,
-       .set_busid              = drm_platform_set_busid,
        .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .dumb_create            = drm_gem_cma_dumb_create,
@@ -489,6 +517,7 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
 static int imx_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct imx_drm_device *imxdrm;
 
        /* The drm_dev is NULL before .load hook is called */
        if (drm_dev == NULL)
@@ -496,17 +525,26 @@ static int imx_drm_suspend(struct device *dev)
 
        drm_kms_helper_poll_disable(drm_dev);
 
+       imxdrm = drm_dev->dev_private;
+       imxdrm->state = drm_atomic_helper_suspend(drm_dev);
+       if (IS_ERR(imxdrm->state)) {
+               drm_kms_helper_poll_enable(drm_dev);
+               return PTR_ERR(imxdrm->state);
+       }
+
        return 0;
 }
 
 static int imx_drm_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct imx_drm_device *imx_drm;
 
        if (drm_dev == NULL)
                return 0;
 
-       drm_helper_resume_force_mode(drm_dev);
+       imx_drm = drm_dev->dev_private;
+       drm_atomic_helper_resume(drm_dev, imx_drm->state);
        drm_kms_helper_poll_enable(drm_dev);
 
        return 0;
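
The converted PM hooks depend on the atomic helpers pairing up:
drm_atomic_helper_suspend() duplicates and disables the current state,
and exactly that state object must be handed back to
drm_atomic_helper_resume(). A minimal sketch of the contract, with a
hypothetical context struct and trimmed error handling:

	struct pm_ctx { struct drm_atomic_state *state; };

	static int sketch_suspend(struct drm_device *drm, struct pm_ctx *ctx)
	{
		ctx->state = drm_atomic_helper_suspend(drm);
		return IS_ERR(ctx->state) ? PTR_ERR(ctx->state) : 0;
	}

	static int sketch_resume(struct drm_device *drm, struct pm_ctx *ctx)
	{
		/* Replays the saved state; it may be used exactly once. */
		return drm_atomic_helper_resume(drm, ctx->state);
	}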
index b0241b9d13342f516d01cb2e3b53f6afb9dbf47a..07d33e45f90f5538982697839685f8e0a1adca78 100644 (file)
@@ -15,11 +15,22 @@ struct platform_device;
 
 unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
 
+struct imx_crtc_state {
+       struct drm_crtc_state                   base;
+       u32                                     bus_format;
+       u32                                     bus_flags;
+       int                                     di_hsync_pin;
+       int                                     di_vsync_pin;
+};
+
+static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s)
+{
+       return container_of(s, struct imx_crtc_state, base);
+}
+
 struct imx_drm_crtc_helper_funcs {
        int (*enable_vblank)(struct drm_crtc *crtc);
        void (*disable_vblank)(struct drm_crtc *crtc);
-       int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
-                       u32 bus_format, int hsync_pin, int vsync_pin);
        const struct drm_crtc_helper_funcs *crtc_helper_funcs;
        const struct drm_crtc_funcs *crtc_funcs;
 };
@@ -41,11 +52,6 @@ void imx_drm_mode_config_init(struct drm_device *drm);
 
 struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
 
-int imx_drm_set_bus_format_pins(struct drm_encoder *encoder,
-               u32 bus_format, int hsync_pin, int vsync_pin);
-int imx_drm_set_bus_format(struct drm_encoder *encoder,
-               u32 bus_format);
-
 int imx_drm_encoder_parse_of(struct drm_device *drm,
        struct drm_encoder *encoder, struct device_node *np);
 
index a58eee59550ab43d058dd64cc85ba43ea753a1dd..5d2831dfb8b9842acff6dd194337fee8bacee1f7 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_of.h>
@@ -25,6 +27,7 @@
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 #include <linux/of_device.h>
 #include <linux/of_graph.h>
+#include <video/of_display_timing.h>
 #include <video/of_videomode.h>
 #include <linux/regmap.h>
 #include <linux/videodev2.h>
@@ -48,9 +51,6 @@
 #define LDB_DI1_VS_POL_ACT_LOW         (1 << 10)
 #define LDB_BGREF_RMODE_INT            (1 << 15)
 
-#define con_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, connector)
-#define enc_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, encoder)
-
 struct imx_ldb;
 
 struct imx_ldb_channel {
@@ -59,14 +59,25 @@ struct imx_ldb_channel {
        struct drm_encoder encoder;
        struct drm_panel *panel;
        struct device_node *child;
+       struct i2c_adapter *ddc;
        int chno;
        void *edid;
        int edid_len;
        struct drm_display_mode mode;
        int mode_valid;
-       int bus_format;
+       u32 bus_format;
 };
 
+static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
+{
+       return container_of(c, struct imx_ldb_channel, connector);
+}
+
+static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
+{
+       return container_of(e, struct imx_ldb_channel, encoder);
+}
+
 struct bus_mux {
        int reg;
        int shift;
@@ -91,6 +102,32 @@ static enum drm_connector_status imx_ldb_connector_detect(
        return connector_status_connected;
 }
 
+static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
+                                     u32 bus_format)
+{
+       struct imx_ldb *ldb = imx_ldb_ch->ldb;
+       int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
+
+       switch (bus_format) {
+       case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+               break;
+       case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+               if (imx_ldb_ch->chno == 0 || dual)
+                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
+               if (imx_ldb_ch->chno == 1 || dual)
+                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
+               break;
+       case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+               if (imx_ldb_ch->chno == 0 || dual)
+                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
+                                        LDB_BIT_MAP_CH0_JEIDA;
+               if (imx_ldb_ch->chno == 1 || dual)
+                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
+                                        LDB_BIT_MAP_CH1_JEIDA;
+               break;
+       }
+}
+
 static int imx_ldb_connector_get_modes(struct drm_connector *connector)
 {
        struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
@@ -98,15 +135,14 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
 
        if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
            imx_ldb_ch->panel->funcs->get_modes) {
-               struct drm_display_info *di = &connector->display_info;
-
                num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
-               if (!imx_ldb_ch->bus_format && di->num_bus_formats)
-                       imx_ldb_ch->bus_format = di->bus_formats[0];
                if (num_modes > 0)
                        return num_modes;
        }
 
+       if (!imx_ldb_ch->edid && imx_ldb_ch->ddc)
+               imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
+
        if (imx_ldb_ch->edid) {
                drm_mode_connector_update_edid_property(connector,
                                                        imx_ldb_ch->edid);
@@ -136,10 +172,6 @@ static struct drm_encoder *imx_ldb_connector_best_encoder(
        return &imx_ldb_ch->encoder;
 }
 
-static void imx_ldb_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
 static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
                unsigned long serial_clk, unsigned long di_clk)
 {
@@ -168,43 +200,7 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
                        chno);
 }
 
-static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
-{
-       struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
-       struct imx_ldb *ldb = imx_ldb_ch->ldb;
-       int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
-       u32 bus_format;
-
-       switch (imx_ldb_ch->bus_format) {
-       default:
-               dev_warn(ldb->dev,
-                        "could not determine data mapping, default to 18-bit \"spwg\"\n");
-               /* fallthrough */
-       case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
-               bus_format = MEDIA_BUS_FMT_RGB666_1X18;
-               break;
-       case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
-               bus_format = MEDIA_BUS_FMT_RGB888_1X24;
-               if (imx_ldb_ch->chno == 0 || dual)
-                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
-               if (imx_ldb_ch->chno == 1 || dual)
-                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
-               break;
-       case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
-               bus_format = MEDIA_BUS_FMT_RGB888_1X24;
-               if (imx_ldb_ch->chno == 0 || dual)
-                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
-                                        LDB_BIT_MAP_CH0_JEIDA;
-               if (imx_ldb_ch->chno == 1 || dual)
-                       ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
-                                        LDB_BIT_MAP_CH1_JEIDA;
-               break;
-       }
-
-       imx_drm_set_bus_format(encoder, bus_format);
-}
-
-static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
+static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
 {
        struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
        struct imx_ldb *ldb = imx_ldb_ch->ldb;
@@ -214,8 +210,13 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
        drm_panel_prepare(imx_ldb_ch->panel);
 
        if (dual) {
+               clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]);
+               clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]);
+
                clk_prepare_enable(ldb->clk[0]);
                clk_prepare_enable(ldb->clk[1]);
+       } else {
+               clk_set_parent(ldb->clk_sel[mux], ldb->clk[imx_ldb_ch->chno]);
        }
 
        if (imx_ldb_ch == &ldb->channel[0] || dual) {
@@ -260,6 +261,7 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
        unsigned long serial_clk;
        unsigned long di_clk = mode->clock * 1000;
        int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
+       u32 bus_format = imx_ldb_ch->bus_format;
 
        if (mode->clock > 170000) {
                dev_warn(ldb->dev,
@@ -281,18 +283,36 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
        }
 
        /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
-       if (imx_ldb_ch == &ldb->channel[0]) {
+       if (imx_ldb_ch == &ldb->channel[0] || dual) {
                if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                        ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
                else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                        ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW;
        }
-       if (imx_ldb_ch == &ldb->channel[1]) {
+       if (imx_ldb_ch == &ldb->channel[1] || dual) {
                if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                        ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;
                else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                        ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW;
        }
+
+       if (!bus_format) {
+               struct drm_connector_state *conn_state;
+               struct drm_connector *connector;
+               int i;
+
+               for_each_connector_in_state(encoder->crtc->state->state,
+                                           connector, conn_state, i) {
+                       struct drm_display_info *di = &connector->display_info;
+
+                       if (conn_state->crtc == encoder->crtc &&
+                           di->num_bus_formats) {
+                               bus_format = di->bus_formats[0];
+                               break;
+                       }
+               }
+       }
+       imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format);
 }
 
 static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
@@ -352,11 +372,45 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
        drm_panel_unprepare(imx_ldb_ch->panel);
 }
 
+static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
+                                       struct drm_crtc_state *crtc_state,
+                                       struct drm_connector_state *conn_state)
+{
+       struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+       struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
+       struct drm_display_info *di = &conn_state->connector->display_info;
+       u32 bus_format = imx_ldb_ch->bus_format;
+
+       /* Bus format description in DT overrides connector display info. */
+       if (!bus_format && di->num_bus_formats)
+               bus_format = di->bus_formats[0];
+       switch (bus_format) {
+       case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+               imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+               break;
+       case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+       case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+               imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       imx_crtc_state->di_hsync_pin = 2;
+       imx_crtc_state->di_vsync_pin = 3;
+
+       return 0;
+}
+
 static const struct drm_connector_funcs imx_ldb_connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = imx_ldb_connector_detect,
        .destroy = imx_drm_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
@@ -369,11 +423,10 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
 };
 
 static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
-       .dpms = imx_ldb_encoder_dpms,
-       .prepare = imx_ldb_encoder_prepare,
-       .commit = imx_ldb_encoder_commit,
        .mode_set = imx_ldb_encoder_mode_set,
+       .enable = imx_ldb_encoder_enable,
        .disable = imx_ldb_encoder_disable,
+       .atomic_check = imx_ldb_encoder_atomic_check,
 };
 
 static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
@@ -395,10 +448,10 @@ static int imx_ldb_register(struct drm_device *drm,
        struct imx_ldb_channel *imx_ldb_ch)
 {
        struct imx_ldb *ldb = imx_ldb_ch->ldb;
+       struct drm_encoder *encoder = &imx_ldb_ch->encoder;
        int ret;
 
-       ret = imx_drm_encoder_parse_of(drm, &imx_ldb_ch->encoder,
-                                      imx_ldb_ch->child);
+       ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
        if (ret)
                return ret;
 
@@ -412,9 +465,8 @@ static int imx_ldb_register(struct drm_device *drm,
                        return ret;
        }
 
-       drm_encoder_helper_add(&imx_ldb_ch->encoder,
-                       &imx_ldb_encoder_helper_funcs);
-       drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
+       drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
+       drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
                         DRM_MODE_ENCODER_LVDS, NULL);
 
        drm_connector_helper_add(&imx_ldb_ch->connector,
@@ -422,11 +474,14 @@ static int imx_ldb_register(struct drm_device *drm,
        drm_connector_init(drm, &imx_ldb_ch->connector,
                           &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
 
-       if (imx_ldb_ch->panel)
-               drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector);
+       if (imx_ldb_ch->panel) {
+               ret = drm_panel_attach(imx_ldb_ch->panel,
+                                      &imx_ldb_ch->connector);
+               if (ret)
+                       return ret;
+       }
 
-       drm_mode_connector_attach_encoder(&imx_ldb_ch->connector,
-                       &imx_ldb_ch->encoder);
+       drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
 
        return 0;
 }
@@ -553,7 +608,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 
        for_each_child_of_node(np, child) {
                struct imx_ldb_channel *channel;
-               struct device_node *port;
+               struct device_node *ddc_node;
+               struct device_node *ep;
+               int bus_format;
 
                ret = of_property_read_u32(child, "reg", &i);
                if (ret || i < 0 || i > 1)
@@ -576,50 +633,72 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                 * The output port is port@4 with an external 4-port mux or
                 * port@2 with the internal 2-port mux.
                 */
-               port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2);
-               if (port) {
-                       struct device_node *endpoint, *remote;
-
-                       endpoint = of_get_child_by_name(port, "endpoint");
-                       if (endpoint) {
-                               remote = of_graph_get_remote_port_parent(endpoint);
-                               if (remote)
-                                       channel->panel = of_drm_find_panel(remote);
-                               else
-                                       return -EPROBE_DEFER;
-                               if (!channel->panel) {
-                                       dev_err(dev, "panel not found: %s\n",
-                                               remote->full_name);
-                                       return -EPROBE_DEFER;
-                               }
+               ep = of_graph_get_endpoint_by_regs(child,
+                                                  imx_ldb->lvds_mux ? 4 : 2,
+                                                  -1);
+               if (ep) {
+                       struct device_node *remote;
+
+                       remote = of_graph_get_remote_port_parent(ep);
+                       of_node_put(ep);
+                       if (remote)
+                               channel->panel = of_drm_find_panel(remote);
+                       else
+                               return -EPROBE_DEFER;
+                       if (!channel->panel) {
+                               dev_err(dev, "panel not found: %s\n",
+                                       remote->full_name);
+                               of_node_put(remote);
+                               return -EPROBE_DEFER;
+                       }
+                       of_node_put(remote);
+               }
+
+               ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
+               if (ddc_node) {
+                       channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
+                       of_node_put(ddc_node);
+                       if (!channel->ddc) {
+                               dev_warn(dev, "failed to get ddc i2c adapter\n");
+                               return -EPROBE_DEFER;
                        }
                }
 
-               edidp = of_get_property(child, "edid", &channel->edid_len);
-               if (edidp) {
-                       channel->edid = kmemdup(edidp, channel->edid_len,
-                                               GFP_KERNEL);
-               } else if (!channel->panel) {
-                       ret = of_get_drm_display_mode(child, &channel->mode, 0);
-                       if (!ret)
-                               channel->mode_valid = 1;
+               if (!channel->ddc) {
+                       /* if no DDC available, fall back to hardcoded EDID */
+                       dev_dbg(dev, "no ddc available\n");
+
+                       edidp = of_get_property(child, "edid",
+                                               &channel->edid_len);
+                       if (edidp) {
+                               channel->edid = kmemdup(edidp,
+                                                       channel->edid_len,
+                                                       GFP_KERNEL);
+                       } else if (!channel->panel) {
+                               /* fall back to display-timings node */
+                               ret = of_get_drm_display_mode(child,
+                                                             &channel->mode,
+                                                             OF_USE_NATIVE_MODE);
+                               if (!ret)
+                                       channel->mode_valid = 1;
+                       }
                }
 
-               channel->bus_format = of_get_bus_format(dev, child);
-               if (channel->bus_format == -EINVAL) {
+               bus_format = of_get_bus_format(dev, child);
+               if (bus_format == -EINVAL) {
                        /*
                         * If no bus format was specified in the device tree,
                         * we can still get it from the connected panel later.
                         */
                        if (channel->panel && channel->panel->funcs &&
                            channel->panel->funcs->get_modes)
-                               channel->bus_format = 0;
+                               bus_format = 0;
                }
-               if (channel->bus_format < 0) {
+               if (bus_format < 0) {
                        dev_err(dev, "could not determine data mapping: %d\n",
-                               channel->bus_format);
-                       return channel->bus_format;
+                               bus_format);
+                       return bus_format;
                }
+               channel->bus_format = bus_format;
 
                ret = imx_ldb_register(drm, channel);
                if (ret)
@@ -647,6 +726,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
                channel->encoder.funcs->destroy(&channel->encoder);
 
                kfree(channel->edid);
+               i2c_put_adapter(channel->ddc);
        }
 }
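
Both imx_ldb_encoder_atomic_check() and the bind-time parsing above
apply the same precedence: a bus format given in the device tree wins,
the connected panel's display info is the fallback, and anything else
is rejected. A condensed sketch of that rule (the function is
illustrative, not from the patch):

	/* DT-specified bus format overrides connector display info. */
	static u32 pick_bus_format(u32 dt_bus_format,
				   const u32 *conn_formats, unsigned int num_formats)
	{
		if (dt_bus_format)
			return dt_bus_format;
		if (num_formats)
			return conn_formats[0];
		return 0;	/* caller treats an unknown format as -EINVAL */
	}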
 
index ae7a9fb3b8a229404c2d4725e936b04ec1bdd52b..5e875944ffa214f48f06d7ed8b687aabd1705ffb 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/spinlock.h>
 #include <linux/videodev2.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <video/imx-ipu-v3.h>
@@ -97,9 +98,6 @@
 /* TVE_TST_MODE_REG */
 #define TVE_TVDAC_TEST_MODE_MASK       (0x7 << 0)
 
-#define con_to_tve(x) container_of(x, struct imx_tve, connector)
-#define enc_to_tve(x) container_of(x, struct imx_tve, encoder)
-
 enum {
        TVE_MODE_TVOUT,
        TVE_MODE_VGA,
@@ -112,6 +110,8 @@ struct imx_tve {
        spinlock_t lock;        /* register lock */
        bool enabled;
        int mode;
+       int di_hsync_pin;
+       int di_vsync_pin;
 
        struct regmap *regmap;
        struct regulator *dac_reg;
@@ -120,10 +120,18 @@ struct imx_tve {
        struct clk *di_sel_clk;
        struct clk_hw clk_hw_di;
        struct clk *di_clk;
-       int vsync_pin;
-       int hsync_pin;
 };
 
+static inline struct imx_tve *con_to_tve(struct drm_connector *c)
+{
+       return container_of(c, struct imx_tve, connector);
+}
+
+static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
+{
+       return container_of(e, struct imx_tve, encoder);
+}
+
 static void tve_lock(void *__tve)
 __acquires(&tve->lock)
 {
@@ -148,8 +156,7 @@ static void tve_enable(struct imx_tve *tve)
                tve->enabled = true;
                clk_prepare_enable(tve->clk);
                ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
-                                        TVE_IPU_CLK_EN | TVE_EN,
-                                        TVE_IPU_CLK_EN | TVE_EN);
+                                        TVE_EN, TVE_EN);
        }
 
        /* clear interrupt status register */
@@ -172,7 +179,7 @@ static void tve_disable(struct imx_tve *tve)
        if (tve->enabled) {
                tve->enabled = false;
                ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
-                                        TVE_IPU_CLK_EN | TVE_EN, 0);
+                                        TVE_EN, 0);
                clk_disable_unprepare(tve->clk);
        }
 }
@@ -275,34 +282,6 @@ static struct drm_encoder *imx_tve_connector_best_encoder(
        return &tve->encoder;
 }
 
-static void imx_tve_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct imx_tve *tve = enc_to_tve(encoder);
-       int ret;
-
-       ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
-                                TVE_TV_OUT_MODE_MASK, TVE_TV_OUT_DISABLE);
-       if (ret < 0)
-               dev_err(tve->dev, "failed to disable TVOUT: %d\n", ret);
-}
-
-static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
-{
-       struct imx_tve *tve = enc_to_tve(encoder);
-
-       tve_disable(tve);
-
-       switch (tve->mode) {
-       case TVE_MODE_VGA:
-               imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
-                                           tve->hsync_pin, tve->vsync_pin);
-               break;
-       case TVE_MODE_TVOUT:
-               imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
-               break;
-       }
-}
-
 static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
                                     struct drm_display_mode *orig_mode,
                                     struct drm_display_mode *mode)
@@ -331,6 +310,9 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
                        ret);
        }
 
+       regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
+                          TVE_IPU_CLK_EN, TVE_IPU_CLK_EN);
+
        if (tve->mode == TVE_MODE_VGA)
                ret = tve_setup_vga(tve);
        else
@@ -339,7 +321,7 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
                dev_err(tve->dev, "failed to set configuration: %d\n", ret);
 }
 
-static void imx_tve_encoder_commit(struct drm_encoder *encoder)
+static void imx_tve_encoder_enable(struct drm_encoder *encoder)
 {
        struct imx_tve *tve = enc_to_tve(encoder);
 
@@ -353,11 +335,28 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder)
        tve_disable(tve);
 }
 
+static int imx_tve_atomic_check(struct drm_encoder *encoder,
+                               struct drm_crtc_state *crtc_state,
+                               struct drm_connector_state *conn_state)
+{
+       struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+       struct imx_tve *tve = enc_to_tve(encoder);
+
+       imx_crtc_state->bus_format = MEDIA_BUS_FMT_GBR888_1X24;
+       imx_crtc_state->di_hsync_pin = tve->di_hsync_pin;
+       imx_crtc_state->di_vsync_pin = tve->di_vsync_pin;
+
+       return 0;
+}
+
 static const struct drm_connector_funcs imx_tve_connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = imx_tve_connector_detect,
        .destroy = imx_drm_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
@@ -371,11 +370,10 @@ static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
 };
 
 static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
-       .dpms = imx_tve_encoder_dpms,
-       .prepare = imx_tve_encoder_prepare,
        .mode_set = imx_tve_encoder_mode_set,
-       .commit = imx_tve_encoder_commit,
+       .enable = imx_tve_encoder_enable,
        .disable = imx_tve_encoder_disable,
+       .atomic_check = imx_tve_atomic_check,
 };
 
 static irqreturn_t imx_tve_irq_handler(int irq, void *data)
@@ -493,8 +491,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
        encoder_type = tve->mode == TVE_MODE_VGA ?
                                DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
 
-       ret = imx_drm_encoder_parse_of(drm, &tve->encoder,
-                                      tve->dev->of_node);
+       ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
        if (ret)
                return ret;
 
@@ -585,15 +582,15 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 
        if (tve->mode == TVE_MODE_VGA) {
                ret = of_property_read_u32(np, "fsl,hsync-pin",
-                                          &tve->hsync_pin);
+                                          &tve->di_hsync_pin);
 
                if (ret < 0) {
-                       dev_err(dev, "failed to get vsync pin\n");
+                       dev_err(dev, "failed to get hsync pin\n");
                        return ret;
                }
 
-               ret |= of_property_read_u32(np, "fsl,vsync-pin",
-                                           &tve->vsync_pin);
+               ret = of_property_read_u32(np, "fsl,vsync-pin",
+                                          &tve->di_vsync_pin);
 
                if (ret < 0) {
                        dev_err(dev, "failed to get vsync pin\n");
@@ -631,7 +628,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 
        tve->dac_reg = devm_regulator_get(dev, "dac");
        if (!IS_ERR(tve->dac_reg)) {
-               regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+               ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
+               if (ret)
+                       return ret;
                ret = regulator_enable(tve->dac_reg);
                if (ret)
                        return ret;
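
tve_enable() and tve_disable() now toggle only TVE_EN, leaving
TVE_IPU_CLK_EN to mode_set; both rely on regmap_update_bits() touching
nothing outside the mask. A sketch of that read-modify-write semantic
on a plain integer, with locking and I/O omitted:

	static void sketch_update_bits(unsigned int *reg, unsigned int mask,
				       unsigned int val)
	{
		unsigned int tmp = *reg;

		tmp &= ~mask;		/* clear only the masked bits */
		tmp |= val & mask;	/* set the requested bits inside the mask */
		*reg = tmp;
	}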
index b2c30b8d981646719cbf3f3a1620aec834549b73..08e188bc10fc6200501d4f13fccc999970a35566 100644 (file)
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <linux/fb.h>
 #include <linux/clk.h>
 #include <linux/errno.h>
-#include <linux/reservation.h>
-#include <linux/dma-buf.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 
 
 #define DRIVER_DESC            "i.MX IPUv3 Graphics"
 
-enum ipu_flip_status {
-       IPU_FLIP_NONE,
-       IPU_FLIP_PENDING,
-       IPU_FLIP_SUBMITTED,
-};
-
-struct ipu_flip_work {
-       struct work_struct              unref_work;
-       struct drm_gem_object           *bo;
-       struct drm_pending_vblank_event *page_flip_event;
-       struct work_struct              fence_work;
-       struct ipu_crtc                 *crtc;
-       struct fence                    *excl;
-       unsigned                        shared_count;
-       struct fence                    **shared;
-};
-
 struct ipu_crtc {
        struct device           *dev;
        struct drm_crtc         base;
@@ -60,200 +43,166 @@ struct ipu_crtc {
 
        struct ipu_dc           *dc;
        struct ipu_di           *di;
-       int                     enabled;
-       enum ipu_flip_status    flip_state;
-       struct workqueue_struct *flip_queue;
-       struct ipu_flip_work    *flip_work;
        int                     irq;
-       u32                     bus_format;
-       int                     di_hsync_pin;
-       int                     di_vsync_pin;
 };
 
-#define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base)
+static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
+{
+       return container_of(crtc, struct ipu_crtc, base);
+}
 
-static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_enable(struct drm_crtc *crtc)
 {
+       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 
-       if (ipu_crtc->enabled)
-               return;
-
        ipu_dc_enable(ipu);
-       ipu_plane_enable(ipu_crtc->plane[0]);
-       /* Start DC channel and DI after IDMAC */
        ipu_dc_enable_channel(ipu_crtc->dc);
        ipu_di_enable(ipu_crtc->di);
-       drm_crtc_vblank_on(&ipu_crtc->base);
-
-       ipu_crtc->enabled = 1;
 }
 
-static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
+static void ipu_crtc_disable(struct drm_crtc *crtc)
 {
+       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
        struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
 
-       if (!ipu_crtc->enabled)
-               return;
-
-       /* Stop DC channel and DI before IDMAC */
        ipu_dc_disable_channel(ipu_crtc->dc);
        ipu_di_disable(ipu_crtc->di);
-       ipu_plane_disable(ipu_crtc->plane[0]);
        ipu_dc_disable(ipu);
-       drm_crtc_vblank_off(&ipu_crtc->base);
 
-       ipu_crtc->enabled = 0;
+       spin_lock_irq(&crtc->dev->event_lock);
+       if (crtc->state->event) {
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               crtc->state->event = NULL;
+       }
+       spin_unlock_irq(&crtc->dev->event_lock);
 }
 
-static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void imx_drm_crtc_reset(struct drm_crtc *crtc)
 {
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+       struct imx_crtc_state *state;
 
-       dev_dbg(ipu_crtc->dev, "%s mode: %d\n", __func__, mode);
-
-       switch (mode) {
-       case DRM_MODE_DPMS_ON:
-               ipu_fb_enable(ipu_crtc);
-               break;
-       case DRM_MODE_DPMS_STANDBY:
-       case DRM_MODE_DPMS_SUSPEND:
-       case DRM_MODE_DPMS_OFF:
-               ipu_fb_disable(ipu_crtc);
-               break;
+       if (crtc->state) {
+               if (crtc->state->mode_blob)
+                       drm_property_unreference_blob(crtc->state->mode_blob);
+
+               state = to_imx_crtc_state(crtc->state);
+               memset(state, 0, sizeof(*state));
+       } else {
+               state = kzalloc(sizeof(*state), GFP_KERNEL);
+               if (!state)
+                       return;
+               crtc->state = &state->base;
        }
+
+       state->base.crtc = crtc;
 }
 
-static void ipu_flip_unref_work_func(struct work_struct *__work)
+static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc)
 {
-       struct ipu_flip_work *work =
-                       container_of(__work, struct ipu_flip_work, unref_work);
+       struct imx_crtc_state *state;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return NULL;
 
-       drm_gem_object_unreference_unlocked(work->bo);
-       kfree(work);
+       __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+       WARN_ON(state->base.crtc != crtc);
+       state->base.crtc = crtc;
+
+       return &state->base;
 }
 
-static void ipu_flip_fence_work_func(struct work_struct *__work)
+static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
+                                      struct drm_crtc_state *state)
 {
-       struct ipu_flip_work *work =
-                       container_of(__work, struct ipu_flip_work, fence_work);
-       int i;
-
-       /* wait for all fences attached to the FB obj to signal */
-       if (work->excl) {
-               fence_wait(work->excl, false);
-               fence_put(work->excl);
-       }
-       for (i = 0; i < work->shared_count; i++) {
-               fence_wait(work->shared[i], false);
-               fence_put(work->shared[i]);
-       }
+       __drm_atomic_helper_crtc_destroy_state(state);
+       kfree(to_imx_crtc_state(state));
+}
+
+static const struct drm_crtc_funcs ipu_crtc_funcs = {
+       .set_config = drm_atomic_helper_set_config,
+       .destroy = drm_crtc_cleanup,
+       .page_flip = drm_atomic_helper_page_flip,
+       .reset = imx_drm_crtc_reset,
+       .atomic_duplicate_state = imx_drm_crtc_duplicate_state,
+       .atomic_destroy_state = imx_drm_crtc_destroy_state,
+};
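
imx_drm_crtc_reset(), _duplicate_state() and _destroy_state() above
follow the standard recipe for subclassing drm_crtc_state: the driver
owns the allocation while the __drm_atomic_helper_crtc_* helpers manage
the embedded base. A trimmed sketch of the duplicate half, assuming
only the helpers already used above:

	static struct drm_crtc_state *sketch_duplicate_state(struct drm_crtc *crtc)
	{
		struct imx_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

		if (!state)
			return NULL;

		/* Copies the base state and takes references on its objects. */
		__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

		return &state->base;
	}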
 
-       work->crtc->flip_state = IPU_FLIP_SUBMITTED;
+static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
+{
+       struct ipu_crtc *ipu_crtc = dev_id;
+
+       imx_drm_handle_vblank(ipu_crtc->imx_crtc);
+
+       return IRQ_HANDLED;
 }
 
-static int ipu_page_flip(struct drm_crtc *crtc,
-               struct drm_framebuffer *fb,
-               struct drm_pending_vblank_event *event,
-               uint32_t page_flip_flags)
+static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
+                                 const struct drm_display_mode *mode,
+                                 struct drm_display_mode *adjusted_mode)
 {
-       struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-       struct ipu_flip_work *flip_work;
+       struct videomode vm;
        int ret;
 
-       if (ipu_crtc->flip_state != IPU_FLIP_NONE)
-               return -EBUSY;
-
-       ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
-       if (ret) {
-               dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n");
-               list_del(&event->base.link);
-
-               return ret;
-       }
+       drm_display_mode_to_videomode(adjusted_mode, &vm);
 
-       flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL);
-       if (!flip_work) {
-               ret = -ENOMEM;
-               goto put_vblank;
-       }
-       INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
-       flip_work->page_flip_event = event;
+       ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
+       if (ret)
+               return false;
 
-       /* get BO backing the old framebuffer and take a reference */
-       flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base;
-       drm_gem_object_reference(flip_work->bo);
+       if ((vm.vsync_len == 0) || (vm.hsync_len == 0))
+               return false;
 
-       ipu_crtc->flip_work = flip_work;
-       /*
-        * If the object has a DMABUF attached, we need to wait on its fences
-        * if there are any.
-        */
-       if (cma_obj->base.dma_buf) {
-               INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
-               flip_work->crtc = ipu_crtc;
+       drm_display_mode_from_videomode(&vm, adjusted_mode);
 
-               ret = reservation_object_get_fences_rcu(
-                               cma_obj->base.dma_buf->resv, &flip_work->excl,
-                               &flip_work->shared_count, &flip_work->shared);
+       return true;
+}
 
-               if (unlikely(ret)) {
-                       DRM_ERROR("failed to get fences for buffer\n");
-                       goto free_flip_work;
-               }
+static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
+                                struct drm_crtc_state *state)
+{
+       u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
 
-               /* No need to queue the worker if the are no fences */
-               if (!flip_work->excl && !flip_work->shared_count) {
-                       ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
-               } else {
-                       ipu_crtc->flip_state = IPU_FLIP_PENDING;
-                       queue_work(ipu_crtc->flip_queue,
-                                  &flip_work->fence_work);
-               }
-       } else {
-               ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
-       }
+       if (state->active && (primary_plane_mask & state->plane_mask) == 0)
+               return -EINVAL;
 
        return 0;
-
-free_flip_work:
-       drm_gem_object_unreference_unlocked(flip_work->bo);
-       kfree(flip_work);
-       ipu_crtc->flip_work = NULL;
-put_vblank:
-       imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-
-       return ret;
 }
 
-static const struct drm_crtc_funcs ipu_crtc_funcs = {
-       .set_config = drm_crtc_helper_set_config,
-       .destroy = drm_crtc_cleanup,
-       .page_flip = ipu_page_flip,
-};
+static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
+                                 struct drm_crtc_state *old_crtc_state)
+{
+       spin_lock_irq(&crtc->dev->event_lock);
+       if (crtc->state->event) {
+               WARN_ON(drm_crtc_vblank_get(crtc));
+               drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+               crtc->state->event = NULL;
+       }
+       spin_unlock_irq(&crtc->dev->event_lock);
+}
 
-static int ipu_crtc_mode_set(struct drm_crtc *crtc,
-                              struct drm_display_mode *orig_mode,
-                              struct drm_display_mode *mode,
-                              int x, int y,
-                              struct drm_framebuffer *old_fb)
+static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_encoder *encoder;
        struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
+       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+       struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
        struct ipu_di_signal_cfg sig_cfg = {};
        unsigned long encoder_types = 0;
-       int ret;
 
        dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
                        mode->hdisplay);
        dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
                        mode->vdisplay);
 
-       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                if (encoder->crtc == crtc)
                        encoder_types |= BIT(encoder->encoder_type);
+       }
 
        dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n",
                __func__, encoder_types);
@@ -271,112 +220,30 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
        else
                sig_cfg.clkflags = 0;
 
-       sig_cfg.enable_pol = 1;
-       sig_cfg.clk_pol = 0;
-       sig_cfg.bus_format = ipu_crtc->bus_format;
+       sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
+       /* Default to driving pixel data on negative clock edges */
+       sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
+                            DRM_BUS_FLAG_PIXDATA_POSEDGE);
+       sig_cfg.bus_format = imx_crtc_state->bus_format;
        sig_cfg.v_to_h_sync = 0;
-       sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
-       sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;
+       sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
+       sig_cfg.vsync_pin = imx_crtc_state->di_vsync_pin;
 
        drm_display_mode_to_videomode(mode, &sig_cfg.mode);
 
-       ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
-                              mode->flags & DRM_MODE_FLAG_INTERLACE,
-                              ipu_crtc->bus_format, mode->hdisplay);
-       if (ret) {
-               dev_err(ipu_crtc->dev,
-                               "initializing display controller failed with %d\n",
-                               ret);
-               return ret;
-       }
-
-       ret = ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
-       if (ret) {
-               dev_err(ipu_crtc->dev,
-                               "initializing panel failed with %d\n", ret);
-               return ret;
-       }
-
-       return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode,
-                                 crtc->primary->fb,
-                                 0, 0, mode->hdisplay, mode->vdisplay,
-                                 x, y, mode->hdisplay, mode->vdisplay,
-                                 mode->flags & DRM_MODE_FLAG_INTERLACE);
-}
-
-static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
-{
-       unsigned long flags;
-       struct drm_device *drm = ipu_crtc->base.dev;
-       struct ipu_flip_work *work = ipu_crtc->flip_work;
-
-       spin_lock_irqsave(&drm->event_lock, flags);
-       if (work->page_flip_event)
-               drm_crtc_send_vblank_event(&ipu_crtc->base,
-                                          work->page_flip_event);
-       imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
-       spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
-static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
-{
-       struct ipu_crtc *ipu_crtc = dev_id;
-
-       imx_drm_handle_vblank(ipu_crtc->imx_crtc);
-
-       if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
-               struct ipu_plane *plane = ipu_crtc->plane[0];
-
-               ipu_plane_set_base(plane, ipu_crtc->base.primary->fb,
-                                  plane->x, plane->y);
-               ipu_crtc_handle_pageflip(ipu_crtc);
-               queue_work(ipu_crtc->flip_queue,
-                          &ipu_crtc->flip_work->unref_work);
-               ipu_crtc->flip_state = IPU_FLIP_NONE;
-       }
-
-       return IRQ_HANDLED;
-}
-
-static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
-                                 const struct drm_display_mode *mode,
-                                 struct drm_display_mode *adjusted_mode)
-{
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-       struct videomode vm;
-       int ret;
-
-       drm_display_mode_to_videomode(adjusted_mode, &vm);
-
-       ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
-       if (ret)
-               return false;
-
-       drm_display_mode_from_videomode(&vm, adjusted_mode);
-
-       return true;
-}
-
-static void ipu_crtc_prepare(struct drm_crtc *crtc)
-{
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
-       ipu_fb_disable(ipu_crtc);
-}
-
-static void ipu_crtc_commit(struct drm_crtc *crtc)
-{
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
-       ipu_fb_enable(ipu_crtc);
+       ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
+                        mode->flags & DRM_MODE_FLAG_INTERLACE,
+                        imx_crtc_state->bus_format, mode->hdisplay);
+       ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
 }
 
 static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
-       .dpms = ipu_crtc_dpms,
        .mode_fixup = ipu_crtc_mode_fixup,
-       .mode_set = ipu_crtc_mode_set,
-       .prepare = ipu_crtc_prepare,
-       .commit = ipu_crtc_commit,
+       .mode_set_nofb = ipu_crtc_mode_set_nofb,
+       .atomic_check = ipu_crtc_atomic_check,
+       .atomic_begin = ipu_crtc_atomic_begin,
+       .disable = ipu_crtc_disable,
+       .enable = ipu_crtc_enable,
 };
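
The helper table above swaps the legacy dpms/prepare/commit/mode_set entry
points for their atomic counterparts. Note that .mode_set_nofb cannot return
an error, which is why the error handling around ipu_dc_init_sync() and
ipu_di_init_sync_panel() is dropped rather than moved. ipu_crtc_atomic_begin()
itself is not visible in this hunk; for a driver that completes flips from its
vblank interrupt, the canonical shape is a sketch like the following (assuming
the stock drm_crtc_arm_vblank_event() helper is available):

    /* sketch: arm the pending flip event in .atomic_begin; the vblank
     * interrupt handler then delivers it via drm_crtc_send_vblank_event() */
    static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
                                      struct drm_crtc_state *old_crtc_state)
    {
            spin_lock_irq(&crtc->dev->event_lock);
            if (crtc->state->event) {
                    WARN_ON(drm_crtc_vblank_get(crtc));
                    drm_crtc_arm_vblank_event(crtc, crtc->state->event);
                    crtc->state->event = NULL;
            }
            spin_unlock_irq(&crtc->dev->event_lock);
    }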
 
 static int ipu_enable_vblank(struct drm_crtc *crtc)
@@ -395,22 +262,9 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
        disable_irq_nosync(ipu_crtc->irq);
 }
 
-static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
-               u32 bus_format, int hsync_pin, int vsync_pin)
-{
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
-       ipu_crtc->bus_format = bus_format;
-       ipu_crtc->di_hsync_pin = hsync_pin;
-       ipu_crtc->di_vsync_pin = vsync_pin;
-
-       return 0;
-}
-
 static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = {
        .enable_vblank = ipu_enable_vblank,
        .disable_vblank = ipu_disable_vblank,
-       .set_interface_pix_fmt = ipu_set_interface_pix_fmt,
        .crtc_funcs = &ipu_crtc_funcs,
        .crtc_helper_funcs = &ipu_helper_funcs,
 };
@@ -492,8 +346,16 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
                                                IPU_DP_FLOW_SYNC_FG,
                                                drm_crtc_mask(&ipu_crtc->base),
                                                DRM_PLANE_TYPE_OVERLAY);
-               if (IS_ERR(ipu_crtc->plane[1]))
+               if (IS_ERR(ipu_crtc->plane[1])) {
                        ipu_crtc->plane[1] = NULL;
+               } else {
+                       ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
+                       if (ret) {
+                               dev_err(ipu_crtc->dev,
+                                       "getting plane 1 resources failed with %d.\n",
+                                       ret);
+                               goto err_put_plane0_res;
+                       }
+               }
        }
 
        ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
@@ -501,16 +363,17 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
                        "imx_drm", ipu_crtc);
        if (ret < 0) {
                dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
-               goto err_put_plane_res;
+               goto err_put_plane1_res;
        }
        /* Only enable IRQ when we actually need it to trigger work. */
        disable_irq(ipu_crtc->irq);
 
-       ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
-
        return 0;
 
-err_put_plane_res:
+err_put_plane1_res:
+       if (ipu_crtc->plane[1])
+               ipu_plane_put_resources(ipu_crtc->plane[1]);
+err_put_plane0_res:
        ipu_plane_put_resources(ipu_crtc->plane[0]);
 err_remove_crtc:
        imx_drm_remove_crtc(ipu_crtc->imx_crtc);
@@ -549,9 +412,10 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
 
        imx_drm_remove_crtc(ipu_crtc->imx_crtc);
 
-       destroy_workqueue(ipu_crtc->flip_queue);
-       ipu_plane_put_resources(ipu_crtc->plane[0]);
        ipu_put_resources(ipu_crtc);
+       if (ipu_crtc->plane[1])
+               ipu_plane_put_resources(ipu_crtc->plane[1]);
+       ipu_plane_put_resources(ipu_crtc->plane[0]);
 }
 
 static const struct component_ops ipu_crtc_ops = {
index 681ec6eb77d916fc3be6a47d783fab54ed1f07fb..4ad67d015ec7fdcb32dc84b17ebb99eb185590d2 100644 (file)
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "video/imx-ipu-v3.h"
 #include "ipuv3-plane.h"
 
-#define to_ipu_plane(x)        container_of(x, struct ipu_plane, base)
+static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
+{
+       return container_of(p, struct ipu_plane, base);
+}
 
 static const uint32_t ipu_plane_formats[] = {
        DRM_FORMAT_ARGB1555,
@@ -38,6 +44,8 @@ static const uint32_t ipu_plane_formats[] = {
        DRM_FORMAT_RGBX8888,
        DRM_FORMAT_BGRA8888,
        DRM_FORMAT_BGRX8888,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
        DRM_FORMAT_YUV420,
@@ -51,62 +59,67 @@ int ipu_plane_irq(struct ipu_plane *ipu_plane)
                                     IPU_IRQ_EOF);
 }
 
-static int calc_vref(struct drm_display_mode *mode)
+static inline unsigned long
+drm_plane_state_to_eba(struct drm_plane_state *state)
 {
-       unsigned long htotal, vtotal;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_gem_cma_object *cma_obj;
 
-       htotal = mode->htotal;
-       vtotal = mode->vtotal;
+       cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+       BUG_ON(!cma_obj);
 
-       if (!htotal || !vtotal)
-               return 60;
-
-       return DIV_ROUND_UP(mode->clock * 1000, vtotal * htotal);
+       return cma_obj->paddr + fb->offsets[0] +
+              fb->pitches[0] * (state->src_y >> 16) +
+              (fb->bits_per_pixel >> 3) * (state->src_x >> 16);
 }
 
-static inline int calc_bandwidth(int width, int height, unsigned int vref)
+static inline unsigned long
+drm_plane_state_to_ubo(struct drm_plane_state *state)
 {
-       return width * height * vref;
-}
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_gem_cma_object *cma_obj;
+       unsigned long eba = drm_plane_state_to_eba(state);
 
-int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
-                      int x, int y)
-{
-       struct drm_gem_cma_object *cma_obj[3];
-       unsigned long eba, ubo, vbo;
-       int active, i;
+       cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
+       BUG_ON(!cma_obj);
 
-       for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
-               cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i);
-               if (!cma_obj[i]) {
-                       DRM_DEBUG_KMS("plane %d entry is null.\n", i);
-                       return -EFAULT;
-               }
-       }
+       return cma_obj->paddr + fb->offsets[1] +
+              fb->pitches[1] * (state->src_y >> 16) / 2 +
+              (state->src_x >> 16) / 2 - eba;
+}
 
-       eba = cma_obj[0]->paddr + fb->offsets[0] +
-             fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x;
+static inline unsigned long
+drm_plane_state_to_vbo(struct drm_plane_state *state)
+{
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_gem_cma_object *cma_obj;
+       unsigned long eba = drm_plane_state_to_eba(state);
 
-       if (eba & 0x7) {
-               DRM_DEBUG_KMS("base address must be a multiple of 8.\n");
-               return -EINVAL;
-       }
+       cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
+       BUG_ON(!cma_obj);
 
-       if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) {
-               DRM_DEBUG_KMS("pitches out of range.\n");
-               return -EINVAL;
-       }
+       return cma_obj->paddr + fb->offsets[2] +
+              fb->pitches[2] * (state->src_y >> 16) / 2 +
+              (state->src_x >> 16) / 2 - eba;
+}
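
These three helpers turn a plane state into IDMAC base addresses. The src_x
and src_y fields are 16.16 fixed point (hence the >> 16), and the U/V
computations halve pitch and x because YUV420 subsamples chroma by two in each
direction. A worked example with illustrative numbers (this layout is an
assumption, not taken from the patch):

    /* a 1920x1080 YUV420 framebuffer with its three planes packed
     * back to back:
     *   pitches[0] = 1920, pitches[1] = pitches[2] = 960
     *   offsets[0] = 0
     *   offsets[1] = 1920 * 1080            = 2073600   (U plane)
     *   offsets[2] = offsets[1] + 960 * 540 = 2592000   (V plane)
     *
     * with src_x = src_y = 0:
     *   eba = cma_obj->paddr          (must be a multiple of 8)
     *   ubo = 2073600, vbo = 2592000  (multiples of 8, both < 0xfffff8)
     *
     * so this layout passes every restriction later enforced in
     * ipu_plane_atomic_check(). */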
 
-       if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) {
-               DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n");
-               return -EINVAL;
-       }
+static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
+                                     struct drm_plane_state *old_state)
+{
+       struct drm_plane *plane = &ipu_plane->base;
+       struct drm_plane_state *state = plane->state;
+       struct drm_framebuffer *fb = state->fb;
+       unsigned long eba, ubo, vbo;
+       int active;
 
-       ipu_plane->stride[0] = fb->pitches[0];
+       eba = drm_plane_state_to_eba(state);
 
        switch (fb->pixel_format) {
        case DRM_FORMAT_YUV420:
        case DRM_FORMAT_YVU420:
+               if (old_state->fb)
+                       break;
+
                /*
                 * Multiplanar formats have to meet the following restrictions:
                 * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
@@ -115,59 +128,28 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
                 * - Only EBA may be changed while scanout is active
                 * - The strides of U and V planes must be identical.
                 */
-               ubo = cma_obj[1]->paddr + fb->offsets[1] +
-                     fb->pitches[1] * y / 2 + x / 2 - eba;
-               vbo = cma_obj[2]->paddr + fb->offsets[2] +
-                     fb->pitches[2] * y / 2 + x / 2 - eba;
+               ubo = drm_plane_state_to_ubo(state);
+               vbo = drm_plane_state_to_vbo(state);
 
-               if ((ubo & 0x7) || (vbo & 0x7)) {
-                       DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n");
-                       return -EINVAL;
-               }
-
-               if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) {
-                       DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
-                       return -EINVAL;
-               }
-
-               if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
-                                          (ipu_plane->v_offset != vbo))) {
-                       DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
-                       return -EINVAL;
-               }
-
-               if (fb->pitches[1] != fb->pitches[2]) {
-                       DRM_DEBUG_KMS("U/V pitches must be identical.\n");
-                       return -EINVAL;
-               }
-
-               if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
-                       DRM_DEBUG_KMS("U/V pitches out of range.\n");
-                       return -EINVAL;
-               }
-
-               if (ipu_plane->enabled &&
-                   (ipu_plane->stride[1] != fb->pitches[1])) {
-                       DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
-                       return -EINVAL;
-               }
-
-               ipu_plane->u_offset = ubo;
-               ipu_plane->v_offset = vbo;
-               ipu_plane->stride[1] = fb->pitches[1];
+               if (fb->pixel_format == DRM_FORMAT_YUV420)
+                       ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+                                                     fb->pitches[1], ubo, vbo);
+               else
+                       ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
+                                                     fb->pitches[1], vbo, ubo);
 
                dev_dbg(ipu_plane->base.dev->dev,
-                       "phys = %pad %pad %pad, x = %d, y = %d",
-                       &cma_obj[0]->paddr, &cma_obj[1]->paddr,
-                       &cma_obj[2]->paddr, x, y);
+                       "phys = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
+                       state->src_x >> 16, state->src_y >> 16);
                break;
        default:
-               dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d",
-                       &cma_obj[0]->paddr, x, y);
+               dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
+                       eba, state->src_x >> 16, state->src_y >> 16);
+
                break;
        }
 
-       if (ipu_plane->enabled) {
+       if (old_state->fb) {
                active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
                ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
                ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
@@ -175,155 +157,6 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
                ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
                ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
        }
-
-       /* cache offsets for subsequent pageflips */
-       ipu_plane->x = x;
-       ipu_plane->y = y;
-
-       return 0;
-}
-
-int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
-                      struct drm_display_mode *mode,
-                      struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                      unsigned int crtc_w, unsigned int crtc_h,
-                      uint32_t src_x, uint32_t src_y,
-                      uint32_t src_w, uint32_t src_h, bool interlaced)
-{
-       struct device *dev = ipu_plane->base.dev->dev;
-       int ret;
-
-       /* no scaling */
-       if (src_w != crtc_w || src_h != crtc_h)
-               return -EINVAL;
-
-       /* clip to crtc bounds */
-       if (crtc_x < 0) {
-               if (-crtc_x > crtc_w)
-                       return -EINVAL;
-               src_x += -crtc_x;
-               src_w -= -crtc_x;
-               crtc_w -= -crtc_x;
-               crtc_x = 0;
-       }
-       if (crtc_y < 0) {
-               if (-crtc_y > crtc_h)
-                       return -EINVAL;
-               src_y += -crtc_y;
-               src_h -= -crtc_y;
-               crtc_h -= -crtc_y;
-               crtc_y = 0;
-       }
-       if (crtc_x + crtc_w > mode->hdisplay) {
-               if (crtc_x > mode->hdisplay)
-                       return -EINVAL;
-               crtc_w = mode->hdisplay - crtc_x;
-               src_w = crtc_w;
-       }
-       if (crtc_y + crtc_h > mode->vdisplay) {
-               if (crtc_y > mode->vdisplay)
-                       return -EINVAL;
-               crtc_h = mode->vdisplay - crtc_y;
-               src_h = crtc_h;
-       }
-       /* full plane minimum width is 13 pixels */
-       if (crtc_w < 13 && (ipu_plane->dp_flow != IPU_DP_FLOW_SYNC_FG))
-               return -EINVAL;
-       if (crtc_h < 2)
-               return -EINVAL;
-
-       /*
-        * since we cannot touch active IDMAC channels, we do not support
-        * resizing the enabled plane or changing its format
-        */
-       if (ipu_plane->enabled) {
-               if (src_w != ipu_plane->w || src_h != ipu_plane->h ||
-                   fb->pixel_format != ipu_plane->base.fb->pixel_format)
-                       return -EINVAL;
-
-               return ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
-       }
-
-       switch (ipu_plane->dp_flow) {
-       case IPU_DP_FLOW_SYNC_BG:
-               ret = ipu_dp_setup_channel(ipu_plane->dp,
-                               IPUV3_COLORSPACE_RGB,
-                               IPUV3_COLORSPACE_RGB);
-               if (ret) {
-                       dev_err(dev,
-                               "initializing display processor failed with %d\n",
-                               ret);
-                       return ret;
-               }
-               ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
-               break;
-       case IPU_DP_FLOW_SYNC_FG:
-               ipu_dp_setup_channel(ipu_plane->dp,
-                               ipu_drm_fourcc_to_colorspace(fb->pixel_format),
-                               IPUV3_COLORSPACE_UNKNOWN);
-               ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y);
-               /* Enable local alpha on partial plane */
-               switch (fb->pixel_format) {
-               case DRM_FORMAT_ARGB1555:
-               case DRM_FORMAT_ABGR1555:
-               case DRM_FORMAT_RGBA5551:
-               case DRM_FORMAT_BGRA5551:
-               case DRM_FORMAT_ARGB4444:
-               case DRM_FORMAT_ARGB8888:
-               case DRM_FORMAT_ABGR8888:
-               case DRM_FORMAT_RGBA8888:
-               case DRM_FORMAT_BGRA8888:
-                       ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
-                       break;
-               default:
-                       break;
-               }
-       }
-
-       ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
-                       calc_bandwidth(crtc_w, crtc_h,
-                                      calc_vref(mode)), 64);
-       if (ret) {
-               dev_err(dev, "allocating dmfc bandwidth failed with %d\n", ret);
-               return ret;
-       }
-
-       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
-
-       ipu_cpmem_zero(ipu_plane->ipu_ch);
-       ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
-       ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
-       if (ret < 0) {
-               dev_err(dev, "unsupported pixel format 0x%08x\n",
-                       fb->pixel_format);
-               return ret;
-       }
-       ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
-       ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
-       ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
-
-       ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
-       if (ret < 0)
-               return ret;
-       if (interlaced)
-               ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
-
-       if (fb->pixel_format == DRM_FORMAT_YUV420) {
-               ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
-                                             ipu_plane->stride[1],
-                                             ipu_plane->u_offset,
-                                             ipu_plane->v_offset);
-       } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
-               ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
-                                             ipu_plane->stride[1],
-                                             ipu_plane->v_offset,
-                                             ipu_plane->u_offset);
-       }
-
-       ipu_plane->w = src_w;
-       ipu_plane->h = src_h;
-
-       return 0;
 }
 
 void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
@@ -370,7 +203,7 @@ err_out:
        return ret;
 }
 
-void ipu_plane_enable(struct ipu_plane *ipu_plane)
+static void ipu_plane_enable(struct ipu_plane *ipu_plane)
 {
        if (ipu_plane->dp)
                ipu_dp_enable(ipu_plane->ipu);
@@ -378,14 +211,10 @@ void ipu_plane_enable(struct ipu_plane *ipu_plane)
        ipu_idmac_enable_channel(ipu_plane->ipu_ch);
        if (ipu_plane->dp)
                ipu_dp_enable_channel(ipu_plane->dp);
-
-       ipu_plane->enabled = true;
 }
 
-void ipu_plane_disable(struct ipu_plane *ipu_plane)
+static void ipu_plane_disable(struct ipu_plane *ipu_plane)
 {
-       ipu_plane->enabled = false;
-
        ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
 
        if (ipu_plane->dp)
@@ -396,75 +225,225 @@ void ipu_plane_disable(struct ipu_plane *ipu_plane)
                ipu_dp_disable(ipu_plane->ipu);
 }
 
-/*
- * drm_plane API
- */
-
-static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-                           struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                           unsigned int crtc_w, unsigned int crtc_h,
-                           uint32_t src_x, uint32_t src_y,
-                           uint32_t src_w, uint32_t src_h)
+static int ipu_disable_plane(struct drm_plane *plane)
 {
        struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-       int ret = 0;
-
-       DRM_DEBUG_KMS("plane - %p\n", plane);
 
-       if (!ipu_plane->enabled)
-               ret = ipu_plane_get_resources(ipu_plane);
-       if (ret < 0)
-               return ret;
-
-       ret = ipu_plane_mode_set(ipu_plane, crtc, &crtc->hwmode, fb,
-                       crtc_x, crtc_y, crtc_w, crtc_h,
-                       src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
-                       false);
-       if (ret < 0) {
-               ipu_plane_put_resources(ipu_plane);
-               return ret;
-       }
-
-       if (crtc != plane->crtc)
-               dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
-                               plane->crtc, crtc);
-       plane->crtc = crtc;
+       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-       if (!ipu_plane->enabled)
-               ipu_plane_enable(ipu_plane);
+       ipu_plane_disable(ipu_plane);
 
        return 0;
 }
 
-static int ipu_disable_plane(struct drm_plane *plane)
+static void ipu_plane_destroy(struct drm_plane *plane)
 {
        struct ipu_plane *ipu_plane = to_ipu_plane(plane);
 
        DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
 
-       if (ipu_plane->enabled)
-               ipu_plane_disable(ipu_plane);
+       ipu_disable_plane(plane);
+       drm_plane_cleanup(plane);
+       kfree(ipu_plane);
+}
 
-       ipu_plane_put_resources(ipu_plane);
+static const struct drm_plane_funcs ipu_plane_funcs = {
+       .update_plane   = drm_atomic_helper_update_plane,
+       .disable_plane  = drm_atomic_helper_disable_plane,
+       .destroy        = ipu_plane_destroy,
+       .reset          = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
+};
+
+static int ipu_plane_atomic_check(struct drm_plane *plane,
+                                 struct drm_plane_state *state)
+{
+       struct drm_plane_state *old_state = plane->state;
+       struct drm_crtc_state *crtc_state;
+       struct device *dev = plane->dev->dev;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_framebuffer *old_fb = old_state->fb;
+       unsigned long eba, ubo, vbo, old_ubo, old_vbo;
+
+       /* Ok to disable */
+       if (!fb)
+               return 0;
+
+       if (!state->crtc)
+               return -EINVAL;
+
+       crtc_state =
+               drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+       if (WARN_ON(!crtc_state))
+               return -EINVAL;
+
+       /* CRTC should be enabled */
+       if (!crtc_state->enable)
+               return -EINVAL;
+
+       /* no scaling */
+       if (state->src_w >> 16 != state->crtc_w ||
+           state->src_h >> 16 != state->crtc_h)
+               return -EINVAL;
+
+       switch (plane->type) {
+       case DRM_PLANE_TYPE_PRIMARY:
+               /* full plane doesn't support partial off screen */
+               if (state->crtc_x || state->crtc_y ||
+                   state->crtc_w != crtc_state->adjusted_mode.hdisplay ||
+                   state->crtc_h != crtc_state->adjusted_mode.vdisplay)
+                       return -EINVAL;
+
+               /* full plane minimum width is 13 pixels */
+               if (state->crtc_w < 13)
+                       return -EINVAL;
+               break;
+       case DRM_PLANE_TYPE_OVERLAY:
+               if (state->crtc_x < 0 || state->crtc_y < 0)
+                       return -EINVAL;
+
+               if (state->crtc_x + state->crtc_w >
+                   crtc_state->adjusted_mode.hdisplay)
+                       return -EINVAL;
+               if (state->crtc_y + state->crtc_h >
+                   crtc_state->adjusted_mode.vdisplay)
+                       return -EINVAL;
+               break;
+       default:
+               dev_warn(dev, "Unsupported plane type\n");
+               return -EINVAL;
+       }
+
+       if (state->crtc_h < 2)
+               return -EINVAL;
+
+       /*
+        * since we cannot touch active IDMAC channels, we do not support
+        * resizing the enabled plane or changing its format
+        */
+       if (old_fb && (state->src_w != old_state->src_w ||
+                             state->src_h != old_state->src_h ||
+                             fb->pixel_format != old_fb->pixel_format))
+               return -EINVAL;
+
+       eba = drm_plane_state_to_eba(state);
+
+       if (eba & 0x7)
+               return -EINVAL;
+
+       if (fb->pitches[0] < 1 || fb->pitches[0] > 16384)
+               return -EINVAL;
+
+       if (old_fb && fb->pitches[0] != old_fb->pitches[0])
+               return -EINVAL;
+
+       switch (fb->pixel_format) {
+       case DRM_FORMAT_YUV420:
+       case DRM_FORMAT_YVU420:
+               /*
+                * Multiplanar formats have to meet the following restrictions:
+                * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
+                * - EBA, UBO and VBO are a multiple of 8
+                * - UBO and VBO are unsigned and not larger than 0xfffff8
+                * - Only EBA may be changed while scanout is active
+                * - The strides of U and V planes must be identical.
+                */
+               ubo = drm_plane_state_to_ubo(state);
+               vbo = drm_plane_state_to_vbo(state);
+
+               if ((ubo & 0x7) || (vbo & 0x7))
+                       return -EINVAL;
+
+               if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
+                       return -EINVAL;
+
+               if (old_fb) {
+                       old_ubo = drm_plane_state_to_ubo(old_state);
+                       old_vbo = drm_plane_state_to_vbo(old_state);
+                       if (ubo != old_ubo || vbo != old_vbo)
+                               return -EINVAL;
+               }
+
+               if (fb->pitches[1] != fb->pitches[2])
+                       return -EINVAL;
+
+               if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
+                       return -EINVAL;
+
+               if (old_fb && old_fb->pitches[1] != fb->pitches[1])
+                       return -EINVAL;
+       }
 
        return 0;
 }
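
Everything that used to return -EINVAL from ipu_plane_mode_set() now lives in
ipu_plane_atomic_check(), because the atomic contract is asymmetric: the check
phase may fail, the commit phase may not. That is also why the update path
below no longer checks the return value of ipu_cpmem_set_fmt(); the plane's
format list and this function are expected to have rejected anything the
hardware cannot scan out. In comment form:

    /* atomic contract (drm_atomic_helper):
     *   ->atomic_check  - validate the requested state, may return -EINVAL
     *   ->atomic_update - commit the checked state to hardware, must not fail
     */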
 
-static void ipu_plane_destroy(struct drm_plane *plane)
+static void ipu_plane_atomic_disable(struct drm_plane *plane,
+                                    struct drm_plane_state *old_state)
+{
+       ipu_disable_plane(plane);
+}
+
+static void ipu_plane_atomic_update(struct drm_plane *plane,
+                                   struct drm_plane_state *old_state)
 {
        struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+       struct drm_plane_state *state = plane->state;
+       enum ipu_color_space ics;
 
-       DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+       if (old_state->fb) {
+               ipu_plane_atomic_set_base(ipu_plane, old_state);
+               return;
+       }
 
-       ipu_disable_plane(plane);
-       drm_plane_cleanup(plane);
-       kfree(ipu_plane);
+       switch (ipu_plane->dp_flow) {
+       case IPU_DP_FLOW_SYNC_BG:
+               ipu_dp_setup_channel(ipu_plane->dp,
+                                       IPUV3_COLORSPACE_RGB,
+                                       IPUV3_COLORSPACE_RGB);
+               ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
+               break;
+       case IPU_DP_FLOW_SYNC_FG:
+               ics = ipu_drm_fourcc_to_colorspace(state->fb->pixel_format);
+               ipu_dp_setup_channel(ipu_plane->dp, ics,
+                                       IPUV3_COLORSPACE_UNKNOWN);
+               ipu_dp_set_window_pos(ipu_plane->dp, state->crtc_x,
+                                       state->crtc_y);
+               /* Enable local alpha on partial plane */
+               switch (state->fb->pixel_format) {
+               case DRM_FORMAT_ARGB1555:
+               case DRM_FORMAT_ABGR1555:
+               case DRM_FORMAT_RGBA5551:
+               case DRM_FORMAT_BGRA5551:
+               case DRM_FORMAT_ARGB4444:
+               case DRM_FORMAT_ARGB8888:
+               case DRM_FORMAT_ABGR8888:
+               case DRM_FORMAT_RGBA8888:
+               case DRM_FORMAT_BGRA8888:
+                       ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, state->crtc_w);
+
+       ipu_cpmem_zero(ipu_plane->ipu_ch);
+       ipu_cpmem_set_resolution(ipu_plane->ipu_ch, state->src_w >> 16,
+                                       state->src_h >> 16);
+       ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->pixel_format);
+       ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
+       ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
+       ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
+       ipu_plane_atomic_set_base(ipu_plane, old_state);
+       ipu_plane_enable(ipu_plane);
 }
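
ipu_plane_atomic_update() has two paths: if old_state->fb is set this is a
pageflip and only the scanout address changes; otherwise it performs the full
channel setup and ends in ipu_plane_enable(). Condensed from
ipu_plane_atomic_set_base() above, the flip itself is a classic double-buffer
swap:

    /* program the idle half of the IDMAC double buffer; the hardware
     * latches it at the next frame boundary, so scanout never sees a
     * torn address update */
    active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);   /* 0 or 1 */
    ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
    ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);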
 
-static struct drm_plane_funcs ipu_plane_funcs = {
-       .update_plane   = ipu_update_plane,
-       .disable_plane  = ipu_disable_plane,
-       .destroy        = ipu_plane_destroy,
+static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
+       .atomic_check = ipu_plane_atomic_check,
+       .atomic_disable = ipu_plane_atomic_disable,
+       .atomic_update = ipu_plane_atomic_update,
 };
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -497,5 +476,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
                return ERR_PTR(ret);
        }
 
+       drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
+
        return ipu_plane;
 }
index 4448fd4ad4eb503422a750f374c6b20a64b9f643..338b88a74eb6e06fc94db173c7cac0be8700f758 100644 (file)
@@ -23,17 +23,6 @@ struct ipu_plane {
 
        int                     dma;
        int                     dp_flow;
-
-       int                     x;
-       int                     y;
-       int                     w;
-       int                     h;
-
-       unsigned int            u_offset;
-       unsigned int            v_offset;
-       unsigned int            stride[2];
-
-       bool                    enabled;
 };
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -48,11 +37,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
                       uint32_t src_x, uint32_t src_y, uint32_t src_w,
                       uint32_t src_h, bool interlaced);
 
-void ipu_plane_enable(struct ipu_plane *plane);
-void ipu_plane_disable(struct ipu_plane *plane);
-int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb,
-                      int x, int y);
-
 int ipu_plane_get_resources(struct ipu_plane *plane);
 void ipu_plane_put_resources(struct ipu_plane *plane);
 
index 363e2c7741e2ea736e1cea604323116dc4ae0d75..1dad297b01fd5a5723b5cba7dd92b352057c7d7a 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/component.h>
 #include <linux/module.h>
 #include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_panel.h>
@@ -25,9 +26,6 @@
 
 #include "imx-drm.h"
 
-#define con_to_imxpd(x) container_of(x, struct imx_parallel_display, connector)
-#define enc_to_imxpd(x) container_of(x, struct imx_parallel_display, encoder)
-
 struct imx_parallel_display {
        struct drm_connector connector;
        struct drm_encoder encoder;
@@ -35,11 +33,21 @@ struct imx_parallel_display {
        void *edid;
        int edid_len;
        u32 bus_format;
-       int mode_valid;
        struct drm_display_mode mode;
        struct drm_panel *panel;
+       struct drm_bridge *bridge;
 };
 
+static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
+{
+       return container_of(c, struct imx_parallel_display, connector);
+}
+
+static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
+{
+       return container_of(e, struct imx_parallel_display, encoder);
+}
+
 static enum drm_connector_status imx_pd_connector_detect(
                struct drm_connector *connector, bool force)
 {
@@ -54,11 +62,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
 
        if (imxpd->panel && imxpd->panel->funcs &&
            imxpd->panel->funcs->get_modes) {
-               struct drm_display_info *di = &connector->display_info;
-
                num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
-               if (!imxpd->bus_format && di->num_bus_formats)
-                       imxpd->bus_format = di->bus_formats[0];
                if (num_modes > 0)
                        return num_modes;
        }
@@ -68,23 +72,18 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
                num_modes = drm_add_edid_modes(connector, imxpd->edid);
        }
 
-       if (imxpd->mode_valid) {
+       if (np) {
                struct drm_display_mode *mode = drm_mode_create(connector->dev);
+               int ret;
 
                if (!mode)
                        return -EINVAL;
-               drm_mode_copy(mode, &imxpd->mode);
-               mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
-               drm_mode_probed_add(connector, mode);
-               num_modes++;
-       }
 
-       if (np) {
-               struct drm_display_mode *mode = drm_mode_create(connector->dev);
+               ret = of_get_drm_display_mode(np, &imxpd->mode,
+                                             OF_USE_NATIVE_MODE);
+               if (ret)
+                       return ret;
 
-               if (!mode)
-                       return -EINVAL;
-               of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE);
                drm_mode_copy(mode, &imxpd->mode);
                mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
                drm_mode_probed_add(connector, mode);
@@ -102,50 +101,49 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
        return &imxpd->encoder;
 }
 
-static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void imx_pd_encoder_enable(struct drm_encoder *encoder)
 {
        struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
 
-       if (mode != DRM_MODE_DPMS_ON)
-               drm_panel_disable(imxpd->panel);
-       else
-               drm_panel_enable(imxpd->panel);
+       drm_panel_prepare(imxpd->panel);
+       drm_panel_enable(imxpd->panel);
 }
 
-static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
+static void imx_pd_encoder_disable(struct drm_encoder *encoder)
 {
        struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
 
-       imx_drm_set_bus_format(encoder, imxpd->bus_format);
+       drm_panel_disable(imxpd->panel);
+       drm_panel_unprepare(imxpd->panel);
 }
 
-static void imx_pd_encoder_commit(struct drm_encoder *encoder)
+static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
+                                      struct drm_crtc_state *crtc_state,
+                                      struct drm_connector_state *conn_state)
 {
+       struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+       struct drm_display_info *di = &conn_state->connector->display_info;
        struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
 
-       drm_panel_prepare(imxpd->panel);
-       drm_panel_enable(imxpd->panel);
-}
-
-static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
-                        struct drm_display_mode *orig_mode,
-                        struct drm_display_mode *mode)
-{
-}
-
-static void imx_pd_encoder_disable(struct drm_encoder *encoder)
-{
-       struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+       imx_crtc_state->bus_flags = di->bus_flags;
+       if (!imxpd->bus_format && di->num_bus_formats)
+               imx_crtc_state->bus_format = di->bus_formats[0];
+       else
+               imx_crtc_state->bus_format = imxpd->bus_format;
+       imx_crtc_state->di_hsync_pin = 2;
+       imx_crtc_state->di_vsync_pin = 3;
 
-       drm_panel_disable(imxpd->panel);
-       drm_panel_unprepare(imxpd->panel);
+       return 0;
 }
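
imx_pd_encoder_atomic_check() replaces the imx_drm_set_bus_format() call from
the removed ->prepare hook: instead of poking the CRTC directly, the encoder
records its bus configuration in the subclassed CRTC state, which
ipu_crtc_mode_set_nofb() reads back (see the imx_crtc_state accesses at the
top of this merge). A sketch of that state object, with the field set inferred
from the accesses in this diff (the real definition lives in imx-drm.h):

    struct imx_crtc_state {
            struct drm_crtc_state   base;           /* must stay first */
            u32                     bus_format;     /* MEDIA_BUS_FMT_* */
            u32                     bus_flags;
            int                     di_hsync_pin;
            int                     di_vsync_pin;
    };

    static inline struct imx_crtc_state *
    to_imx_crtc_state(struct drm_crtc_state *s)
    {
            return container_of(s, struct imx_crtc_state, base);
    }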
 
 static const struct drm_connector_funcs imx_pd_connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
+       .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = imx_pd_connector_detect,
        .destroy = imx_drm_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
@@ -158,20 +156,18 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
 };
 
 static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
-       .dpms = imx_pd_encoder_dpms,
-       .prepare = imx_pd_encoder_prepare,
-       .commit = imx_pd_encoder_commit,
-       .mode_set = imx_pd_encoder_mode_set,
+       .enable = imx_pd_encoder_enable,
        .disable = imx_pd_encoder_disable,
+       .atomic_check = imx_pd_encoder_atomic_check,
 };
 
 static int imx_pd_register(struct drm_device *drm,
        struct imx_parallel_display *imxpd)
 {
+       struct drm_encoder *encoder = &imxpd->encoder;
        int ret;
 
-       ret = imx_drm_encoder_parse_of(drm, &imxpd->encoder,
-                                      imxpd->dev->of_node);
+       ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
        if (ret)
                return ret;
 
@@ -182,19 +178,33 @@ static int imx_pd_register(struct drm_device *drm,
         */
        imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
 
-       drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
-       drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
+       drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
+       drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
                         DRM_MODE_ENCODER_NONE, NULL);
 
-       drm_connector_helper_add(&imxpd->connector,
-                       &imx_pd_connector_helper_funcs);
-       drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs,
-                          DRM_MODE_CONNECTOR_VGA);
+       if (!imxpd->bridge) {
+               drm_connector_helper_add(&imxpd->connector,
+                               &imx_pd_connector_helper_funcs);
+               drm_connector_init(drm, &imxpd->connector,
+                                  &imx_pd_connector_funcs,
+                                  DRM_MODE_CONNECTOR_VGA);
+       }
 
        if (imxpd->panel)
                drm_panel_attach(imxpd->panel, &imxpd->connector);
 
-       drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder);
+       if (imxpd->bridge) {
+               imxpd->bridge->encoder = encoder;
+               encoder->bridge = imxpd->bridge;
+               ret = drm_bridge_attach(drm, imxpd->bridge);
+               if (ret < 0) {
+                       dev_err(imxpd->dev, "failed to attach bridge: %d\n",
+                               ret);
+                       return ret;
+               }
+       } else {
+               drm_mode_connector_attach_encoder(&imxpd->connector, encoder);
+       }
 
        return 0;
 }
@@ -203,10 +213,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 {
        struct drm_device *drm = data;
        struct device_node *np = dev->of_node;
-       struct device_node *port;
+       struct device_node *ep;
        const u8 *edidp;
        struct imx_parallel_display *imxpd;
        int ret;
+       u32 bus_format = 0;
        const char *fmt;
 
        imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
@@ -220,28 +231,46 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
        ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
        if (!ret) {
                if (!strcmp(fmt, "rgb24"))
-                       imxpd->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+                       bus_format = MEDIA_BUS_FMT_RGB888_1X24;
                else if (!strcmp(fmt, "rgb565"))
-                       imxpd->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
+                       bus_format = MEDIA_BUS_FMT_RGB565_1X16;
                else if (!strcmp(fmt, "bgr666"))
-                       imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+                       bus_format = MEDIA_BUS_FMT_RGB666_1X18;
                else if (!strcmp(fmt, "lvds666"))
-                       imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+                       bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
        }
+       imxpd->bus_format = bus_format;
 
        /* port@1 is the output port */
-       port = of_graph_get_port_by_id(np, 1);
-       if (port) {
-               struct device_node *endpoint, *remote;
-
-               endpoint = of_get_child_by_name(port, "endpoint");
-               if (endpoint) {
-                       remote = of_graph_get_remote_port_parent(endpoint);
-                       if (remote)
-                               imxpd->panel = of_drm_find_panel(remote);
-                       if (!imxpd->panel)
-                               return -EPROBE_DEFER;
+       ep = of_graph_get_endpoint_by_regs(np, 1, -1);
+       if (ep) {
+               struct device_node *remote;
+
+               remote = of_graph_get_remote_port_parent(ep);
+               if (!remote) {
+                       dev_warn(dev, "endpoint %s not connected\n",
+                                ep->full_name);
+                       of_node_put(ep);
+                       return -ENODEV;
+               }
+               of_node_put(ep);
+
+               imxpd->panel = of_drm_find_panel(remote);
+               if (imxpd->panel) {
+                       dev_dbg(dev, "found panel %s\n", remote->full_name);
+               } else {
+                       imxpd->bridge = of_drm_find_bridge(remote);
+                       if (imxpd->bridge)
+                               dev_dbg(dev, "found bridge %s\n",
+                                       remote->full_name);
+               }
+               if (!imxpd->panel && !imxpd->bridge) {
+                       dev_dbg(dev, "waiting for panel or bridge %s\n",
+                               remote->full_name);
+                       of_node_put(remote);
+                       return -EPROBE_DEFER;
                }
+               of_node_put(remote);
        }
 
        imxpd->dev = dev;
index eeefc971801a7e069f6891279ee6d6e186e625ed..23ac8041c562924120b6fdbe82eda351914e0e85 100644 (file)
@@ -6,7 +6,6 @@ config DRM_MEDIATEK
        select DRM_KMS_HELPER
        select DRM_MIPI_DSI
        select DRM_PANEL
-       select IOMMU_DMA
        select MEMORY
        select MTK_SMI
        help
@@ -14,3 +13,11 @@ config DRM_MEDIATEK
          The module will be called mediatek-drm.
          This driver provides kernel mode setting and
          buffer management to userspace.
+
+config DRM_MEDIATEK_HDMI
+       tristate "DRM HDMI Support for Mediatek SoCs"
+       depends on DRM_MEDIATEK
+       select SND_SOC_HDMI_CODEC if SND_SOC
+       select GENERIC_PHY
+       help
+         DRM/KMS HDMI driver for Mediatek SoCs
index 5fcf58e877860974c4d0194c8af23c4dd3e39f14..bf2e5be1ab30df7699b99512b3e67048036726f9 100644 (file)
@@ -12,3 +12,10 @@ mediatek-drm-y := mtk_disp_ovl.o \
                  mtk_dpi.o
 
 obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
+
+mediatek-drm-hdmi-objs := mtk_cec.o \
+                         mtk_hdmi.o \
+                         mtk_hdmi_ddc.o \
+                         mtk_mt8173_hdmi_phy.o
+
+obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
new file mode 100644 (file)
index 0000000..7a3eb8c
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include "mtk_cec.h"
+
+#define TR_CONFIG              0x00
+#define CLEAR_CEC_IRQ                  BIT(15)
+
+#define CEC_CKGEN              0x04
+#define CEC_32K_PDN                    BIT(19)
+#define PDN                            BIT(16)
+
+#define RX_EVENT               0x54
+#define HDMI_PORD                      BIT(25)
+#define HDMI_HTPLG                     BIT(24)
+#define HDMI_PORD_INT_EN               BIT(9)
+#define HDMI_HTPLG_INT_EN              BIT(8)
+
+#define RX_GEN_WD              0x58
+#define HDMI_PORD_INT_32K_STATUS       BIT(26)
+#define RX_RISC_INT_32K_STATUS         BIT(25)
+#define HDMI_HTPLG_INT_32K_STATUS      BIT(24)
+#define HDMI_PORD_INT_32K_CLR          BIT(18)
+#define RX_INT_32K_CLR                 BIT(17)
+#define HDMI_HTPLG_INT_32K_CLR         BIT(16)
+#define HDMI_PORD_INT_32K_STA_MASK     BIT(10)
+#define RX_RISC_INT_32K_STA_MASK       BIT(9)
+#define HDMI_HTPLG_INT_32K_STA_MASK    BIT(8)
+#define HDMI_PORD_INT_32K_EN           BIT(2)
+#define RX_INT_32K_EN                  BIT(1)
+#define HDMI_HTPLG_INT_32K_EN          BIT(0)
+
+#define NORMAL_INT_CTRL                0x5C
+#define HDMI_HTPLG_INT_STA             BIT(0)
+#define HDMI_PORD_INT_STA              BIT(1)
+#define HDMI_HTPLG_INT_CLR             BIT(16)
+#define HDMI_PORD_INT_CLR              BIT(17)
+#define HDMI_FULL_INT_CLR              BIT(20)
+
+struct mtk_cec {
+       void __iomem *regs;
+       struct clk *clk;
+       int irq;
+       bool hpd;
+       void (*hpd_event)(bool hpd, struct device *dev);
+       struct device *hdmi_dev;
+       spinlock_t lock;
+};
+
+static void mtk_cec_clear_bits(struct mtk_cec *cec, unsigned int offset,
+                              unsigned int bits)
+{
+       void __iomem *reg = cec->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp &= ~bits;
+       writel(tmp, reg);
+}
+
+static void mtk_cec_set_bits(struct mtk_cec *cec, unsigned int offset,
+                            unsigned int bits)
+{
+       void __iomem *reg = cec->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp |= bits;
+       writel(tmp, reg);
+}
+
+static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset,
+                        unsigned int val, unsigned int mask)
+{
+       u32 tmp = readl(cec->regs + offset) & ~mask;
+
+       tmp |= val & mask;
+       writel(tmp, cec->regs + offset);
+}
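
mtk_cec_mask() is a read-modify-write helper: only the bits selected by mask
may change, everything else in the register is preserved, which is why the
merged tmp value is what must be written back. Worked through its first call
site in mtk_cec_htplg_irq_init() below:

    /* mtk_cec_mask(cec, CEC_CKGEN, 0 | CEC_32K_PDN, PDN | CEC_32K_PDN):
     *   tmp  = readl(CEC_CKGEN) & ~(PDN | CEC_32K_PDN)   -- clear both
     *   tmp |= (0 | CEC_32K_PDN) & (PDN | CEC_32K_PDN)   -- re-set bit 19
     * net effect: PDN (bit 16) = 0, CEC_32K_PDN (bit 19) = 1, and every
     * other bit of CEC_CKGEN keeps its previous value. */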
+
+void mtk_cec_set_hpd_event(struct device *dev,
+                          void (*hpd_event)(bool hpd, struct device *dev),
+                          struct device *hdmi_dev)
+{
+       struct mtk_cec *cec = dev_get_drvdata(dev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&cec->lock, flags);
+       cec->hdmi_dev = hdmi_dev;
+       cec->hpd_event = hpd_event;
+       spin_unlock_irqrestore(&cec->lock, flags);
+}
+
+bool mtk_cec_hpd_high(struct device *dev)
+{
+       struct mtk_cec *cec = dev_get_drvdata(dev);
+       unsigned int status;
+
+       status = readl(cec->regs + RX_EVENT);
+
+       return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG);
+}
+
+static void mtk_cec_htplg_irq_init(struct mtk_cec *cec)
+{
+       mtk_cec_mask(cec, CEC_CKGEN, 0 | CEC_32K_PDN, PDN | CEC_32K_PDN);
+       mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
+                        RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
+       mtk_cec_mask(cec, RX_GEN_WD, 0, HDMI_PORD_INT_32K_CLR | RX_INT_32K_CLR |
+                    HDMI_HTPLG_INT_32K_CLR | HDMI_PORD_INT_32K_EN |
+                    RX_INT_32K_EN | HDMI_HTPLG_INT_32K_EN);
+}
+
+static void mtk_cec_htplg_irq_enable(struct mtk_cec *cec)
+{
+       mtk_cec_set_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
+}
+
+static void mtk_cec_htplg_irq_disable(struct mtk_cec *cec)
+{
+       mtk_cec_clear_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
+}
+
+static void mtk_cec_clear_htplg_irq(struct mtk_cec *cec)
+{
+       mtk_cec_set_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
+       mtk_cec_set_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
+                        HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
+       mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
+                        RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
+       usleep_range(5, 10);
+       mtk_cec_clear_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
+                          HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
+       mtk_cec_clear_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
+       mtk_cec_clear_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
+                          RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
+}
+
+static void mtk_cec_hpd_event(struct mtk_cec *cec, bool hpd)
+{
+       void (*hpd_event)(bool hpd, struct device *dev);
+       struct device *hdmi_dev;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cec->lock, flags);
+       hpd_event = cec->hpd_event;
+       hdmi_dev = cec->hdmi_dev;
+       spin_unlock_irqrestore(&cec->lock, flags);
+
+       if (hpd_event)
+               hpd_event(hpd, hdmi_dev);
+}
+
+static irqreturn_t mtk_cec_htplg_isr_thread(int irq, void *arg)
+{
+       struct device *dev = arg;
+       struct mtk_cec *cec = dev_get_drvdata(dev);
+       bool hpd;
+
+       mtk_cec_clear_htplg_irq(cec);
+       hpd = mtk_cec_hpd_high(dev);
+
+       if (cec->hpd != hpd) {
+               dev_dbg(dev, "hotplug event! cur hpd = %d, hpd = %d\n",
+                       cec->hpd, hpd);
+               cec->hpd = hpd;
+               mtk_cec_hpd_event(cec, hpd);
+       }
+       return IRQ_HANDLED;
+}
+
+static int mtk_cec_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mtk_cec *cec;
+       struct resource *res;
+       int ret;
+
+       cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
+       if (!cec)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, cec);
+       spin_lock_init(&cec->lock);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       cec->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(cec->regs)) {
+               ret = PTR_ERR(cec->regs);
+               dev_err(dev, "Failed to ioremap cec: %d\n", ret);
+               return ret;
+       }
+
+       cec->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(cec->clk)) {
+               ret = PTR_ERR(cec->clk);
+               dev_err(dev, "Failed to get cec clock: %d\n", ret);
+               return ret;
+       }
+
+       cec->irq = platform_get_irq(pdev, 0);
+       if (cec->irq < 0) {
+               dev_err(dev, "Failed to get cec irq: %d\n", cec->irq);
+               return cec->irq;
+       }
+
+       ret = devm_request_threaded_irq(dev, cec->irq, NULL,
+                                       mtk_cec_htplg_isr_thread,
+                                       IRQF_SHARED | IRQF_TRIGGER_LOW |
+                                       IRQF_ONESHOT, "hdmi hpd", dev);
+       if (ret) {
+               dev_err(dev, "Failed to register cec irq: %d\n", ret);
+               return ret;
+       }
+
+       ret = clk_prepare_enable(cec->clk);
+       if (ret) {
+               dev_err(dev, "Failed to enable cec clock: %d\n", ret);
+               return ret;
+       }
+
+       mtk_cec_htplg_irq_init(cec);
+       mtk_cec_htplg_irq_enable(cec);
+
+       return 0;
+}
+
+static int mtk_cec_remove(struct platform_device *pdev)
+{
+       struct mtk_cec *cec = platform_get_drvdata(pdev);
+
+       mtk_cec_htplg_irq_disable(cec);
+       clk_disable_unprepare(cec->clk);
+       return 0;
+}
+
+static const struct of_device_id mtk_cec_of_ids[] = {
+       { .compatible = "mediatek,mt8173-cec", },
+       {}
+};
+
+struct platform_driver mtk_cec_driver = {
+       .probe = mtk_cec_probe,
+       .remove = mtk_cec_remove,
+       .driver = {
+               .name = "mediatek-cec",
+               .of_match_table = mtk_cec_of_ids,
+       },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.h b/drivers/gpu/drm/mediatek/mtk_cec.h
new file mode 100644 (file)
index 0000000..10057b7
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MTK_CEC_H
+#define _MTK_CEC_H
+
+#include <linux/types.h>
+
+struct device;
+
+void mtk_cec_set_hpd_event(struct device *dev,
+                          void (*hotplug_event)(bool hpd, struct device *dev),
+                          struct device *hdmi_dev);
+bool mtk_cec_hpd_high(struct device *dev);
+
+#endif /* _MTK_CEC_H */
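
mtk_cec.h exposes exactly two entry points to the HDMI driver: callback
registration and a level query. A hedged sketch of the consumer side --
mtk_hdmi_hpd_event() and the mtk_hdmi/bridge field names are assumptions about
the (truncated) mtk_hdmi.c below, not confirmed by this diff:

    /* sketch: consumer side in the HDMI driver (names assumed) */
    static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
    {
            struct mtk_hdmi *hdmi = dev_get_drvdata(dev);

            if (hdmi && hdmi->bridge.encoder)
                    drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
    }

    /* at bind time, on the CEC device resolved from its phandle: */
    mtk_cec_set_hpd_event(cec_dev, mtk_hdmi_hpd_event, &pdev->dev);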
index d05ca7901315354e36994aa280f417dfa4fe203d..0186e500d2a544d8c90769428c7673bec1cd68c0 100644 (file)
@@ -432,11 +432,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
        unsigned long pll_rate;
        unsigned int factor;
 
-       if (!dpi) {
-               dev_err(dpi->dev, "invalid argument\n");
-               return -EINVAL;
-       }
-
        pix_rate = 1000UL * mode->clock;
        if (mode->clock <= 74000)
                factor = 8 * 3;
index b1223d54d0ab56200af2cd83029b47e9de0b6bc3..eebb7d881c2ba88b9b50a309a6c37b7087feb77d 100644 (file)
@@ -91,7 +91,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
        mutex_lock(&private->commit.lock);
        flush_work(&private->commit.work);
 
-       drm_atomic_helper_swap_state(drm, state);
+       drm_atomic_helper_swap_state(state, true);
 
        if (async)
                mtk_atomic_schedule(private, state);
@@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = {
        .enable_vblank = mtk_drm_crtc_enable_vblank,
        .disable_vblank = mtk_drm_crtc_disable_vblank,
 
-       .gem_free_object = mtk_drm_gem_free_object,
+       .gem_free_object_unlocked = mtk_drm_gem_free_object,
        .gem_vm_ops = &drm_gem_cma_vm_ops,
        .dumb_create = mtk_drm_gem_dumb_create,
        .dumb_map_offset = mtk_drm_gem_dumb_map_offset,
@@ -280,8 +280,6 @@ static int mtk_drm_bind(struct device *dev)
        if (!drm)
                return -ENOMEM;
 
-       drm_dev_set_unique(drm, dev_name(dev));
-
        drm->dev_private = private;
        private->drm = drm;
 
@@ -293,14 +291,8 @@ static int mtk_drm_bind(struct device *dev)
        if (ret < 0)
                goto err_deinit;
 
-       ret = drm_connector_register_all(drm);
-       if (ret < 0)
-               goto err_unregister;
-
        return 0;
 
-err_unregister:
-       drm_dev_unregister(drm);
 err_deinit:
        mtk_drm_kms_deinit(drm);
 err_free:
@@ -455,7 +447,6 @@ static int mtk_drm_remove(struct platform_device *pdev)
        struct drm_device *drm = private->drm;
        int i;
 
-       drm_connector_unregister_all(drm);
        drm_dev_unregister(drm);
        mtk_drm_kms_deinit(drm);
        drm_dev_unref(drm);
index 51bc8988fc26f08b861397b4cd32c95d827ed49a..3995765a90dcd868d124b3248f6fa28cd4554afc 100644 (file)
@@ -170,6 +170,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
 
        return drm_plane_helper_check_update(plane, state->crtc, fb,
                                             &src, &dest, &clip,
+                                            state->rotation,
                                             DRM_PLANE_HELPER_NO_SCALING,
                                             DRM_PLANE_HELPER_NO_SCALING,
                                             true, true, &visible);
index 2d808e59fefd388867cd66172bbc6fb08d162639..28b2044ed9f285ad7c1bf1441822c3bcd24d9196 100644 (file)
@@ -575,14 +575,6 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
        return drm_panel_get_modes(dsi->panel);
 }
 
-static struct drm_encoder *mtk_dsi_connector_best_encoder(
-               struct drm_connector *connector)
-{
-       struct mtk_dsi *dsi = connector_to_dsi(connector);
-
-       return &dsi->encoder;
-}
-
 static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
        .mode_fixup = mtk_dsi_encoder_mode_fixup,
        .mode_set = mtk_dsi_encoder_mode_set,
@@ -603,7 +595,6 @@ static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
 static const struct drm_connector_helper_funcs
        mtk_dsi_connector_helper_funcs = {
        .get_modes = mtk_dsi_connector_get_modes,
-       .best_encoder = mtk_dsi_connector_best_encoder,
 };
 
 static int mtk_drm_attach_bridge(struct drm_bridge *bridge,
@@ -695,10 +686,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
 {
        drm_encoder_cleanup(&dsi->encoder);
        /* Skip connector cleanup if creation was delegated to the bridge */
-       if (dsi->conn.dev) {
-               drm_connector_unregister(&dsi->conn);
+       if (dsi->conn.dev)
                drm_connector_cleanup(&dsi->conn);
-       }
 }
 
 static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
new file mode 100644 (file)
index 0000000..334562d
--- /dev/null
@@ -0,0 +1,1868 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <sound/hdmi-codec.h>
+#include "mtk_cec.h"
+#include "mtk_hdmi.h"
+#include "mtk_hdmi_regs.h"
+
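+/*
+ * The N and CTS values are loaded into the hardware one byte at a time
+ * through the GRL_NCTS port: four CTS bytes followed by three N bytes.
+ */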
+#define NCTS_BYTES     7
+
+enum mtk_hdmi_clk_id {
+       MTK_HDMI_CLK_HDMI_PIXEL,
+       MTK_HDMI_CLK_HDMI_PLL,
+       MTK_HDMI_CLK_AUD_BCLK,
+       MTK_HDMI_CLK_AUD_SPDIF,
+       MTK_HDMI_CLK_COUNT
+};
+
+enum hdmi_aud_input_type {
+       HDMI_AUD_INPUT_I2S = 0,
+       HDMI_AUD_INPUT_SPDIF,
+};
+
+enum hdmi_aud_i2s_fmt {
+       HDMI_I2S_MODE_RJT_24BIT = 0,
+       HDMI_I2S_MODE_RJT_16BIT,
+       HDMI_I2S_MODE_LJT_24BIT,
+       HDMI_I2S_MODE_LJT_16BIT,
+       HDMI_I2S_MODE_I2S_24BIT,
+       HDMI_I2S_MODE_I2S_16BIT
+};
+
+enum hdmi_aud_mclk {
+       HDMI_AUD_MCLK_128FS,
+       HDMI_AUD_MCLK_192FS,
+       HDMI_AUD_MCLK_256FS,
+       HDMI_AUD_MCLK_384FS,
+       HDMI_AUD_MCLK_512FS,
+       HDMI_AUD_MCLK_768FS,
+       HDMI_AUD_MCLK_1152FS,
+};
+
+enum hdmi_aud_channel_type {
+       HDMI_AUD_CHAN_TYPE_1_0 = 0,
+       HDMI_AUD_CHAN_TYPE_1_1,
+       HDMI_AUD_CHAN_TYPE_2_0,
+       HDMI_AUD_CHAN_TYPE_2_1,
+       HDMI_AUD_CHAN_TYPE_3_0,
+       HDMI_AUD_CHAN_TYPE_3_1,
+       HDMI_AUD_CHAN_TYPE_4_0,
+       HDMI_AUD_CHAN_TYPE_4_1,
+       HDMI_AUD_CHAN_TYPE_5_0,
+       HDMI_AUD_CHAN_TYPE_5_1,
+       HDMI_AUD_CHAN_TYPE_6_0,
+       HDMI_AUD_CHAN_TYPE_6_1,
+       HDMI_AUD_CHAN_TYPE_7_0,
+       HDMI_AUD_CHAN_TYPE_7_1,
+       HDMI_AUD_CHAN_TYPE_3_0_LRS,
+       HDMI_AUD_CHAN_TYPE_3_1_LRS,
+       HDMI_AUD_CHAN_TYPE_4_0_CLRS,
+       HDMI_AUD_CHAN_TYPE_4_1_CLRS,
+       HDMI_AUD_CHAN_TYPE_6_1_CS,
+       HDMI_AUD_CHAN_TYPE_6_1_CH,
+       HDMI_AUD_CHAN_TYPE_6_1_OH,
+       HDMI_AUD_CHAN_TYPE_6_1_CHR,
+       HDMI_AUD_CHAN_TYPE_7_1_LH_RH,
+       HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR,
+       HDMI_AUD_CHAN_TYPE_7_1_LC_RC,
+       HDMI_AUD_CHAN_TYPE_7_1_LW_RW,
+       HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD,
+       HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS,
+       HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS,
+       HDMI_AUD_CHAN_TYPE_7_1_CS_CH,
+       HDMI_AUD_CHAN_TYPE_7_1_CS_OH,
+       HDMI_AUD_CHAN_TYPE_7_1_CS_CHR,
+       HDMI_AUD_CHAN_TYPE_7_1_CH_OH,
+       HDMI_AUD_CHAN_TYPE_7_1_CH_CHR,
+       HDMI_AUD_CHAN_TYPE_7_1_OH_CHR,
+       HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR,
+       HDMI_AUD_CHAN_TYPE_6_0_CS,
+       HDMI_AUD_CHAN_TYPE_6_0_CH,
+       HDMI_AUD_CHAN_TYPE_6_0_OH,
+       HDMI_AUD_CHAN_TYPE_6_0_CHR,
+       HDMI_AUD_CHAN_TYPE_7_0_LH_RH,
+       HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR,
+       HDMI_AUD_CHAN_TYPE_7_0_LC_RC,
+       HDMI_AUD_CHAN_TYPE_7_0_LW_RW,
+       HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD,
+       HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS,
+       HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS,
+       HDMI_AUD_CHAN_TYPE_7_0_CS_CH,
+       HDMI_AUD_CHAN_TYPE_7_0_CS_OH,
+       HDMI_AUD_CHAN_TYPE_7_0_CS_CHR,
+       HDMI_AUD_CHAN_TYPE_7_0_CH_OH,
+       HDMI_AUD_CHAN_TYPE_7_0_CH_CHR,
+       HDMI_AUD_CHAN_TYPE_7_0_OH_CHR,
+       HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR,
+       HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS,
+       HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF
+};
+
+enum hdmi_aud_channel_swap_type {
+       HDMI_AUD_SWAP_LR,
+       HDMI_AUD_SWAP_LFE_CC,
+       HDMI_AUD_SWAP_LSRS,
+       HDMI_AUD_SWAP_RLS_RRS,
+       HDMI_AUD_SWAP_LR_STATUS,
+};
+
+struct hdmi_audio_param {
+       enum hdmi_audio_coding_type aud_codec;
+       enum hdmi_audio_sample_size aud_sample_size;
+       enum hdmi_aud_input_type aud_input_type;
+       enum hdmi_aud_i2s_fmt aud_i2s_fmt;
+       enum hdmi_aud_mclk aud_mclk;
+       enum hdmi_aud_channel_type aud_input_chan_type;
+       struct hdmi_codec_params codec_params;
+};
+
+struct mtk_hdmi {
+       struct drm_bridge bridge;
+       struct drm_connector conn;
+       struct device *dev;
+       struct phy *phy;
+       struct device *cec_dev;
+       struct i2c_adapter *ddc_adpt;
+       struct clk *clk[MTK_HDMI_CLK_COUNT];
+       struct drm_display_mode mode;
+       bool dvi_mode;
+       u32 min_clock;
+       u32 max_clock;
+       u32 max_hdisplay;
+       u32 max_vdisplay;
+       u32 ibias;
+       u32 ibias_up;
+       struct regmap *sys_regmap;
+       unsigned int sys_offset;
+       void __iomem *regs;
+       enum hdmi_colorspace csp;
+       struct hdmi_audio_param aud_param;
+       bool audio_enable;
+       bool powered;
+       bool enabled;
+};
+
+static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
+{
+       return container_of(b, struct mtk_hdmi, bridge);
+}
+
+static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c)
+{
+       return container_of(c, struct mtk_hdmi, conn);
+}
+
+static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
+{
+       return readl(hdmi->regs + offset);
+}
+
+static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val)
+{
+       writel(val, hdmi->regs + offset);
+}
+
+static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
+{
+       void __iomem *reg = hdmi->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp &= ~bits;
+       writel(tmp, reg);
+}
+
+static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
+{
+       void __iomem *reg = hdmi->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp |= bits;
+       writel(tmp, reg);
+}
+
+static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask)
+{
+       void __iomem *reg = hdmi->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp = (tmp & ~mask) | (val & mask);
+       writel(tmp, reg);
+}
+
+static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
+{
+       mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH,
+                     VIDEO_SOURCE_SEL);
+}
+
+static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
+{
+       struct arm_smccc_res res;
+
+       /*
+        * MT8173 HDMI hardware has an output control bit to enable/disable HDMI
+        * output. This bit can only be controlled in ARM supervisor mode.
+        * The ARM trusted firmware provides an API for the HDMI driver to set
+        * this control bit to enable HDMI output in supervisor mode.
+        */
+       arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904, 0x80000000,
+                     0, 0, 0, 0, 0, &res);
+
+       regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+                          HDMI_PCLK_FREE_RUN, enable ? HDMI_PCLK_FREE_RUN : 0);
+       regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+                          HDMI_ON | ANLG_ON, enable ? (HDMI_ON | ANLG_ON) : 0);
+}
+
+static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
+{
+       regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+                          HDMI2P0_EN, enable ? 0 : HDMI2P0_EN);
+}
+
+static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi)
+{
+       mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+}
+
+static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi)
+{
+       mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
+}
+
+static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
+{
+       regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+                          HDMI_RST, HDMI_RST);
+       regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+                          HDMI_RST, 0);
+       mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
+       regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
+                          ANLG_ON, ANLG_ON);
+}
+
+static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice)
+{
+       mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0,
+                     CFG2_NOTICE_EN);
+}
+
+static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask)
+{
+       mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask);
+}
+
+static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable)
+{
+       mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI);
+}
+
+static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
+                                       u8 len)
+{
+       u32 ctrl_reg = GRL_CTRL;
+       int i;
+       u8 *frame_data;
+       enum hdmi_infoframe_type frame_type;
+       u8 frame_ver;
+       u8 frame_len;
+       u8 checksum;
+       int ctrl_frame_en = 0;
+
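+       /*
+        * A packed infoframe, as produced by hdmi_*_infoframe_pack(), starts
+        * with a four byte header: type, version, payload length and
+        * checksum, followed by the payload itself.
+        */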
+       frame_type = *buffer;
+       buffer += 1;
+       frame_ver = *buffer;
+       buffer += 1;
+       frame_len = *buffer;
+       buffer += 1;
+       checksum = *buffer;
+       buffer += 1;
+       frame_data = buffer;
+
+       dev_dbg(hdmi->dev,
+               "frame_type:0x%x,frame_ver:0x%x,frame_len:0x%x,checksum:0x%x\n",
+               frame_type, frame_ver, frame_len, checksum);
+
+       switch (frame_type) {
+       case HDMI_INFOFRAME_TYPE_AVI:
+               ctrl_frame_en = CTRL_AVI_EN;
+               ctrl_reg = GRL_CTRL;
+               break;
+       case HDMI_INFOFRAME_TYPE_SPD:
+               ctrl_frame_en = CTRL_SPD_EN;
+               ctrl_reg = GRL_CTRL;
+               break;
+       case HDMI_INFOFRAME_TYPE_AUDIO:
+               ctrl_frame_en = CTRL_AUDIO_EN;
+               ctrl_reg = GRL_CTRL;
+               break;
+       case HDMI_INFOFRAME_TYPE_VENDOR:
+               ctrl_frame_en = VS_EN;
+               ctrl_reg = GRL_ACP_ISRC_CTRL;
+               break;
+       }
+       mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
+       mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
+       mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver);
+       mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len);
+
+       mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum);
+       for (i = 0; i < frame_len; i++)
+               mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]);
+
+       mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en);
+}
+
+static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
+{
+       mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF,
+                     AUDIO_PACKET_OFF);
+}
+
+static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
+{
+       regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+                          HDMI_OUT_FIFO_EN | MHL_MODE_ON, 0);
+       usleep_range(2000, 4000);
+       regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+                          HDMI_OUT_FIFO_EN | MHL_MODE_ON, HDMI_OUT_FIFO_EN);
+}
+
+static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
+{
+       regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
+                          DEEP_COLOR_MODE_MASK | DEEP_COLOR_EN,
+                          COLOR_8BIT_MODE);
+}
+
+static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi)
+{
+       mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+       usleep_range(2000, 4000);
+       mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
+}
+
+static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi)
+{
+       mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN,
+                     CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+       usleep_range(2000, 4000);
+       mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET,
+                     CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
+}
+
+static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on)
+{
+       mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT,
+                     CTS_CTRL_SOFT);
+}
+
+static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi,
+                                              bool enable)
+{
+       mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0,
+                     NCTS_WRI_ANYTIME);
+}
+
+static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi,
+                                    struct drm_display_mode *mode)
+{
+       mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE);
+
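+       /* 1080i modes at 74.25 MHz presumably need the MHL DE select cleared. */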
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
+           mode->clock == 74250 &&
+           mode->vdisplay == 1080)
+               mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+       else
+               mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
+}
+
+static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
+                                       enum hdmi_aud_channel_swap_type swap)
+{
+       u8 swap_bit;
+
+       switch (swap) {
+       case HDMI_AUD_SWAP_LR:
+               swap_bit = LR_SWAP;
+               break;
+       case HDMI_AUD_SWAP_LFE_CC:
+               swap_bit = LFE_CC_SWAP;
+               break;
+       case HDMI_AUD_SWAP_LSRS:
+               swap_bit = LSRS_SWAP;
+               break;
+       case HDMI_AUD_SWAP_RLS_RRS:
+               swap_bit = RLS_RRS_SWAP;
+               break;
+       case HDMI_AUD_SWAP_LR_STATUS:
+               swap_bit = LR_STATUS_SWAP;
+               break;
+       default:
+               swap_bit = LFE_CC_SWAP;
+               break;
+       }
+       mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff);
+}
+
+static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
+                                       enum hdmi_audio_sample_size bit_num)
+{
+       u32 val;
+
+       switch (bit_num) {
+       case HDMI_AUDIO_SAMPLE_SIZE_16:
+               val = AOUT_16BIT;
+               break;
+       case HDMI_AUDIO_SAMPLE_SIZE_20:
+               val = AOUT_20BIT;
+               break;
+       case HDMI_AUDIO_SAMPLE_SIZE_24:
+       case HDMI_AUDIO_SAMPLE_SIZE_STREAM:
+               val = AOUT_24BIT;
+               break;
+       }
+
+       mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK);
+}
+
+static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
+                                       enum hdmi_aud_i2s_fmt i2s_fmt)
+{
+       u32 val;
+
+       val = mtk_hdmi_read(hdmi, GRL_CFG0);
+       val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK);
+
+       switch (i2s_fmt) {
+       case HDMI_I2S_MODE_RJT_24BIT:
+               val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_24BIT;
+               break;
+       case HDMI_I2S_MODE_RJT_16BIT:
+               val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_16BIT;
+               break;
+       case HDMI_I2S_MODE_LJT_24BIT:
+       default:
+               val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_24BIT;
+               break;
+       case HDMI_I2S_MODE_LJT_16BIT:
+               val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_16BIT;
+               break;
+       case HDMI_I2S_MODE_I2S_24BIT:
+               val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_24BIT;
+               break;
+       case HDMI_I2S_MODE_I2S_16BIT:
+               val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT;
+               break;
+       }
+       mtk_hdmi_write(hdmi, GRL_CFG0, val);
+}
+
+static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
+{
+       const u8 mask = HIGH_BIT_RATE | DST_NORMAL_DOUBLE | SACD_DST | DSD_SEL;
+       u8 val;
+
+       /* Disable high bitrate, set DST packet normal/double */
+       mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
+
+       if (dst)
+               val = DST_NORMAL_DOUBLE | SACD_DST;
+       else
+               val = 0;
+
+       mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask);
+}
+
+static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
+                                       enum hdmi_aud_channel_type channel_type,
+                                       u8 channel_count)
+{
+       unsigned int ch_switch;
+       u8 i2s_uv;
+
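+       /* Pass all channels straight through, except 1 and 2 which are swapped. */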
+       ch_switch = CH_SWITCH(7, 7) | CH_SWITCH(6, 6) |
+                   CH_SWITCH(5, 5) | CH_SWITCH(4, 4) |
+                   CH_SWITCH(3, 3) | CH_SWITCH(1, 2) |
+                   CH_SWITCH(2, 1) | CH_SWITCH(0, 0);
+
+       if (channel_count == 2) {
+               i2s_uv = I2S_UV_CH_EN(0);
+       } else if (channel_count == 3 || channel_count == 4) {
+               if (channel_count == 4 &&
+                   (channel_type == HDMI_AUD_CHAN_TYPE_3_0_LRS ||
+                   channel_type == HDMI_AUD_CHAN_TYPE_4_0))
+                       i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(0);
+               else
+                       i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2);
+       } else if (channel_count == 6 || channel_count == 5) {
+               if (channel_count == 6 &&
+                   channel_type != HDMI_AUD_CHAN_TYPE_5_1 &&
+                   channel_type != HDMI_AUD_CHAN_TYPE_4_1_CLRS) {
+                       i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
+                                I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
+               } else {
+                       i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(1) |
+                                I2S_UV_CH_EN(0);
+               }
+       } else if (channel_count == 8 || channel_count == 7) {
+               i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
+                        I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
+       } else {
+               i2s_uv = I2S_UV_CH_EN(0);
+       }
+
+       mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff);
+       mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
+       mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
+       mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv);
+}
+
+static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
+                                          enum hdmi_aud_input_type input_type)
+{
+       u32 val;
+
+       val = mtk_hdmi_read(hdmi, GRL_CFG1);
+       if (input_type == HDMI_AUD_INPUT_I2S &&
+           (val & CFG1_SPDIF) == CFG1_SPDIF) {
+               val &= ~CFG1_SPDIF;
+       } else if (input_type == HDMI_AUD_INPUT_SPDIF &&
+               (val & CFG1_SPDIF) == 0) {
+               val |= CFG1_SPDIF;
+       }
+       mtk_hdmi_write(hdmi, GRL_CFG1, val);
+}
+
+static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
+                                              u8 *channel_status)
+{
+       int i;
+
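+       /*
+        * The IEC 60958 channel status block is 24 bytes per channel. Only
+        * the first five bytes carry the parameters set here; the remaining
+        * bytes are cleared.
+        */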
+       for (i = 0; i < 5; i++) {
+               mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
+               mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]);
+               mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]);
+       }
+       for (; i < 24; i++) {
+               mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0);
+               mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0);
+       }
+}
+
+static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi)
+{
+       u32 val;
+
+       val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+       if (val & MIX_CTRL_SRC_EN) {
+               val &= ~MIX_CTRL_SRC_EN;
+               mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+               usleep_range(255, 512);
+               val |= MIX_CTRL_SRC_EN;
+               mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+       }
+}
+
+static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi)
+{
+       u32 val;
+
+       val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
+       val &= ~MIX_CTRL_SRC_EN;
+       mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
+       mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00);
+}
+
+static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
+                                    enum hdmi_aud_mclk mclk)
+{
+       u32 val;
+
+       val = mtk_hdmi_read(hdmi, GRL_CFG5);
+       val &= CFG5_CD_RATIO_MASK;
+
+       switch (mclk) {
+       case HDMI_AUD_MCLK_128FS:
+               val |= CFG5_FS128;
+               break;
+       case HDMI_AUD_MCLK_256FS:
+               val |= CFG5_FS256;
+               break;
+       case HDMI_AUD_MCLK_384FS:
+               val |= CFG5_FS384;
+               break;
+       case HDMI_AUD_MCLK_512FS:
+               val |= CFG5_FS512;
+               break;
+       case HDMI_AUD_MCLK_768FS:
+               val |= CFG5_FS768;
+               break;
+       default:
+               val |= CFG5_FS256;
+               break;
+       }
+       mtk_hdmi_write(hdmi, GRL_CFG5, val);
+}
+
+struct hdmi_acr_n {
+       unsigned int clock;
+       unsigned int n[3];
+};
+
+/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */
+static const struct hdmi_acr_n hdmi_rec_n_table[] = {
+       /* Clock, N: 32kHz 44.1kHz 48kHz */
+       {  25175, {  4576,  7007,  6864 } },
+       {  74176, { 11648, 17836, 11648 } },
+       { 148352, { 11648,  8918,  5824 } },
+       { 296703, {  5824,  4459,  5824 } },
+       { 297000, {  3072,  4704,  5120 } },
+       {      0, {  4096,  6272,  6144 } }, /* all other TMDS clocks */
+};
+
+/**
+ * hdmi_recommended_n() - Return N value recommended by HDMI specification
+ * @freq: audio sample rate in Hz
+ * @clock: rounded TMDS clock in kHz
+ */
+static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock)
+{
+       const struct hdmi_acr_n *recommended;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) {
+               if (clock == hdmi_rec_n_table[i].clock)
+                       break;
+       }
+       recommended = hdmi_rec_n_table + i;
+
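+       /*
+        * Rates above 48 kHz use multiples of the 44.1 kHz and 48 kHz table
+        * entries; any other rate falls back to the 128 * f_s / 1000 default.
+        */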
+       switch (freq) {
+       case 32000:
+               return recommended->n[0];
+       case 44100:
+               return recommended->n[1];
+       case 48000:
+               return recommended->n[2];
+       case 88200:
+               return recommended->n[1] * 2;
+       case 96000:
+               return recommended->n[2] * 2;
+       case 176400:
+               return recommended->n[1] * 4;
+       case 192000:
+               return recommended->n[2] * 4;
+       default:
+               return (128 * freq) / 1000;
+       }
+}
+
+static unsigned int hdmi_mode_clock_to_hz(unsigned int clock)
+{
+       switch (clock) {
+       case 25175:
+               return 25174825;        /* 25.2/1.001 MHz */
+       case 74176:
+               return 74175824;        /* 74.25/1.001 MHz */
+       case 148352:
+               return 148351648;       /* 148.5/1.001 MHz */
+       case 296703:
+               return 296703297;       /* 297/1.001 MHz */
+       default:
+               return clock * 1000;
+       }
+}
+
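+/*
+ * Audio clock regeneration: the sink recovers the audio sample clock from
+ * 128 * f_s = f_TMDS * N / CTS, so the CTS value to transmit is
+ * CTS = f_TMDS * N / (128 * f_s), rounded to the nearest integer.
+ */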
+static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
+                                     unsigned int tmds_clock, unsigned int n)
+{
+       return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n,
+                                    128 * audio_sample_rate);
+}
+
+static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
+                                   unsigned int cts)
+{
+       unsigned char val[NCTS_BYTES];
+       int i;
+
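+       /*
+        * Writing GRL_NCTS three times presumably resets the hardware's
+        * internal byte pointer before the seven N/CTS bytes are loaded,
+        * most significant byte first.
+        */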
+       mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+       mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+       mtk_hdmi_write(hdmi, GRL_NCTS, 0);
+       memset(val, 0, sizeof(val));
+
+       val[0] = (cts >> 24) & 0xff;
+       val[1] = (cts >> 16) & 0xff;
+       val[2] = (cts >> 8) & 0xff;
+       val[3] = cts & 0xff;
+
+       val[4] = (n >> 16) & 0xff;
+       val[5] = (n >> 8) & 0xff;
+       val[6] = n & 0xff;
+
+       for (i = 0; i < NCTS_BYTES; i++)
+               mtk_hdmi_write(hdmi, GRL_NCTS, val[i]);
+}
+
+static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
+                                    unsigned int sample_rate,
+                                    unsigned int clock)
+{
+       unsigned int n, cts;
+
+       n = hdmi_recommended_n(sample_rate, clock);
+       cts = hdmi_expected_cts(sample_rate, clock, n);
+
+       dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n",
+               __func__, sample_rate, clock, n, cts);
+
+       mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64,
+                     AUDIO_I2S_NCTS_SEL);
+       do_hdmi_hw_aud_set_ncts(hdmi, n, cts);
+}
+
+static u8 mtk_hdmi_aud_get_chnl_count(enum hdmi_aud_channel_type channel_type)
+{
+       switch (channel_type) {
+       case HDMI_AUD_CHAN_TYPE_1_0:
+       case HDMI_AUD_CHAN_TYPE_1_1:
+       case HDMI_AUD_CHAN_TYPE_2_0:
+               return 2;
+       case HDMI_AUD_CHAN_TYPE_2_1:
+       case HDMI_AUD_CHAN_TYPE_3_0:
+               return 3;
+       case HDMI_AUD_CHAN_TYPE_3_1:
+       case HDMI_AUD_CHAN_TYPE_4_0:
+       case HDMI_AUD_CHAN_TYPE_3_0_LRS:
+               return 4;
+       case HDMI_AUD_CHAN_TYPE_4_1:
+       case HDMI_AUD_CHAN_TYPE_5_0:
+       case HDMI_AUD_CHAN_TYPE_3_1_LRS:
+       case HDMI_AUD_CHAN_TYPE_4_0_CLRS:
+               return 5;
+       case HDMI_AUD_CHAN_TYPE_5_1:
+       case HDMI_AUD_CHAN_TYPE_6_0:
+       case HDMI_AUD_CHAN_TYPE_4_1_CLRS:
+       case HDMI_AUD_CHAN_TYPE_6_0_CS:
+       case HDMI_AUD_CHAN_TYPE_6_0_CH:
+       case HDMI_AUD_CHAN_TYPE_6_0_OH:
+       case HDMI_AUD_CHAN_TYPE_6_0_CHR:
+               return 6;
+       case HDMI_AUD_CHAN_TYPE_6_1:
+       case HDMI_AUD_CHAN_TYPE_6_1_CS:
+       case HDMI_AUD_CHAN_TYPE_6_1_CH:
+       case HDMI_AUD_CHAN_TYPE_6_1_OH:
+       case HDMI_AUD_CHAN_TYPE_6_1_CHR:
+       case HDMI_AUD_CHAN_TYPE_7_0:
+       case HDMI_AUD_CHAN_TYPE_7_0_LH_RH:
+       case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR:
+       case HDMI_AUD_CHAN_TYPE_7_0_LC_RC:
+       case HDMI_AUD_CHAN_TYPE_7_0_LW_RW:
+       case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD:
+       case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS:
+       case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS:
+       case HDMI_AUD_CHAN_TYPE_7_0_CS_CH:
+       case HDMI_AUD_CHAN_TYPE_7_0_CS_OH:
+       case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR:
+       case HDMI_AUD_CHAN_TYPE_7_0_CH_OH:
+       case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR:
+       case HDMI_AUD_CHAN_TYPE_7_0_OH_CHR:
+       case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR:
+       case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS:
+               return 7;
+       case HDMI_AUD_CHAN_TYPE_7_1:
+       case HDMI_AUD_CHAN_TYPE_7_1_LH_RH:
+       case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR:
+       case HDMI_AUD_CHAN_TYPE_7_1_LC_RC:
+       case HDMI_AUD_CHAN_TYPE_7_1_LW_RW:
+       case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD:
+       case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS:
+       case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS:
+       case HDMI_AUD_CHAN_TYPE_7_1_CS_CH:
+       case HDMI_AUD_CHAN_TYPE_7_1_CS_OH:
+       case HDMI_AUD_CHAN_TYPE_7_1_CS_CHR:
+       case HDMI_AUD_CHAN_TYPE_7_1_CH_OH:
+       case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR:
+       case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR:
+       case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR:
+               return 8;
+       default:
+               return 2;
+       }
+}
+
+static int mtk_hdmi_video_change_vpll(struct mtk_hdmi *hdmi, u32 clock)
+{
+       unsigned long rate;
+       int ret;
+
+       /* The DPI driver should already have set TVDPLL to the correct rate */
+       ret = clk_set_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL], clock);
+       if (ret) {
+               dev_err(hdmi->dev, "Failed to set PLL to %u Hz: %d\n", clock,
+                       ret);
+               return ret;
+       }
+
+       rate = clk_get_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+
+       if (DIV_ROUND_CLOSEST(rate, 1000) != DIV_ROUND_CLOSEST(clock, 1000))
+               dev_warn(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock,
+                        rate);
+       else
+               dev_dbg(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock, rate);
+
+       mtk_hdmi_hw_config_sys(hdmi);
+       mtk_hdmi_hw_set_deep_color_mode(hdmi);
+       return 0;
+}
+
+static void mtk_hdmi_video_set_display_mode(struct mtk_hdmi *hdmi,
+                                           struct drm_display_mode *mode)
+{
+       mtk_hdmi_hw_reset(hdmi);
+       mtk_hdmi_hw_enable_notice(hdmi, true);
+       mtk_hdmi_hw_write_int_mask(hdmi, 0xff);
+       mtk_hdmi_hw_enable_dvi_mode(hdmi, hdmi->dvi_mode);
+       mtk_hdmi_hw_ncts_auto_write_enable(hdmi, true);
+
+       mtk_hdmi_hw_msic_setting(hdmi, mode);
+}
+
+static int mtk_hdmi_aud_enable_packet(struct mtk_hdmi *hdmi, bool enable)
+{
+       mtk_hdmi_hw_send_aud_packet(hdmi, enable);
+       return 0;
+}
+
+static int mtk_hdmi_aud_on_off_hw_ncts(struct mtk_hdmi *hdmi, bool on)
+{
+       mtk_hdmi_hw_ncts_enable(hdmi, on);
+       return 0;
+}
+
+static int mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi)
+{
+       enum hdmi_aud_channel_type chan_type;
+       u8 chan_count;
+       bool dst;
+
+       mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC);
+       mtk_hdmi_set_bits(hdmi, GRL_MIX_CTRL, MIX_CTRL_FLAT);
+
+       if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF &&
+           hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) {
+               mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
+       } else if (hdmi->aud_param.aud_i2s_fmt == HDMI_I2S_MODE_LJT_24BIT) {
+               hdmi->aud_param.aud_i2s_fmt = HDMI_I2S_MODE_LJT_16BIT;
+       }
+
+       mtk_hdmi_hw_aud_set_i2s_fmt(hdmi, hdmi->aud_param.aud_i2s_fmt);
+       mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
+
+       dst = ((hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF) &&
+              (hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST));
+       mtk_hdmi_hw_audio_config(hdmi, dst);
+
+       if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF)
+               chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+       else
+               chan_type = hdmi->aud_param.aud_input_chan_type;
+       chan_count = mtk_hdmi_aud_get_chnl_count(chan_type);
+       mtk_hdmi_hw_aud_set_i2s_chan_num(hdmi, chan_type, chan_count);
+       mtk_hdmi_hw_aud_set_input_type(hdmi, hdmi->aud_param.aud_input_type);
+
+       return 0;
+}
+
+static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi,
+                               struct drm_display_mode *display_mode)
+{
+       unsigned int sample_rate = hdmi->aud_param.codec_params.sample_rate;
+
+       mtk_hdmi_aud_on_off_hw_ncts(hdmi, false);
+       mtk_hdmi_hw_aud_src_disable(hdmi);
+       mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV);
+
+       if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) {
+               switch (sample_rate) {
+               case 32000:
+               case 44100:
+               case 48000:
+               case 88200:
+               case 96000:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               mtk_hdmi_hw_aud_set_mclk(hdmi, hdmi->aud_param.aud_mclk);
+       } else {
+               switch (sample_rate) {
+               case 32000:
+               case 44100:
+               case 48000:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               mtk_hdmi_hw_aud_set_mclk(hdmi, HDMI_AUD_MCLK_128FS);
+       }
+
+       mtk_hdmi_hw_aud_set_ncts(hdmi, sample_rate, display_mode->clock);
+
+       mtk_hdmi_hw_aud_src_reenable(hdmi);
+       return 0;
+}
+
+static int mtk_hdmi_aud_output_config(struct mtk_hdmi *hdmi,
+                                     struct drm_display_mode *display_mode)
+{
+       mtk_hdmi_hw_aud_mute(hdmi);
+       mtk_hdmi_aud_enable_packet(hdmi, false);
+
+       mtk_hdmi_aud_set_input(hdmi);
+       mtk_hdmi_aud_set_src(hdmi, display_mode);
+       mtk_hdmi_hw_aud_set_channel_status(hdmi,
+                       hdmi->aud_param.codec_params.iec.status);
+
+       usleep_range(50, 100);
+
+       mtk_hdmi_aud_on_off_hw_ncts(hdmi, true);
+       mtk_hdmi_aud_enable_packet(hdmi, true);
+       mtk_hdmi_hw_aud_unmute(hdmi);
+       return 0;
+}
+
+static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
+                                       struct drm_display_mode *mode)
+{
+       struct hdmi_avi_infoframe frame;
+       u8 buffer[17];
+       ssize_t err;
+
+       err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+       if (err < 0) {
+               dev_err(hdmi->dev,
+                       "Failed to get AVI infoframe from mode: %zd\n", err);
+               return err;
+       }
+
+       err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+       if (err < 0) {
+               dev_err(hdmi->dev, "Failed to pack AVI infoframe: %zd\n", err);
+               return err;
+       }
+
+       mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+       return 0;
+}
+
+static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
+                                       const char *vendor,
+                                       const char *product)
+{
+       struct hdmi_spd_infoframe frame;
+       u8 buffer[29];
+       ssize_t err;
+
+       err = hdmi_spd_infoframe_init(&frame, vendor, product);
+       if (err < 0) {
+               dev_err(hdmi->dev, "Failed to initialize SPD infoframe: %zd\n",
+                       err);
+               return err;
+       }
+
+       err = hdmi_spd_infoframe_pack(&frame, buffer, sizeof(buffer));
+       if (err < 0) {
+               dev_err(hdmi->dev, "Failed to pack SDP infoframe: %zd\n", err);
+               return err;
+       }
+
+       mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+       return 0;
+}
+
+static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi)
+{
+       struct hdmi_audio_infoframe frame;
+       u8 buffer[14];
+       ssize_t err;
+
+       err = hdmi_audio_infoframe_init(&frame);
+       if (err < 0) {
+               dev_err(hdmi->dev, "Failed to setup audio infoframe: %zd\n",
+                       err);
+               return err;
+       }
+
+       frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
+       frame.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
+       frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
+       frame.channels = mtk_hdmi_aud_get_chnl_count(
+                                       hdmi->aud_param.aud_input_chan_type);
+
+       err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+       if (err < 0) {
+               dev_err(hdmi->dev, "Failed to pack audio infoframe: %zd\n",
+                       err);
+               return err;
+       }
+
+       mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+       return 0;
+}
+
+static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
+                                               struct drm_display_mode *mode)
+{
+       struct hdmi_vendor_infoframe frame;
+       u8 buffer[10];
+       ssize_t err;
+
+       err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
+       if (err) {
+               dev_err(hdmi->dev,
+                       "Failed to get vendor infoframe from mode: %zd\n", err);
+               return err;
+       }
+
+       err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
+       if (err < 0) {
+               dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
+                       err);
+               return err;
+       }
+
+       mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
+       return 0;
+}
+
+static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
+{
+       struct hdmi_audio_param *aud_param = &hdmi->aud_param;
+
+       hdmi->csp = HDMI_COLORSPACE_RGB;
+       aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+       aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+       aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
+       aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+       aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
+       aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+
+       return 0;
+}
+
+void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
+{
+       mtk_hdmi_aud_enable_packet(hdmi, true);
+       hdmi->audio_enable = true;
+}
+
+void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
+{
+       mtk_hdmi_aud_enable_packet(hdmi, false);
+       hdmi->audio_enable = false;
+}
+
+int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
+                            struct hdmi_audio_param *param)
+{
+       if (!hdmi->audio_enable) {
+               dev_err(hdmi->dev, "hdmi audio is in disable state!\n");
+               return -EINVAL;
+       }
+       dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
+               param->aud_codec, param->aud_input_type,
+               param->aud_input_chan_type, param->codec_params.sample_rate);
+       memcpy(&hdmi->aud_param, param, sizeof(*param));
+       return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
+}
+
+static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
+                                           struct drm_display_mode *mode)
+{
+       int ret;
+
+       mtk_hdmi_hw_vid_black(hdmi, true);
+       mtk_hdmi_hw_aud_mute(hdmi);
+       mtk_hdmi_hw_send_av_mute(hdmi);
+       phy_power_off(hdmi->phy);
+
+       ret = mtk_hdmi_video_change_vpll(hdmi, mode->clock * 1000);
+       if (ret) {
+               dev_err(hdmi->dev, "Failed to set vpll: %d\n", ret);
+               return ret;
+       }
+       mtk_hdmi_video_set_display_mode(hdmi, mode);
+
+       phy_power_on(hdmi->phy);
+       mtk_hdmi_aud_output_config(hdmi, mode);
+
+       mtk_hdmi_setup_audio_infoframe(hdmi);
+       mtk_hdmi_setup_avi_infoframe(hdmi, mode);
+       mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
+       if (mode->flags & DRM_MODE_FLAG_3D_MASK)
+               mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
+
+       mtk_hdmi_hw_vid_black(hdmi, false);
+       mtk_hdmi_hw_aud_unmute(hdmi);
+       mtk_hdmi_hw_send_av_unmute(hdmi);
+
+       return 0;
+}
+
+static const char * const mtk_hdmi_clk_names[MTK_HDMI_CLK_COUNT] = {
+       [MTK_HDMI_CLK_HDMI_PIXEL] = "pixel",
+       [MTK_HDMI_CLK_HDMI_PLL] = "pll",
+       [MTK_HDMI_CLK_AUD_BCLK] = "bclk",
+       [MTK_HDMI_CLK_AUD_SPDIF] = "spdif",
+};
+
+static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi,
+                               struct device_node *np)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(mtk_hdmi_clk_names); i++) {
+               hdmi->clk[i] = of_clk_get_by_name(np,
+                                                 mtk_hdmi_clk_names[i]);
+               if (IS_ERR(hdmi->clk[i]))
+                       return PTR_ERR(hdmi->clk[i]);
+       }
+       return 0;
+}
+
+static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi)
+{
+       int ret;
+
+       ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+       if (ret)
+               return ret;
+
+       ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+       return ret;
+}
+
+static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
+{
+       clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
+       clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
+}
+
+static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
+                                                 bool force)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+       return mtk_cec_hpd_high(hdmi->cec_dev) ?
+              connector_status_connected : connector_status_disconnected;
+}
+
+static void hdmi_conn_destroy(struct drm_connector *conn)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+       mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL);
+
+       drm_connector_cleanup(conn);
+}
+
+static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+       struct edid *edid;
+       int ret;
+
+       if (!hdmi->ddc_adpt)
+               return -ENODEV;
+
+       edid = drm_get_edid(conn, hdmi->ddc_adpt);
+       if (!edid)
+               return -ENODEV;
+
+       hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
+
+       drm_mode_connector_update_edid_property(conn, edid);
+
+       ret = drm_add_edid_modes(conn, edid);
+       drm_edid_to_eld(conn, edid);
+       kfree(edid);
+       return ret;
+}
+
+static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
+                                   struct drm_display_mode *mode)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+       dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
+               mode->hdisplay, mode->vdisplay, mode->vrefresh,
+               !!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
+
+       if (hdmi->bridge.next) {
+               struct drm_display_mode adjusted_mode;
+
+               drm_mode_copy(&adjusted_mode, mode);
+               if (!drm_bridge_mode_fixup(hdmi->bridge.next, mode,
+                                          &adjusted_mode))
+                       return MODE_BAD;
+       }
+
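+       /* Accept TMDS clocks from 27 MHz (SD modes) up to 297 MHz (4K at 30 Hz). */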
+       if (mode->clock < 27000)
+               return MODE_CLOCK_LOW;
+       if (mode->clock > 297000)
+               return MODE_CLOCK_HIGH;
+
+       return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
+}
+
+static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
+
+       return hdmi->bridge.encoder;
+}
+
+static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
+       .dpms = drm_atomic_helper_connector_dpms,
+       .detect = hdmi_conn_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = hdmi_conn_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs
+               mtk_hdmi_connector_helper_funcs = {
+       .get_modes = mtk_hdmi_conn_get_modes,
+       .mode_valid = mtk_hdmi_conn_mode_valid,
+       .best_encoder = mtk_hdmi_conn_best_enc,
+};
+
+static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
+{
+       struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+       if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev)
+               drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
+}
+
+/*
+ * Bridge callbacks
+ */
+
+static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+       int ret;
+
+       ret = drm_connector_init(bridge->encoder->dev, &hdmi->conn,
+                                &mtk_hdmi_connector_funcs,
+                                DRM_MODE_CONNECTOR_HDMIA);
+       if (ret) {
+               dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
+               return ret;
+       }
+       drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs);
+
+       hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD;
+       hdmi->conn.interlace_allowed = true;
+       hdmi->conn.doublescan_allowed = false;
+
+       ret = drm_mode_connector_attach_encoder(&hdmi->conn,
+                                               bridge->encoder);
+       if (ret) {
+               dev_err(hdmi->dev,
+                       "Failed to attach connector to encoder: %d\n", ret);
+               return ret;
+       }
+
+       if (bridge->next) {
+               bridge->next->encoder = bridge->encoder;
+               ret = drm_bridge_attach(bridge->encoder->dev, bridge->next);
+               if (ret) {
+                       dev_err(hdmi->dev,
+                               "Failed to attach external bridge: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       mtk_cec_set_hpd_event(hdmi->cec_dev, mtk_hdmi_hpd_event, hdmi->dev);
+
+       return 0;
+}
+
+static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+                                      const struct drm_display_mode *mode,
+                                      struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+       if (!hdmi->enabled)
+               return;
+
+       phy_power_off(hdmi->phy);
+       clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
+       clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+
+       hdmi->enabled = false;
+}
+
+static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+       if (!hdmi->powered)
+               return;
+
+       mtk_hdmi_hw_1p4_version_enable(hdmi, true);
+       mtk_hdmi_hw_make_reg_writable(hdmi, false);
+
+       hdmi->powered = false;
+}
+
+static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+                                    struct drm_display_mode *mode,
+                                    struct drm_display_mode *adjusted_mode)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+       dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n",
+               adjusted_mode->name, adjusted_mode->hdisplay);
+       dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d",
+               adjusted_mode->hsync_start, adjusted_mode->hsync_end,
+               adjusted_mode->htotal);
+       dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n",
+               adjusted_mode->hskew, adjusted_mode->vdisplay);
+       dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d",
+               adjusted_mode->vsync_start, adjusted_mode->vsync_end,
+               adjusted_mode->vtotal);
+       dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n",
+               adjusted_mode->vscan, adjusted_mode->flags);
+
+       drm_mode_copy(&hdmi->mode, adjusted_mode);
+}
+
+static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+       mtk_hdmi_hw_make_reg_writable(hdmi, true);
+       mtk_hdmi_hw_1p4_version_enable(hdmi, true);
+
+       hdmi->powered = true;
+}
+
+static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+       struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+       mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
+       clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
+       clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
+       phy_power_on(hdmi->phy);
+
+       hdmi->enabled = true;
+}
+
+static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
+       .attach = mtk_hdmi_bridge_attach,
+       .mode_fixup = mtk_hdmi_bridge_mode_fixup,
+       .disable = mtk_hdmi_bridge_disable,
+       .post_disable = mtk_hdmi_bridge_post_disable,
+       .mode_set = mtk_hdmi_bridge_mode_set,
+       .pre_enable = mtk_hdmi_bridge_pre_enable,
+       .enable = mtk_hdmi_bridge_enable,
+};
+
+static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
+                                  struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct device_node *cec_np, *port, *ep, *remote, *i2c_np;
+       struct platform_device *cec_pdev;
+       struct regmap *regmap;
+       struct resource *mem;
+       int ret;
+
+       ret = mtk_hdmi_get_all_clk(hdmi, np);
+       if (ret) {
+               dev_err(dev, "Failed to get clocks: %d\n", ret);
+               return ret;
+       }
+
+       /* The CEC module handles HDMI hotplug detection */
+       cec_np = of_find_compatible_node(np->parent, NULL,
+                                        "mediatek,mt8173-cec");
+       if (!cec_np) {
+               dev_err(dev, "Failed to find CEC node\n");
+               return -EINVAL;
+       }
+
+       cec_pdev = of_find_device_by_node(cec_np);
+       if (!cec_pdev) {
+               dev_err(hdmi->dev, "Waiting for CEC device %s\n",
+                       cec_np->full_name);
+               return -EPROBE_DEFER;
+       }
+       hdmi->cec_dev = &cec_pdev->dev;
+
+       /*
+        * The mediatek,syscon-hdmi property contains a phandle link to the
+        * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
+        * registers it contains.
+        */
+       regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi");
+       ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1,
+                                        &hdmi->sys_offset);
+       if (IS_ERR(regmap))
+               ret = PTR_ERR(regmap);
+       if (ret) {
+               dev_err(dev,
+                       "Failed to get system configuration registers: %d\n",
+                       ret);
+               return ret;
+       }
+       hdmi->sys_regmap = regmap;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       hdmi->regs = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(hdmi->regs))
+               return PTR_ERR(hdmi->regs);
+
+       port = of_graph_get_port_by_id(np, 1);
+       if (!port) {
+               dev_err(dev, "Missing output port node\n");
+               return -EINVAL;
+       }
+
+       ep = of_get_child_by_name(port, "endpoint");
+       if (!ep) {
+               dev_err(dev, "Missing endpoint node in port %s\n",
+                       port->full_name);
+               of_node_put(port);
+               return -EINVAL;
+       }
+       of_node_put(port);
+
+       remote = of_graph_get_remote_port_parent(ep);
+       if (!remote) {
+               dev_err(dev, "Missing connector/bridge node for endpoint %s\n",
+                       ep->full_name);
+               of_node_put(ep);
+               return -EINVAL;
+       }
+       of_node_put(ep);
+
+       if (!of_device_is_compatible(remote, "hdmi-connector")) {
+               hdmi->bridge.next = of_drm_find_bridge(remote);
+               if (!hdmi->bridge.next) {
+                       dev_err(dev, "Waiting for external bridge\n");
+                       of_node_put(remote);
+                       return -EPROBE_DEFER;
+               }
+       }
+
+       i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+       if (!i2c_np) {
+               dev_err(dev, "Failed to find ddc-i2c-bus node in %s\n",
+                       remote->full_name);
+               of_node_put(remote);
+               return -EINVAL;
+       }
+       of_node_put(remote);
+
+       hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+       if (!hdmi->ddc_adpt) {
+               dev_err(dev, "Failed to get ddc i2c adapter by node\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * HDMI audio codec callbacks
+ */
+
+static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
+                                   struct hdmi_codec_daifmt *daifmt,
+                                   struct hdmi_codec_params *params)
+{
+       struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+       struct hdmi_audio_param hdmi_params;
+       unsigned int chan = params->cea.channels;
+
+       dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
+               params->sample_rate, params->sample_width, chan);
+
+       if (!hdmi->bridge.encoder)
+               return -ENODEV;
+
+       switch (chan) {
+       case 2:
+               hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+               break;
+       case 4:
+               hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
+               break;
+       case 6:
+               hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
+               break;
+       case 8:
+               hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
+               break;
+       default:
+               dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
+               return -EINVAL;
+       }
+
+       switch (params->sample_rate) {
+       case 32000:
+       case 44100:
+       case 48000:
+       case 88200:
+       case 96000:
+       case 176400:
+       case 192000:
+               break;
+       default:
+               dev_err(hdmi->dev, "rate[%d] not supported!\n",
+                       params->sample_rate);
+               return -EINVAL;
+       }
+
+       switch (daifmt->fmt) {
+       case HDMI_I2S:
+               hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+               hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+               hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
+               hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+               hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
+               break;
+       default:
+               dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
+                       daifmt->fmt);
+               return -EINVAL;
+       }
+
+       memcpy(&hdmi_params.codec_params, params,
+              sizeof(hdmi_params.codec_params));
+
+       mtk_hdmi_audio_set_param(hdmi, &hdmi_params);
+
+       return 0;
+}
+
+static int mtk_hdmi_audio_startup(struct device *dev, void *data)
+{
+       struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+       dev_dbg(dev, "%s\n", __func__);
+
+       mtk_hdmi_audio_enable(hdmi);
+
+       return 0;
+}
+
+static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+       struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+       dev_dbg(dev, "%s\n", __func__);
+
+       mtk_hdmi_audio_disable(hdmi);
+}
+
+int mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+       struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+       dev_dbg(dev, "%s(%d)\n", __func__, enable);
+
+       if (enable)
+               mtk_hdmi_hw_aud_mute(hdmi);
+       else
+               mtk_hdmi_hw_aud_unmute(hdmi);
+
+       return 0;
+}
+
+static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
+                                 size_t len)
+{
+       struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+       dev_dbg(dev, "%s\n", __func__);
+
+       memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
+
+       return 0;
+}
+
+static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
+       .hw_params = mtk_hdmi_audio_hw_params,
+       .audio_startup = mtk_hdmi_audio_startup,
+       .audio_shutdown = mtk_hdmi_audio_shutdown,
+       .digital_mute = mtk_hdmi_audio_digital_mute,
+       .get_eld = mtk_hdmi_audio_get_eld,
+};
+
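+/*
+ * Register a hdmi-codec platform device so that the ASoC hdmi-audio-codec
+ * driver can expose this HDMI TX as an audio DAI. Only two-channel I2S is
+ * advertised here.
+ */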
+static void mtk_hdmi_register_audio_driver(struct device *dev)
+{
+       struct hdmi_codec_pdata codec_data = {
+               .ops = &mtk_hdmi_audio_codec_ops,
+               .max_i2s_channels = 2,
+               .i2s = 1,
+       };
+       struct platform_device *pdev;
+
+       pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
+                                            PLATFORM_DEVID_AUTO, &codec_data,
+                                            sizeof(codec_data));
+       if (IS_ERR(pdev))
+               return;
+
+       DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
+}
+
+static int mtk_drm_hdmi_probe(struct platform_device *pdev)
+{
+       struct mtk_hdmi *hdmi;
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+       if (!hdmi)
+               return -ENOMEM;
+
+       hdmi->dev = dev;
+
+       ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
+       if (ret)
+               return ret;
+
+       hdmi->phy = devm_phy_get(dev, "hdmi");
+       if (IS_ERR(hdmi->phy)) {
+               ret = PTR_ERR(hdmi->phy);
+               dev_err(dev, "Failed to get HDMI PHY: %d\n", ret);
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, hdmi);
+
+       ret = mtk_hdmi_output_init(hdmi);
+       if (ret) {
+               dev_err(dev, "Failed to initialize hdmi output\n");
+               return ret;
+       }
+
+       mtk_hdmi_register_audio_driver(dev);
+
+       hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
+       hdmi->bridge.of_node = pdev->dev.of_node;
+       ret = drm_bridge_add(&hdmi->bridge);
+       if (ret) {
+               dev_err(dev, "failed to add bridge, ret = %d\n", ret);
+               return ret;
+       }
+
+       ret = mtk_hdmi_clk_enable_audio(hdmi);
+       if (ret) {
+               dev_err(dev, "Failed to enable audio clocks: %d\n", ret);
+               goto err_bridge_remove;
+       }
+
+       dev_dbg(dev, "mediatek hdmi probe success\n");
+       return 0;
+
+err_bridge_remove:
+       drm_bridge_remove(&hdmi->bridge);
+       return ret;
+}
+
+static int mtk_drm_hdmi_remove(struct platform_device *pdev)
+{
+       struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
+
+       drm_bridge_remove(&hdmi->bridge);
+       mtk_hdmi_clk_disable_audio(hdmi);
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_hdmi_suspend(struct device *dev)
+{
+       struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+       mtk_hdmi_clk_disable_audio(hdmi);
+       dev_dbg(dev, "hdmi suspend success!\n");
+       return 0;
+}
+
+static int mtk_hdmi_resume(struct device *dev)
+{
+       struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+       int ret;
+
+       ret = mtk_hdmi_clk_enable_audio(hdmi);
+       if (ret) {
+               dev_err(dev, "hdmi resume failed!\n");
+               return ret;
+       }
+
+       dev_dbg(dev, "hdmi resume success!\n");
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
+                        mtk_hdmi_suspend, mtk_hdmi_resume);
+
+static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
+       { .compatible = "mediatek,mt8173-hdmi", },
+       {}
+};
+
+static struct platform_driver mtk_hdmi_driver = {
+       .probe = mtk_drm_hdmi_probe,
+       .remove = mtk_drm_hdmi_remove,
+       .driver = {
+               .name = "mediatek-drm-hdmi",
+               .of_match_table = mtk_drm_hdmi_of_ids,
+               .pm = &mtk_hdmi_pm_ops,
+       },
+};
+
+static struct platform_driver * const mtk_hdmi_drivers[] = {
+       &mtk_hdmi_phy_driver,
+       &mtk_hdmi_ddc_driver,
+       &mtk_cec_driver,
+       &mtk_hdmi_driver,
+};
+
+static int __init mtk_hdmitx_init(void)
+{
+       int ret;
+       int i;
+
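+       /*
+        * Register the PHY, DDC and CEC helper drivers first; the HDMI
+        * bridge driver registered last depends on all of them.
+        */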
+       for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) {
+               ret = platform_driver_register(mtk_hdmi_drivers[i]);
+               if (ret < 0) {
+                       pr_err("Failed to register %s driver: %d\n",
+                              mtk_hdmi_drivers[i]->driver.name, ret);
+                       goto err;
+               }
+       }
+
+       return 0;
+
+err:
+       while (--i >= 0)
+               platform_driver_unregister(mtk_hdmi_drivers[i]);
+
+       return ret;
+}
+
+static void __exit mtk_hdmitx_exit(void)
+{
+       int i;
+
+       for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 1; i >= 0; i--)
+               platform_driver_unregister(mtk_hdmi_drivers[i]);
+}
+
+module_init(mtk_hdmitx_init);
+module_exit(mtk_hdmitx_exit);
+
+MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMI Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
new file mode 100644 (file)
index 0000000..6371b3d
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MTK_HDMI_CTRL_H
+#define _MTK_HDMI_CTRL_H
+
+struct platform_driver;
+
+extern struct platform_driver mtk_cec_driver;
+extern struct platform_driver mtk_hdmi_ddc_driver;
+extern struct platform_driver mtk_hdmi_phy_driver;
+
+#endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
new file mode 100644 (file)
index 0000000..33c9e1b
--- /dev/null
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#define SIF1_CLOCK             (288)
+#define DDC_DDCMCTL0           (0x0)
+#define DDCM_ODRAIN                    BIT(31)
+#define DDCM_CLK_DIV_OFFSET            (16)
+#define DDCM_CLK_DIV_MASK              (0xfff << 16)
+#define DDCM_CS_STATUS                 BIT(4)
+#define DDCM_SCL_STATE                 BIT(3)
+#define DDCM_SDA_STATE                 BIT(2)
+#define DDCM_SM0EN                     BIT(1)
+#define DDCM_SCL_STRETCH               BIT(0)
+#define DDC_DDCMCTL1           (0x4)
+#define DDCM_ACK_OFFSET                        (16)
+#define DDCM_ACK_MASK                  (0xff << 16)
+#define DDCM_PGLEN_OFFSET              (8)
+#define DDCM_PGLEN_MASK                        (0x7 << 8)
+#define DDCM_SIF_MODE_OFFSET           (4)
+#define DDCM_SIF_MODE_MASK             (0x7 << 4)
+#define DDCM_START                     (0x1)
+#define DDCM_WRITE_DATA                        (0x2)
+#define DDCM_STOP                      (0x3)
+#define DDCM_READ_DATA_NO_ACK          (0x4)
+#define DDCM_READ_DATA_ACK             (0x5)
+#define DDCM_TRI                       BIT(0)
+#define DDC_DDCMD0             (0x8)
+#define DDCM_DATA3                     (0xff << 24)
+#define DDCM_DATA2                     (0xff << 16)
+#define DDCM_DATA1                     (0xff << 8)
+#define DDCM_DATA0                     (0xff << 0)
+#define DDC_DDCMD1             (0xc)
+#define DDCM_DATA7                     (0xff << 24)
+#define DDCM_DATA6                     (0xff << 16)
+#define DDCM_DATA5                     (0xff << 8)
+#define DDCM_DATA4                     (0xff << 0)
+
+struct mtk_hdmi_ddc {
+       struct i2c_adapter adap;
+       struct clk *clk;
+       void __iomem *regs;
+};
+
+static inline void sif_set_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+                              unsigned int val)
+{
+       writel(readl(ddc->regs + offset) | val, ddc->regs + offset);
+}
+
+static inline void sif_clr_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+                              unsigned int val)
+{
+       writel(readl(ddc->regs + offset) & ~val, ddc->regs + offset);
+}
+
+static inline bool sif_bit_is_set(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+                                 unsigned int val)
+{
+       return (readl(ddc->regs + offset) & val) == val;
+}
+
+static inline void sif_write_mask(struct mtk_hdmi_ddc *ddc, unsigned int offset,
+                                 unsigned int mask, unsigned int shift,
+                                 unsigned int val)
+{
+       unsigned int tmp;
+
+       tmp = readl(ddc->regs + offset);
+       tmp &= ~mask;
+       tmp |= (val << shift) & mask;
+       writel(tmp, ddc->regs + offset);
+}
+
+static inline unsigned int sif_read_mask(struct mtk_hdmi_ddc *ddc,
+                                        unsigned int offset, unsigned int mask,
+                                        unsigned int shift)
+{
+       return (readl(ddc->regs + offset) & mask) >> shift;
+}
+
+static void ddcm_trigger_mode(struct mtk_hdmi_ddc *ddc, int mode)
+{
+       u32 val;
+
+       sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_SIF_MODE_MASK,
+                      DDCM_SIF_MODE_OFFSET, mode);
+       sif_set_bit(ddc, DDC_DDCMCTL1, DDCM_TRI);
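+       /* The hardware clears DDCM_TRI when the triggered operation has
+        * completed; allow it up to 20 ms.
+        */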
+       readl_poll_timeout(ddc->regs + DDC_DDCMCTL1, val,
+                          (val & DDCM_TRI) != DDCM_TRI, 4, 20000);
+}
+
+static int mtk_hdmi_ddc_read_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
+{
+       struct device *dev = ddc->adap.dev.parent;
+       u32 remain_count, ack_count, ack_final, read_count, temp_count;
+       u32 index = 0;
+       u32 ack;
+       int i;
+
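+       /* Issue START, then send the 7-bit address with the read bit set and
+        * check that the slave ACKed it.
+        */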
+       ddcm_trigger_mode(ddc, DDCM_START);
+       sif_write_mask(ddc, DDC_DDCMD0, 0xff, 0, (msg->addr << 1) | 0x01);
+       sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
+                      0x00);
+       ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
+       ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
+       dev_dbg(dev, "ack = 0x%x\n", ack);
+       if (ack != 0x01) {
+               dev_err(dev, "i2c ack err!\n");
+               return -ENXIO;
+       }
+
+       remain_count = msg->len;
+       ack_count = (msg->len - 1) / 8;
+       ack_final = 0;
+
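+       /*
+        * The controller transfers at most 8 bytes per trigger. Read full
+        * 8-byte chunks with ACK, and NACK the last byte of the final chunk
+        * as the I2C protocol requires for reads.
+        */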
+       while (remain_count > 0) {
+               if (ack_count > 0) {
+                       read_count = 8;
+                       ack_final = 0;
+                       ack_count--;
+               } else {
+                       read_count = remain_count;
+                       ack_final = 1;
+               }
+
+               sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK,
+                              DDCM_PGLEN_OFFSET, read_count - 1);
+               ddcm_trigger_mode(ddc, (ack_final == 1) ?
+                                 DDCM_READ_DATA_NO_ACK :
+                                 DDCM_READ_DATA_ACK);
+
+               ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK,
+                                   DDCM_ACK_OFFSET);
+               temp_count = 0;
+               while (((ack & (1 << temp_count)) != 0) && (temp_count < 8))
+                       temp_count++;
+               if (((ack_final == 1) && (temp_count != (read_count - 1))) ||
+                   ((ack_final == 0) && (temp_count != read_count))) {
+                       dev_err(dev, "Address NACK! ACK(0x%x)\n", ack);
+                       break;
+               }
+
+               for (i = read_count; i >= 1; i--) {
+                       int shift;
+                       int offset;
+
+                       if (i > 4) {
+                               offset = DDC_DDCMD1;
+                               shift = (i - 5) * 8;
+                       } else {
+                               offset = DDC_DDCMD0;
+                               shift = (i - 1) * 8;
+                       }
+
+                       msg->buf[index + i - 1] = sif_read_mask(ddc, offset,
+                                                               0xff << shift,
+                                                               shift);
+               }
+
+               remain_count -= read_count;
+               index += read_count;
+       }
+
+       return 0;
+}
+
+static int mtk_hdmi_ddc_write_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
+{
+       struct device *dev = ddc->adap.dev.parent;
+       u32 ack;
+
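+       /* Issue START, then transmit the address byte and one data byte; both
+        * must be ACKed, giving the 0x3 pattern checked below.
+        */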
+       ddcm_trigger_mode(ddc, DDCM_START);
+       sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA0, 0, msg->addr << 1);
+       sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA1, 8, msg->buf[0]);
+       sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
+                      0x1);
+       ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
+
+       ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
+       dev_dbg(dev, "ack = %d\n", ack);
+
+       if (ack != 0x03) {
+               dev_err(dev, "i2c ack err!\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int mtk_hdmi_ddc_xfer(struct i2c_adapter *adapter,
+                            struct i2c_msg *msgs, int num)
+{
+       struct mtk_hdmi_ddc *ddc = adapter->algo_data;
+       struct device *dev = adapter->dev.parent;
+       int ret;
+       int i;
+
+       if (!ddc) {
+               dev_err(dev, "invalid arguments\n");
+               return -EINVAL;
+       }
+
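+       /* Configure the master before each transfer: allow SCL clock
+        * stretching, enable the DDC master and clear the open-drain mode
+        * bit.
+        */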
+       sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SCL_STRETCH);
+       sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SM0EN);
+       sif_clr_bit(ddc, DDC_DDCMCTL0, DDCM_ODRAIN);
+
+       if (sif_bit_is_set(ddc, DDC_DDCMCTL1, DDCM_TRI)) {
+               dev_err(dev, "ddc line is busy!\n");
+               return -EBUSY;
+       }
+
+       sif_write_mask(ddc, DDC_DDCMCTL0, DDCM_CLK_DIV_MASK,
+                      DDCM_CLK_DIV_OFFSET, SIF1_CLOCK);
+
+       for (i = 0; i < num; i++) {
+               struct i2c_msg *msg = &msgs[i];
+
+               dev_dbg(dev, "i2c msg, adr:0x%x, flags:%d, len :0x%x\n",
+                       msg->addr, msg->flags, msg->len);
+
+               if (msg->flags & I2C_M_RD)
+                       ret = mtk_hdmi_ddc_read_msg(ddc, msg);
+               else
+                       ret = mtk_hdmi_ddc_write_msg(ddc, msg);
+               if (ret < 0)
+                       goto xfer_end;
+       }
+
+       ddcm_trigger_mode(ddc, DDCM_STOP);
+
+       return i;
+
+xfer_end:
+       ddcm_trigger_mode(ddc, DDCM_STOP);
+       dev_err(dev, "ddc failed!\n");
+       return ret;
+}
+
+static u32 mtk_hdmi_ddc_func(struct i2c_adapter *adapter)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm mtk_hdmi_ddc_algorithm = {
+       .master_xfer = mtk_hdmi_ddc_xfer,
+       .functionality = mtk_hdmi_ddc_func,
+};
+
+static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mtk_hdmi_ddc *ddc;
+       struct resource *mem;
+       int ret;
+
+       ddc = devm_kzalloc(dev, sizeof(struct mtk_hdmi_ddc), GFP_KERNEL);
+       if (!ddc)
+               return -ENOMEM;
+
+       ddc->clk = devm_clk_get(dev, "ddc-i2c");
+       if (IS_ERR(ddc->clk)) {
+               dev_err(dev, "get ddc_clk failed: %p ,\n", ddc->clk);
+               return PTR_ERR(ddc->clk);
+       }
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ddc->regs = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(ddc->regs))
+               return PTR_ERR(ddc->regs);
+
+       ret = clk_prepare_enable(ddc->clk);
+       if (ret) {
+               dev_err(dev, "enable ddc clk failed!\n");
+               return ret;
+       }
+
+       strlcpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
+       ddc->adap.owner = THIS_MODULE;
+       ddc->adap.class = I2C_CLASS_DDC;
+       ddc->adap.algo = &mtk_hdmi_ddc_algorithm;
+       ddc->adap.retries = 3;
+       ddc->adap.dev.of_node = dev->of_node;
+       ddc->adap.algo_data = ddc;
+       ddc->adap.dev.parent = &pdev->dev;
+
+       ret = i2c_add_adapter(&ddc->adap);
+       if (ret < 0) {
+               dev_err(dev, "failed to add bus to i2c core\n");
+               goto err_clk_disable;
+       }
+
+       platform_set_drvdata(pdev, ddc);
+
+       dev_dbg(dev, "ddc->adap: %p\n", &ddc->adap);
+       dev_dbg(dev, "ddc->clk: %p\n", ddc->clk);
+       dev_dbg(dev, "physical adr: %pa, end: %pa\n", &mem->start,
+               &mem->end);
+
+       return 0;
+
+err_clk_disable:
+       clk_disable_unprepare(ddc->clk);
+       return ret;
+}
+
+static int mtk_hdmi_ddc_remove(struct platform_device *pdev)
+{
+       struct mtk_hdmi_ddc *ddc = platform_get_drvdata(pdev);
+
+       i2c_del_adapter(&ddc->adap);
+       clk_disable_unprepare(ddc->clk);
+
+       return 0;
+}
+
+static const struct of_device_id mtk_hdmi_ddc_match[] = {
+       { .compatible = "mediatek,mt8173-hdmi-ddc", },
+       {},
+};
+
+struct platform_driver mtk_hdmi_ddc_driver = {
+       .probe = mtk_hdmi_ddc_probe,
+       .remove = mtk_hdmi_ddc_remove,
+       .driver = {
+               .name = "mediatek-hdmi-ddc",
+               .of_match_table = mtk_hdmi_ddc_match,
+       },
+};
+
+MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMI DDC Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
new file mode 100644 (file)
index 0000000..a5cb07d
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MTK_HDMI_REGS_H
+#define _MTK_HDMI_REGS_H
+
+#define GRL_INT_MASK           0x18
+#define GRL_IFM_PORT           0x188
+#define GRL_CH_SWAP            0x198
+#define LR_SWAP                                BIT(0)
+#define LFE_CC_SWAP                    BIT(1)
+#define LSRS_SWAP                      BIT(2)
+#define RLS_RRS_SWAP                   BIT(3)
+#define LR_STATUS_SWAP                 BIT(4)
+#define GRL_I2S_C_STA0         0x140
+#define GRL_I2S_C_STA1         0x144
+#define GRL_I2S_C_STA2         0x148
+#define GRL_I2S_C_STA3         0x14C
+#define GRL_I2S_C_STA4         0x150
+#define GRL_I2S_UV             0x154
+#define I2S_UV_V                       BIT(0)
+#define I2S_UV_U                       BIT(1)
+#define I2S_UV_CH_EN_MASK              0x3c
+#define I2S_UV_CH_EN(x)                        BIT((x) + 2)
+#define I2S_UV_TMDS_DEBUG              BIT(6)
+#define I2S_UV_NORMAL_INFO_INV         BIT(7)
+#define GRL_ACP_ISRC_CTRL      0x158
+#define VS_EN                          BIT(0)
+#define ACP_EN                         BIT(1)
+#define ISRC1_EN                       BIT(2)
+#define ISRC2_EN                       BIT(3)
+#define GAMUT_EN                       BIT(4)
+#define GRL_CTS_CTRL           0x160
+#define CTS_CTRL_SOFT                  BIT(0)
+#define GRL_INT                        0x14
+#define INT_MDI                                BIT(0)
+#define INT_HDCP                       BIT(1)
+#define INT_FIFO_O                     BIT(2)
+#define INT_FIFO_U                     BIT(3)
+#define INT_IFM_ERR                    BIT(4)
+#define INT_INF_DONE                   BIT(5)
+#define INT_NCTS_DONE                  BIT(6)
+#define INT_CTRL_PKT_DONE              BIT(7)
+#define GRL_CTRL               0x1C
+#define CTRL_GEN_EN                    BIT(2)
+#define CTRL_SPD_EN                    BIT(3)
+#define CTRL_MPEG_EN                   BIT(4)
+#define CTRL_AUDIO_EN                  BIT(5)
+#define CTRL_AVI_EN                    BIT(6)
+#define CTRL_AVMUTE                    BIT(7)
+#define GRL_STATUS             0x20
+#define STATUS_HTPLG                   BIT(0)
+#define STATUS_PORD                    BIT(1)
+#define GRL_DIVN               0x170
+#define NCTS_WRI_ANYTIME               BIT(6)
+#define GRL_AUDIO_CFG          0x17C
+#define AUDIO_ZERO                     BIT(0)
+#define HIGH_BIT_RATE                  BIT(1)
+#define SACD_DST                       BIT(2)
+#define DST_NORMAL_DOUBLE              BIT(3)
+#define DSD_INV                                BIT(4)
+#define LR_INV                         BIT(5)
+#define LR_MIX                         BIT(6)
+#define DSD_SEL                                BIT(7)
+#define GRL_NCTS               0x184
+#define GRL_CH_SW0             0x18C
+#define GRL_CH_SW1             0x190
+#define GRL_CH_SW2             0x194
+#define CH_SWITCH(from, to)            ((from) << ((to) * 3))
+#define GRL_INFOFRM_VER                0x19C
+#define GRL_INFOFRM_TYPE       0x1A0
+#define GRL_INFOFRM_LNG                0x1A4
+#define GRL_MIX_CTRL           0x1B4
+#define MIX_CTRL_SRC_EN                        BIT(0)
+#define BYPASS_VOLUME                  BIT(1)
+#define MIX_CTRL_FLAT                  BIT(7)
+#define GRL_AOUT_CFG           0x1C4
+#define AOUT_BNUM_SEL_MASK             0x03
+#define AOUT_24BIT                     0x00
+#define AOUT_20BIT                     0x02
+#define AOUT_16BIT                     0x03
+#define AOUT_FIFO_ADAP_CTRL            BIT(6)
+#define AOUT_BURST_PREAMBLE_EN         BIT(7)
+#define HIGH_BIT_RATE_PACKET_ALIGN     (AOUT_BURST_PREAMBLE_EN | \
+                                        AOUT_FIFO_ADAP_CTRL)
+#define GRL_SHIFT_L1           0x1C0
+#define GRL_SHIFT_R2           0x1B0
+#define AUDIO_PACKET_OFF               BIT(6)
+#define GRL_CFG0               0x24
+#define CFG0_I2S_MODE_MASK             0x3
+#define CFG0_I2S_MODE_RTJ              0x1
+#define CFG0_I2S_MODE_LTJ              0x0
+#define CFG0_I2S_MODE_I2S              0x2
+#define CFG0_W_LENGTH_MASK             0x30
+#define CFG0_W_LENGTH_24BIT            0x00
+#define CFG0_W_LENGTH_16BIT            0x10
+#define GRL_CFG1               0x28
+#define CFG1_EDG_SEL                   BIT(0)
+#define CFG1_SPDIF                     BIT(1)
+#define CFG1_DVI                       BIT(2)
+#define CFG1_HDCP_DEBUG                        BIT(3)
+#define GRL_CFG2               0x2c
+#define CFG2_MHL_DE_SEL                        BIT(3)
+#define CFG2_MHL_FAKE_DE_SEL           BIT(4)
+#define CFG2_MHL_DATA_REMAP            BIT(5)
+#define CFG2_NOTICE_EN                 BIT(6)
+#define CFG2_ACLK_INV                  BIT(7)
+#define GRL_CFG3               0x30
+#define CFG3_AES_KEY_INDEX_MASK                0x3f
+#define CFG3_CONTROL_PACKET_DELAY      BIT(6)
+#define CFG3_KSV_LOAD_START            BIT(7)
+#define GRL_CFG4               0x34
+#define CFG4_AES_KEY_LOAD              BIT(4)
+#define CFG4_AV_UNMUTE_EN              BIT(5)
+#define CFG4_AV_UNMUTE_SET             BIT(6)
+#define CFG4_MHL_MODE                  BIT(7)
+#define GRL_CFG5               0x38
+#define CFG5_CD_RATIO_MASK     0x8F
+#define CFG5_FS128                     (0x1 << 4)
+#define CFG5_FS256                     (0x2 << 4)
+#define CFG5_FS384                     (0x3 << 4)
+#define CFG5_FS512                     (0x4 << 4)
+#define CFG5_FS768                     (0x6 << 4)
+#define DUMMY_304              0x304
+#define CHM0_SEL                       (0x3 << 2)
+#define CHM1_SEL                       (0x3 << 4)
+#define CHM2_SEL                       (0x3 << 6)
+#define AUDIO_I2S_NCTS_SEL             BIT(1)
+#define AUDIO_I2S_NCTS_SEL_64          (1 << 1)
+#define AUDIO_I2S_NCTS_SEL_128         (0 << 1)
+#define NEW_GCP_CTRL                   BIT(0)
+#define NEW_GCP_CTRL_MERGE             BIT(0)
+#define GRL_L_STATUS_0         0x200
+#define GRL_L_STATUS_1         0x204
+#define GRL_L_STATUS_2         0x208
+#define GRL_L_STATUS_3         0x20c
+#define GRL_L_STATUS_4         0x210
+#define GRL_L_STATUS_5         0x214
+#define GRL_L_STATUS_6         0x218
+#define GRL_L_STATUS_7         0x21c
+#define GRL_L_STATUS_8         0x220
+#define GRL_L_STATUS_9         0x224
+#define GRL_L_STATUS_10                0x228
+#define GRL_L_STATUS_11                0x22c
+#define GRL_L_STATUS_12                0x230
+#define GRL_L_STATUS_13                0x234
+#define GRL_L_STATUS_14                0x238
+#define GRL_L_STATUS_15                0x23c
+#define GRL_L_STATUS_16                0x240
+#define GRL_L_STATUS_17                0x244
+#define GRL_L_STATUS_18                0x248
+#define GRL_L_STATUS_19                0x24c
+#define GRL_L_STATUS_20                0x250
+#define GRL_L_STATUS_21                0x254
+#define GRL_L_STATUS_22                0x258
+#define GRL_L_STATUS_23                0x25c
+#define GRL_R_STATUS_0         0x260
+#define GRL_R_STATUS_1         0x264
+#define GRL_R_STATUS_2         0x268
+#define GRL_R_STATUS_3         0x26c
+#define GRL_R_STATUS_4         0x270
+#define GRL_R_STATUS_5         0x274
+#define GRL_R_STATUS_6         0x278
+#define GRL_R_STATUS_7         0x27c
+#define GRL_R_STATUS_8         0x280
+#define GRL_R_STATUS_9         0x284
+#define GRL_R_STATUS_10                0x288
+#define GRL_R_STATUS_11                0x28c
+#define GRL_R_STATUS_12                0x290
+#define GRL_R_STATUS_13                0x294
+#define GRL_R_STATUS_14                0x298
+#define GRL_R_STATUS_15                0x29c
+#define GRL_R_STATUS_16                0x2a0
+#define GRL_R_STATUS_17                0x2a4
+#define GRL_R_STATUS_18                0x2a8
+#define GRL_R_STATUS_19                0x2ac
+#define GRL_R_STATUS_20                0x2b0
+#define GRL_R_STATUS_21                0x2b4
+#define GRL_R_STATUS_22                0x2b8
+#define GRL_R_STATUS_23                0x2bc
+#define GRL_ABIST_CTRL0                0x2D4
+#define GRL_ABIST_CTRL1                0x2D8
+#define ABIST_EN                       BIT(7)
+#define ABIST_DATA_FMT                 (0x7 << 0)
+#define VIDEO_CFG_0            0x380
+#define VIDEO_CFG_1            0x384
+#define VIDEO_CFG_2            0x388
+#define VIDEO_CFG_3            0x38c
+#define VIDEO_CFG_4            0x390
+#define VIDEO_SOURCE_SEL               BIT(7)
+#define NORMAL_PATH                    (1 << 7)
+#define GEN_RGB                                (0 << 7)
+
+#define HDMI_SYS_CFG1C         0x000
+#define HDMI_ON                                BIT(0)
+#define HDMI_RST                       BIT(1)
+#define ANLG_ON                                BIT(2)
+#define CFG10_DVI                      BIT(3)
+#define HDMI_TST                       BIT(3)
+#define SYS_KEYMASK1                   (0xff << 8)
+#define SYS_KEYMASK2                   (0xff << 16)
+#define AUD_OUTSYNC_EN                 BIT(24)
+#define AUD_OUTSYNC_PRE_EN             BIT(25)
+#define I2CM_ON                                BIT(26)
+#define E2PROM_TYPE_8BIT               BIT(27)
+#define MCM_E2PROM_ON                  BIT(28)
+#define EXT_E2PROM_ON                  BIT(29)
+#define HTPLG_PIN_SEL_OFF              BIT(30)
+#define AES_EFUSE_ENABLE               BIT(31)
+#define HDMI_SYS_CFG20         0x004
+#define DEEP_COLOR_MODE_MASK           (3 << 1)
+#define COLOR_8BIT_MODE                        (0 << 1)
+#define COLOR_10BIT_MODE               (1 << 1)
+#define COLOR_12BIT_MODE               (2 << 1)
+#define COLOR_16BIT_MODE               (3 << 1)
+#define DEEP_COLOR_EN                  BIT(0)
+#define HDMI_AUDIO_TEST_SEL            BIT(8)
+#define HDMI2P0_EN                     BIT(11)
+#define HDMI_OUT_FIFO_EN               BIT(16)
+#define HDMI_OUT_FIFO_CLK_INV          BIT(17)
+#define MHL_MODE_ON                    BIT(28)
+#define MHL_PP_MODE                    BIT(29)
+#define MHL_SYNC_AUTO_EN               BIT(30)
+#define HDMI_PCLK_FREE_RUN             BIT(31)
+
+#define MTK_SIP_SET_AUTHORIZED_SECURE_REG 0x82000001
+#endif
index cf8f38d39e1017794c8cac8ea29dc03516b048f8..1c366f8cb2d0ec3bc16cf8a8e499474d7bc3d956 100644 (file)
@@ -431,7 +431,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
        phy_set_drvdata(phy, mipi_tx);
 
        phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-       if (IS_ERR(phy)) {
+       if (IS_ERR(phy_provider)) {
                ret = PTR_ERR(phy_provider);
                return ret;
        }
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
new file mode 100644 (file)
index 0000000..8a24754
--- /dev/null
@@ -0,0 +1,515 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define HDMI_CON0              0x00
+#define RG_HDMITX_PLL_EN               BIT(31)
+#define RG_HDMITX_PLL_FBKDIV           (0x7f << 24)
+#define PLL_FBKDIV_SHIFT               24
+#define RG_HDMITX_PLL_FBKSEL           (0x3 << 22)
+#define PLL_FBKSEL_SHIFT               22
+#define RG_HDMITX_PLL_PREDIV           (0x3 << 20)
+#define PREDIV_SHIFT                   20
+#define RG_HDMITX_PLL_POSDIV           (0x3 << 18)
+#define POSDIV_SHIFT                   18
+#define RG_HDMITX_PLL_RST_DLY          (0x3 << 16)
+#define RG_HDMITX_PLL_IR               (0xf << 12)
+#define PLL_IR_SHIFT                   12
+#define RG_HDMITX_PLL_IC               (0xf << 8)
+#define PLL_IC_SHIFT                   8
+#define RG_HDMITX_PLL_BP               (0xf << 4)
+#define PLL_BP_SHIFT                   4
+#define RG_HDMITX_PLL_BR               (0x3 << 2)
+#define PLL_BR_SHIFT                   2
+#define RG_HDMITX_PLL_BC               (0x3 << 0)
+#define PLL_BC_SHIFT                   0
+#define HDMI_CON1              0x04
+#define RG_HDMITX_PLL_DIVEN            (0x7 << 29)
+#define PLL_DIVEN_SHIFT                        29
+#define RG_HDMITX_PLL_AUTOK_EN         BIT(28)
+#define RG_HDMITX_PLL_AUTOK_KF         (0x3 << 26)
+#define RG_HDMITX_PLL_AUTOK_KS         (0x3 << 24)
+#define RG_HDMITX_PLL_AUTOK_LOAD       BIT(23)
+#define RG_HDMITX_PLL_BAND             (0x3f << 16)
+#define RG_HDMITX_PLL_REF_SEL          BIT(15)
+#define RG_HDMITX_PLL_BIAS_EN          BIT(14)
+#define RG_HDMITX_PLL_BIAS_LPF_EN      BIT(13)
+#define RG_HDMITX_PLL_TXDIV_EN         BIT(12)
+#define RG_HDMITX_PLL_TXDIV            (0x3 << 10)
+#define PLL_TXDIV_SHIFT                        10
+#define RG_HDMITX_PLL_LVROD_EN         BIT(9)
+#define RG_HDMITX_PLL_MONVC_EN         BIT(8)
+#define RG_HDMITX_PLL_MONCK_EN         BIT(7)
+#define RG_HDMITX_PLL_MONREF_EN                BIT(6)
+#define RG_HDMITX_PLL_TST_EN           BIT(5)
+#define RG_HDMITX_PLL_TST_CK_EN                BIT(4)
+#define RG_HDMITX_PLL_TST_SEL          (0xf << 0)
+#define HDMI_CON2              0x08
+#define RGS_HDMITX_PLL_AUTOK_BAND      (0x7f << 8)
+#define RGS_HDMITX_PLL_AUTOK_FAIL      BIT(1)
+#define RG_HDMITX_EN_TX_CKLDO          BIT(0)
+#define HDMI_CON3              0x0c
+#define RG_HDMITX_SER_EN               (0xf << 28)
+#define RG_HDMITX_PRD_EN               (0xf << 24)
+#define RG_HDMITX_PRD_IMP_EN           (0xf << 20)
+#define RG_HDMITX_DRV_EN               (0xf << 16)
+#define RG_HDMITX_DRV_IMP_EN           (0xf << 12)
+#define DRV_IMP_EN_SHIFT               12
+#define RG_HDMITX_MHLCK_FORCE          BIT(10)
+#define RG_HDMITX_MHLCK_PPIX_EN                BIT(9)
+#define RG_HDMITX_MHLCK_EN             BIT(8)
+#define RG_HDMITX_SER_DIN_SEL          (0xf << 4)
+#define RG_HDMITX_SER_5T1_BIST_EN      BIT(3)
+#define RG_HDMITX_SER_BIST_TOG         BIT(2)
+#define RG_HDMITX_SER_DIN_TOG          BIT(1)
+#define RG_HDMITX_SER_CLKDIG_INV       BIT(0)
+#define HDMI_CON4              0x10
+#define RG_HDMITX_PRD_IBIAS_CLK                (0xf << 24)
+#define RG_HDMITX_PRD_IBIAS_D2         (0xf << 16)
+#define RG_HDMITX_PRD_IBIAS_D1         (0xf << 8)
+#define RG_HDMITX_PRD_IBIAS_D0         (0xf << 0)
+#define PRD_IBIAS_CLK_SHIFT            24
+#define PRD_IBIAS_D2_SHIFT             16
+#define PRD_IBIAS_D1_SHIFT             8
+#define PRD_IBIAS_D0_SHIFT             0
+#define HDMI_CON5              0x14
+#define RG_HDMITX_DRV_IBIAS_CLK                (0x3f << 24)
+#define RG_HDMITX_DRV_IBIAS_D2         (0x3f << 16)
+#define RG_HDMITX_DRV_IBIAS_D1         (0x3f << 8)
+#define RG_HDMITX_DRV_IBIAS_D0         (0x3f << 0)
+#define DRV_IBIAS_CLK_SHIFT            24
+#define DRV_IBIAS_D2_SHIFT             16
+#define DRV_IBIAS_D1_SHIFT             8
+#define DRV_IBIAS_D0_SHIFT             0
+#define HDMI_CON6              0x18
+#define RG_HDMITX_DRV_IMP_CLK          (0x3f << 24)
+#define RG_HDMITX_DRV_IMP_D2           (0x3f << 16)
+#define RG_HDMITX_DRV_IMP_D1           (0x3f << 8)
+#define RG_HDMITX_DRV_IMP_D0           (0x3f << 0)
+#define DRV_IMP_CLK_SHIFT              24
+#define DRV_IMP_D2_SHIFT               16
+#define DRV_IMP_D1_SHIFT               8
+#define DRV_IMP_D0_SHIFT               0
+#define HDMI_CON7              0x1c
+#define RG_HDMITX_MHLCK_DRV_IBIAS      (0x1f << 27)
+#define RG_HDMITX_SER_DIN              (0x3ff << 16)
+#define RG_HDMITX_CHLDC_TST            (0xf << 12)
+#define RG_HDMITX_CHLCK_TST            (0xf << 8)
+#define RG_HDMITX_RESERVE              (0xff << 0)
+#define HDMI_CON8              0x20
+#define RGS_HDMITX_2T1_LEV             (0xf << 16)
+#define RGS_HDMITX_2T1_EDG             (0xf << 12)
+#define RGS_HDMITX_5T1_LEV             (0xf << 8)
+#define RGS_HDMITX_5T1_EDG             (0xf << 4)
+#define RGS_HDMITX_PLUG_TST            BIT(0)
+
+struct mtk_hdmi_phy {
+       void __iomem *regs;
+       struct device *dev;
+       struct clk *pll;
+       struct clk_hw pll_hw;
+       unsigned long pll_rate;
+       u8 drv_imp_clk;
+       u8 drv_imp_d2;
+       u8 drv_imp_d1;
+       u8 drv_imp_d0;
+       u32 ibias;
+       u32 ibias_up;
+};
+
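+/*
+ * Per-rate PLL parameter tables. The rows follow the 27/74/148 MHz TMDS
+ * base rates noted in the comments; the four columns presumably select
+ * deep-color variants. Note that mtk_hdmi_pll_set_rate() below programs
+ * fixed values rather than indexing these tables.
+ */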
+static const u8 PREDIV[3][4] = {
+       {0x0, 0x0, 0x0, 0x0},   /* 27 MHz */
+       {0x1, 0x1, 0x1, 0x1},   /* 74 MHz */
+       {0x1, 0x1, 0x1, 0x1}    /* 148 MHz */
+};
+
+static const u8 TXDIV[3][4] = {
+       {0x3, 0x3, 0x3, 0x2},   /* 27 MHz */
+       {0x2, 0x1, 0x1, 0x1},   /* 74 MHz */
+       {0x1, 0x0, 0x0, 0x0}    /* 148 MHz */
+};
+
+static const u8 FBKSEL[3][4] = {
+       {0x1, 0x1, 0x1, 0x1},   /* 27 MHz */
+       {0x1, 0x0, 0x1, 0x1},   /* 74 MHz */
+       {0x1, 0x0, 0x1, 0x1}    /* 148 MHz */
+};
+
+static const u8 FBKDIV[3][4] = {
+       {19, 24, 29, 19},       /* 27 MHz */
+       {19, 24, 14, 19},       /* 74 MHz */
+       {19, 24, 14, 19}        /* 148 MHz */
+};
+
+static const u8 DIVEN[3][4] = {
+       {0x2, 0x1, 0x1, 0x2},   /* 27 MHz */
+       {0x2, 0x2, 0x2, 0x2},   /* 74 MHz */
+       {0x2, 0x2, 0x2, 0x2}    /* 148 MHz */
+};
+
+static const u8 HTPLLBP[3][4] = {
+       {0xc, 0xc, 0x8, 0xc},   /* 27 MHz */
+       {0xc, 0xf, 0xf, 0xc},   /* 74 MHz */
+       {0xc, 0xf, 0xf, 0xc}    /* 148 MHz */
+};
+
+static const u8 HTPLLBC[3][4] = {
+       {0x2, 0x3, 0x3, 0x2},   /* 27 MHz */
+       {0x2, 0x3, 0x3, 0x2},   /* 74 MHz */
+       {0x2, 0x3, 0x3, 0x2}    /* 148 MHz */
+};
+
+static const u8 HTPLLBR[3][4] = {
+       {0x1, 0x1, 0x0, 0x1},   /* 27 MHz */
+       {0x1, 0x2, 0x2, 0x1},   /* 74 MHz */
+       {0x1, 0x2, 0x2, 0x1}    /* 148 MHz */
+};
+
+static void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+                                   u32 bits)
+{
+       void __iomem *reg = hdmi_phy->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp &= ~bits;
+       writel(tmp, reg);
+}
+
+static void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+                                 u32 bits)
+{
+       void __iomem *reg = hdmi_phy->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp |= bits;
+       writel(tmp, reg);
+}
+
+static void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+                             u32 val, u32 mask)
+{
+       void __iomem *reg = hdmi_phy->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp = (tmp & ~mask) | (val & mask);
+       writel(tmp, reg);
+}
+
+static inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
+{
+       return container_of(hw, struct mtk_hdmi_phy, pll_hw);
+}
+
+static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       dev_dbg(hdmi_phy->dev, "%s\n", __func__);
+
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+       usleep_range(100, 150);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+       usleep_range(100, 150);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+
+       return 0;
+}
+
+static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       dev_dbg(hdmi_phy->dev, "%s\n", __func__);
+
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+       usleep_range(100, 150);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+       usleep_range(100, 150);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+       usleep_range(100, 150);
+}
+
+static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+       unsigned int pre_div;
+       unsigned int div;
+
+       dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
+               rate, parent_rate);
+
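+       /* Pick the PLL pre-divider and TMDS divider for the three supported
+        * pixel clock bands (up to 27 MHz, up to 74.25 MHz, above that).
+        */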
+       if (rate <= 27000000) {
+               pre_div = 0;
+               div = 3;
+       } else if (rate <= 74250000) {
+               pre_div = 1;
+               div = 2;
+       } else {
+               pre_div = 1;
+               div = 1;
+       }
+
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+                         (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+                         (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT),
+                         RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
+                         (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+                         (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT),
+                         RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
+                         (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+                         (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) |
+                         (0x1 << PLL_BR_SHIFT),
+                         RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
+                         RG_HDMITX_PLL_BR);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
+                         (0x3 << PRD_IBIAS_CLK_SHIFT) |
+                         (0x3 << PRD_IBIAS_D2_SHIFT) |
+                         (0x3 << PRD_IBIAS_D1_SHIFT) |
+                         (0x3 << PRD_IBIAS_D0_SHIFT),
+                         RG_HDMITX_PRD_IBIAS_CLK |
+                         RG_HDMITX_PRD_IBIAS_D2 |
+                         RG_HDMITX_PRD_IBIAS_D1 |
+                         RG_HDMITX_PRD_IBIAS_D0);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
+                         (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
+                         (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
+                         (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
+                         (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) |
+                         (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT),
+                         RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
+                         RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
+                         (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) |
+                         (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) |
+                         (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) |
+                         (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT),
+                         RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
+                         RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0);
+       return 0;
+}
+
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       hdmi_phy->pll_rate = rate;
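+       /* Rates above 74.25 MHz run the PLL from half the target rate, so
+        * steer the parent clock accordingly.
+        */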
+       if (rate <= 74250000)
+               *parent_rate = rate;
+       else
+               *parent_rate = rate / 2;
+
+       return rate;
+}
+
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       return hdmi_phy->pll_rate;
+}
+
+static const struct clk_ops mtk_hdmi_pll_ops = {
+       .prepare = mtk_hdmi_pll_prepare,
+       .unprepare = mtk_hdmi_pll_unprepare,
+       .set_rate = mtk_hdmi_pll_set_rate,
+       .round_rate = mtk_hdmi_pll_round_rate,
+       .recalc_rate = mtk_hdmi_pll_recalc_rate,
+};
+
+static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
+                             RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
+                             RG_HDMITX_DRV_EN);
+       usleep_range(100, 150);
+}
+
+static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
+                               RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
+                               RG_HDMITX_SER_EN);
+}
+
+static int mtk_hdmi_phy_power_on(struct phy *phy)
+{
+       struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+       int ret;
+
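+       /* Spin up the TMDS PLL first, then enable the analog output stages. */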
+       ret = clk_prepare_enable(hdmi_phy->pll);
+       if (ret < 0)
+               return ret;
+
+       mtk_hdmi_phy_enable_tmds(hdmi_phy);
+
+       return 0;
+}
+
+static int mtk_hdmi_phy_power_off(struct phy *phy)
+{
+       struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+
+       mtk_hdmi_phy_disable_tmds(hdmi_phy);
+       clk_disable_unprepare(hdmi_phy->pll);
+
+       return 0;
+}
+
+static const struct phy_ops mtk_hdmi_phy_ops = {
+       .power_on = mtk_hdmi_phy_power_on,
+       .power_off = mtk_hdmi_phy_power_off,
+       .owner = THIS_MODULE,
+};
+
+static int mtk_hdmi_phy_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mtk_hdmi_phy *hdmi_phy;
+       struct resource *mem;
+       struct clk *ref_clk;
+       const char *ref_clk_name;
+       struct clk_init_data clk_init = {
+               .ops = &mtk_hdmi_pll_ops,
+               .num_parents = 1,
+               .parent_names = (const char * const *)&ref_clk_name,
+               .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
+       };
+       struct phy *phy;
+       struct phy_provider *phy_provider;
+       int ret;
+
+       hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
+       if (!hdmi_phy)
+               return -ENOMEM;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       hdmi_phy->regs = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(hdmi_phy->regs)) {
+               ret = PTR_ERR(hdmi_phy->regs);
+               dev_err(dev, "Failed to get memory resource: %d\n", ret);
+               return ret;
+       }
+
+       ref_clk = devm_clk_get(dev, "pll_ref");
+       if (IS_ERR(ref_clk)) {
+               ret = PTR_ERR(ref_clk);
+               dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
+                       ret);
+               return ret;
+       }
+       ref_clk_name = __clk_get_name(ref_clk);
+
+       ret = of_property_read_string(dev->of_node, "clock-output-names",
+                                     &clk_init.name);
+       if (ret < 0) {
+               dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
+               return ret;
+       }
+
+       hdmi_phy->pll_hw.init = &clk_init;
+       hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
+       if (IS_ERR(hdmi_phy->pll)) {
+               ret = PTR_ERR(hdmi_phy->pll);
+               dev_err(dev, "Failed to register PLL: %d\n", ret);
+               return ret;
+       }
+
+       ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
+                                  &hdmi_phy->ibias);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
+               return ret;
+       }
+
+       ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
+                                  &hdmi_phy->ibias_up);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
+               return ret;
+       }
+
+       dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
+       hdmi_phy->drv_imp_clk = 0x30;
+       hdmi_phy->drv_imp_d2 = 0x30;
+       hdmi_phy->drv_imp_d1 = 0x30;
+       hdmi_phy->drv_imp_d0 = 0x30;
+
+       phy = devm_phy_create(dev, NULL, &mtk_hdmi_phy_ops);
+       if (IS_ERR(phy)) {
+               dev_err(dev, "Failed to create HDMI PHY\n");
+               return PTR_ERR(phy);
+       }
+       phy_set_drvdata(phy, hdmi_phy);
+
+       phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+       if (IS_ERR(phy_provider))
+               return PTR_ERR(phy_provider);
+
+       hdmi_phy->dev = dev;
+       return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
+                                  hdmi_phy->pll);
+}
+
+static int mtk_hdmi_phy_remove(struct platform_device *pdev)
+{
+       return 0;
+}
+
+static const struct of_device_id mtk_hdmi_phy_match[] = {
+       { .compatible = "mediatek,mt8173-hdmi-phy", },
+       {},
+};
+
+struct platform_driver mtk_hdmi_phy_driver = {
+       .probe = mtk_hdmi_phy_probe,
+       .remove = mtk_hdmi_phy_remove,
+       .driver = {
+               .name = "mediatek-hdmi-phy",
+               .of_match_table = mtk_hdmi_phy_match,
+       },
+};
+
+MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT8173 HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
index 3a1c5fbae54a5486252e91dd4ae1c0f9fdf95a59..520e5e668d6cefd1a6204c9b7626704ccf5f27bf 100644 (file)
@@ -1,11 +1,7 @@
 config DRM_MGAG200
        tristate "Kernel modesetting driver for MGA G200 server engines"
        depends on DRM && PCI
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
        help
         This is a KMS driver for the MGA G200 server chips, it
index ebb470ff7200c800a44f1db3d66a72bc416cdb0f..2b4b125eebc3ef9e5987d3bcc32d429f5b15ff8e 100644 (file)
@@ -101,7 +101,7 @@ static struct drm_driver driver = {
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
 
-       .gem_free_object = mgag200_gem_free_object,
+       .gem_free_object_unlocked = mgag200_gem_free_object,
        .dumb_create = mgag200_dumb_create,
        .dumb_map_offset = mgag200_dumb_mmap_offset,
        .dumb_destroy = drm_gem_dumb_destroy,
index 14e64e08909ed1769e39d88d3a10124bfe2c2a2a..6b21cb27e1cc77fe0476017c26676c786c5bca8f 100644 (file)
@@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
                        }
                }
 
-               fvv = pllreffreq * testn / testm;
+               fvv = pllreffreq * (n + 1) / (m + 1);
                fvv = (fvv - 800000) / 50000;
 
                if (fvv > 15)
@@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
        WREG_DAC(MGA1064_PIX_PLLC_M, m);
        WREG_DAC(MGA1064_PIX_PLLC_N, n);
        WREG_DAC(MGA1064_PIX_PLLC_P, p);
+
+       if (mdev->unique_rev_id >= 0x04) {
+               WREG_DAC(0x1a, 0x09);
+               msleep(20);
+               WREG_DAC(0x1a, 0x01);
+
+       }
+
        return 0;
 }
 
@@ -1344,19 +1352,20 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
  * use this for 8-bit mode so can't perform smooth fades on deeper modes,
  * but it's a requirement that we provide the function
  */
-static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-                                 u16 *blue, uint32_t start, uint32_t size)
+static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                             u16 *blue, uint32_t size)
 {
        struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
-       int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
        int i;
 
-       for (i = start; i < end; i++) {
+       for (i = 0; i < size; i++) {
                mga_crtc->lut_r[i] = red[i] >> 8;
                mga_crtc->lut_g[i] = green[i] >> 8;
                mga_crtc->lut_b[i] = blue[i] >> 8;
        }
        mga_crtc_load_lut(crtc);
+
+       return 0;
 }
 
 /* Simple cleanup function */
index 9d5083d0f1eec7a0c84c2f4e9bb7746e4f21e785..68268e55d595c343eed84053cb5bc19b4918f529 100644 (file)
@@ -186,17 +186,6 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
 {
 }
 
-static int mgag200_bo_move(struct ttm_buffer_object *bo,
-                      bool evict, bool interruptible,
-                      bool no_wait_gpu,
-                      struct ttm_mem_reg *new_mem)
-{
-       int r;
-       r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-       return r;
-}
-
-
 static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
 {
        ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
        .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
        .init_mem_type = mgag200_bo_init_mem_type,
        .evict_flags = mgag200_bo_evict_flags,
-       .move = mgag200_bo_move,
+       .move = NULL,
        .verify_access = mgag200_bo_verify_access,
        .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
        .io_mem_free = &mgag200_ttm_io_mem_free,
index 167a4971f47c59d20a29b86b9a98a651426055ab..7c7a0314a7563f46a62b1ef4f156aba7615ebc03 100644 (file)
@@ -10,6 +10,7 @@ config DRM_MSM
        select SHMEM
        select TMPFS
        select QCOM_SCM
+       select SND_SOC_HDMI_CODEC if SND_SOC
        default y
        help
          DRM/KMS driver for MSM/snapdragon.
index 60cb02624dc0b0b1dd14cb78c41181589529c999..4e2806cf778c9d6f654b49c3d8e1a6bd3f6af222 100644 (file)
@@ -35,6 +35,7 @@ msm-y := \
        mdp/mdp5/mdp5_crtc.o \
        mdp/mdp5/mdp5_encoder.o \
        mdp/mdp5/mdp5_irq.o \
+       mdp/mdp5/mdp5_mdss.o \
        mdp/mdp5/mdp5_kms.o \
        mdp/mdp5/mdp5_plane.o \
        mdp/mdp5/mdp5_smp.o \
@@ -45,6 +46,7 @@ msm-y := \
        msm_fence.o \
        msm_gem.o \
        msm_gem_prime.o \
+       msm_gem_shrinker.o \
        msm_gem_submit.o \
        msm_gpu.o \
        msm_iommu.o \
index fbe304ee6c809548bf734be1e9ee7d56a1554392..f386f463278d15e865f699f0cc563c37f5de8c9d 100644 (file)
@@ -139,7 +139,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = gpu->rb;
-       unsigned i, ibs = 0;
+       unsigned i;
 
        for (i = 0; i < submit->nr_cmds; i++) {
                switch (submit->cmd[i].type) {
@@ -155,18 +155,11 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                                CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
                        OUT_RING(ring, submit->cmd[i].iova);
                        OUT_RING(ring, submit->cmd[i].size);
-                       ibs++;
+                       OUT_PKT2(ring);
                        break;
                }
        }
 
-       /* on a320, at least, we seem to need to pad things out to an
-        * even number of qwords to avoid issue w/ CP hanging on wrap-
-        * around:
-        */
-       if (ibs % 2)
-               OUT_PKT2(ring);
-
        OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
        OUT_RING(ring, submit->fence->seqno);
 
@@ -407,8 +400,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                return ret;
        }
 
-       adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
-       if (!adreno_gpu->memptrs) {
+       adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
+       if (IS_ERR(adreno_gpu->memptrs)) {
                dev_err(drm->dev, "could not vmap memptrs\n");
                return -ENOMEM;
        }
@@ -426,8 +419,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 void adreno_gpu_cleanup(struct adreno_gpu *gpu)
 {
        if (gpu->memptrs_bo) {
+               if (gpu->memptrs)
+                       msm_gem_put_vaddr(gpu->memptrs_bo);
+
                if (gpu->memptrs_iova)
                        msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+
                drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
        }
        release_firmware(gpu->pm4);
index 6edcd6f57e70d4da4170f419d62fc5669a36ef29..ec572f8389edb86dea1850984c84260e53c17a29 100644 (file)
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
        struct platform_device *phy_pdev;
        struct device_node *phy_node;
 
-       phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0);
+       phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
        if (!phy_node) {
                dev_err(&pdev->dev, "cannot find phy device\n");
                return -ENXIO;
index 93c1ee094eac867074c3da9be5b38970703c657f..63436d8ee470ea1ca98728ac7707d8ee141ecb9c 100644 (file)
@@ -29,6 +29,8 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
        },
        .bus_clk_names = dsi_v2_bus_clk_names,
        .num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
+       .io_start = { 0x4700000, 0x5800000 },
+       .num_dsi = 2,
 };
 
 static const char * const dsi_6g_bus_clk_names[] = {
@@ -48,6 +50,8 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
        },
        .bus_clk_names = dsi_6g_bus_clk_names,
        .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+       .io_start = { 0xfd922800, 0xfd922b00 },
+       .num_dsi = 2,
 };
 
 static const char * const dsi_8916_bus_clk_names[] = {
@@ -66,6 +70,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
        },
        .bus_clk_names = dsi_8916_bus_clk_names,
        .num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
+       .io_start = { 0x1a98000 },
+       .num_dsi = 1,
 };
 
 static const struct msm_dsi_config msm8994_dsi_cfg = {
@@ -84,6 +90,8 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
        },
        .bus_clk_names = dsi_6g_bus_clk_names,
        .num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+       .io_start = { 0xfd998000, 0xfd9a0000 },
+       .num_dsi = 2,
 };
 
 static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
index a68c836744a3d1f223447905b380b5299d5f4d1d..eeacc323249481c14fc75d477824420fdd56637e 100644 (file)
@@ -34,6 +34,8 @@ struct msm_dsi_config {
        struct dsi_reg_config reg_cfg;
        const char * const *bus_clk_names;
        const int num_bus_clks;
+       const resource_size_t io_start[DSI_MAX];
+       const int num_dsi;
 };
 
 struct msm_dsi_cfg_handler {
index a3e47ad83eb398f1e1c803eb86d0c2a8b13885d5..f05ed0e1f3d655d0009438fd923e9438c79be1d5 100644 (file)
@@ -1066,7 +1066,7 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
        }
 
        if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
-               data = msm_gem_vaddr(msm_host->tx_gem_obj);
+               data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
                if (IS_ERR(data)) {
                        ret = PTR_ERR(data);
                        pr_err("%s: get vaddr failed, %d\n", __func__, ret);
@@ -1094,6 +1094,9 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
        if (packet.size < len)
                memset(data + packet.size, 0xff, len - packet.size);
 
+       if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
+               msm_gem_put_vaddr(msm_host->tx_gem_obj);
+
        return len;
 }
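
(Illustration only — not part of the patch.) This hunk, like the adreno and msm_dsi_host changes elsewhere in this section, moves callers from the old msm_gem_vaddr() to a paired msm_gem_get_vaddr()/msm_gem_put_vaddr() API, making kernel mappings reference-counted. A minimal standalone sketch of the get/put discipline, with a hypothetical counter rather than the real GEM code:

#include <assert.h>

struct obj { int vmap_count; };

static void *obj_get_vaddr(struct obj *o)
{
	o->vmap_count++;		/* take a mapping reference */
	return (void *)o;		/* stand-in for the CPU address */
}

static void obj_put_vaddr(struct obj *o)
{
	assert(o->vmap_count > 0);	/* every put must pair with a get */
	o->vmap_count--;
}

int main(void)
{
	struct obj o = { 0 };
	void *vaddr = obj_get_vaddr(&o);	/* map */

	(void)vaddr;				/* ... fill the DMA command buffer ... */
	obj_put_vaddr(&o);			/* drop the mapping, as dsi_cmd_dma_add now does */
	assert(o.vmap_count == 0);
	return 0;
}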
 
@@ -1543,7 +1546,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
        u32 lane_map[4];
        int ret, i, len, num_lanes;
 
-       prop = of_find_property(ep, "qcom,data-lane-map", &len);
+       prop = of_find_property(ep, "data-lanes", &len);
        if (!prop) {
                dev_dbg(dev, "failed to find data lane mapping\n");
                return -EINVAL;
@@ -1558,7 +1561,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 
        msm_host->num_data_lanes = num_lanes;
 
-       ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map,
+       ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
                                         num_lanes);
        if (ret) {
                dev_err(dev, "failed to read lane data\n");
@@ -1573,8 +1576,19 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
                const int *swap = supported_data_lane_swaps[i];
                int j;
 
+               /*
+                * The data-lanes array we get from DT has a logical->physical
+                * mapping. The "data lane swap" register field represents
+                * supported configurations in a physical->logical mapping.
+                * Translate the DT mapping to what we understand and find a
+                * configuration that works.
+                */
                for (j = 0; j < num_lanes; j++) {
-                       if (swap[j] != lane_map[j])
+                       if (lane_map[j] < 0 || lane_map[j] > 3)
+                               dev_err(dev, "bad physical lane entry %u\n",
+                                       lane_map[j]);
+
+                       if (swap[lane_map[j]] != j)
                                break;
                }
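
(Illustration only — not part of the patch.) A minimal standalone sketch of the translation the new comment describes: lane_map[] stands in for the DT "data-lanes" property (logical -> physical) and swap[] for one supported_data_lane_swaps[] entry (physical -> logical); both arrays hold hypothetical values.

#include <stdio.h>

int main(void)
{
	/* hypothetical DT "data-lanes": logical lane j sits on physical lane lane_map[j] */
	const unsigned int lane_map[4] = { 3, 0, 1, 2 };
	/* hypothetical hardware entry: physical lane p carries logical lane swap[p] */
	const unsigned int swap[4] = { 1, 2, 3, 0 };
	int j, match = 1;

	for (j = 0; j < 4; j++)
		if (swap[lane_map[j]] != (unsigned int)j)	/* the same test the driver applies */
			match = 0;

	printf("entry %s the DT mapping\n", match ? "matches" : "does not match");
	return 0;
}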
 
@@ -1594,20 +1608,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
        struct device_node *endpoint, *device_node;
        int ret;
 
-       ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
-       if (ret) {
-               dev_err(dev, "%s: host index not specified, ret=%d\n",
-                       __func__, ret);
-               return ret;
-       }
-
        /*
-        * Get the first endpoint node. In our case, dsi has one output port
-        * to which the panel is connected. Don't return an error if a port
-        * isn't defined. It's possible that there is nothing connected to
-        * the dsi output.
+        * Get the endpoint of the output port of the DSI host. In our case,
+        * this is mapped to the port node with reg = 1. Don't return an error if
+        * the remote endpoint isn't defined. It's possible that there is
+        * nothing connected to the dsi output.
         */
-       endpoint = of_graph_get_next_endpoint(np, NULL);
+       endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
        if (!endpoint) {
                dev_dbg(dev, "%s: no endpoint\n", __func__);
                return 0;
@@ -1648,6 +1655,25 @@ err:
        return ret;
 }
 
+static int dsi_host_get_id(struct msm_dsi_host *msm_host)
+{
+       struct platform_device *pdev = msm_host->pdev;
+       const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+       struct resource *res;
+       int i;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
+       if (!res)
+               return -EINVAL;
+
+       for (i = 0; i < cfg->num_dsi; i++) {
+               if (cfg->io_start[i] == res->start)
+                       return i;
+       }
+
+       return -EINVAL;
+}
+
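
(Illustration only — not part of the patch.) dsi_host_get_id() above, and its PHY counterpart dsi_phy_get_id() further down, replace the removed "qcom,dsi-host-index"/"qcom,dsi-phy-index" DT properties: the index is recovered by matching the controller's "dsi_ctrl" MMIO start address against the per-SoC io_start table. A standalone sketch of that lookup, reusing the msm8974/apq8084 addresses from the config table above:

#include <stdio.h>

typedef unsigned long resource_size_t;	/* stand-in for the kernel typedef */

static int lookup_id(const resource_size_t *io_start, int num,
		     resource_size_t start)
{
	int i;

	for (i = 0; i < num; i++)
		if (io_start[i] == start)
			return i;
	return -1;	/* the driver returns -EINVAL here */
}

int main(void)
{
	const resource_size_t io_start[] = { 0xfd922800, 0xfd922b00 };

	/* the second controller's register block starts at 0xfd922b00 */
	printf("id = %d\n", lookup_id(io_start, 2, 0xfd922b00));	/* prints "id = 1" */
	return 0;
}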
 int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 {
        struct msm_dsi_host *msm_host = NULL;
@@ -1684,6 +1710,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
                goto fail;
        }
 
+       msm_host->id = dsi_host_get_id(msm_host);
+       if (msm_host->id < 0) {
+               ret = msm_host->id;
+               pr_err("%s: unable to identify DSI host index\n", __func__);
+               goto fail;
+       }
+
        /* fixup base address by io offset */
        msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
 
@@ -2245,9 +2278,9 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
        }
 
        msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
-       if (IS_ERR(msm_host->mode)) {
+       if (!msm_host->mode) {
                pr_err("%s: cannot duplicate mode\n", __func__);
-               return PTR_ERR(msm_host->mode);
+               return -ENOMEM;
        }
 
        return 0;
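
(Illustration only — not part of the patch.) The hunk above fixes a common error-handling mixup: drm_mode_duplicate() reports failure with a plain NULL, not an ERR_PTR-encoded errno, so the old IS_ERR() test could never fire. A minimal standalone sketch of the two conventions; the ERR_PTR/IS_ERR macros below are simplified stand-ins for the kernel's:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *dup_null_on_failure(void)   { return NULL; }		/* like drm_mode_duplicate() */
static void *map_errptr_on_failure(void) { return ERR_PTR(-12); }	/* like msm_gem_get_vaddr() */

int main(void)
{
	/* NULL (0) sits far below the top-of-address-space errno window ... */
	printf("IS_ERR(NULL)         = %d\n", IS_ERR(dup_null_on_failure()));	/* 0 */
	/* ... while an ERR_PTR-encoded errno lands inside it. */
	printf("IS_ERR(ERR_PTR(-12)) = %d\n", IS_ERR(map_errptr_on_failure()));	/* 1 */
	return 0;
}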
index e2f42d8ea29412e415577f33367d9494bd726ab0..f39386ed75e47d69827d1f62d6f20acecc9e7a75 100644 (file)
@@ -271,6 +271,30 @@ static const struct of_device_id dsi_phy_dt_match[] = {
        {}
 };
 
+/*
+ * Currently, we only support one SoC for each PHY type. When we have multiple
+ * SoCs for the same PHY, we can try to make the index searching a bit more
+ * clever.
+ */
+static int dsi_phy_get_id(struct msm_dsi_phy *phy)
+{
+       struct platform_device *pdev = phy->pdev;
+       const struct msm_dsi_phy_cfg *cfg = phy->cfg;
+       struct resource *res;
+       int i;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
+       if (!res)
+               return -EINVAL;
+
+       for (i = 0; i < cfg->num_dsi_phy; i++) {
+               if (cfg->io_start[i] == res->start)
+                       return i;
+       }
+
+       return -EINVAL;
+}
+
 static int dsi_phy_driver_probe(struct platform_device *pdev)
 {
        struct msm_dsi_phy *phy;
@@ -289,10 +313,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
        phy->cfg = match->data;
        phy->pdev = pdev;
 
-       ret = of_property_read_u32(dev->of_node,
-                               "qcom,dsi-phy-index", &phy->id);
-       if (ret) {
-               dev_err(dev, "%s: PHY index not specified, %d\n",
+       phy->id = dsi_phy_get_id(phy);
+       if (phy->id < 0) {
+               ret = phy->id;
+               dev_err(dev, "%s: couldn't identify PHY index, %d\n",
                        __func__, ret);
                goto fail;
        }
index 0d54ed00386dee2a1e58e2237c3d8384d33e65a6..f24a85439b942e08b051f018379873fad5581261 100644 (file)
@@ -38,6 +38,8 @@ struct msm_dsi_phy_cfg {
         * Fill default H/W values in illegal cells, e.g. cell {0, 1}.
         */
        bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+       const resource_size_t io_start[DSI_MAX];
+       const int num_dsi_phy;
 };
 
 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
index f4bc11af849a5ea23272c7f5e64ec58f70900815..c757e2070cac7f104b0e1aaf5037bf6c2d95e73c 100644 (file)
@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
        .ops = {
                .enable = dsi_20nm_phy_enable,
                .disable = dsi_20nm_phy_disable,
-       }
+       },
+       .io_start = { 0xfd998300, 0xfd9a0300 },
+       .num_dsi_phy = 2,
 };
 
index 96d1852af41868a79d0eceeffa66e44eb34e9f5f..63d7fba31380e4091487f7d0e4ecfdaf5910ce38 100644 (file)
@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
        },
+       .io_start = { 0xfd922b00, 0xfd923100 },
+       .num_dsi_phy = 2,
 };
 
 const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
@@ -160,5 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
        },
+       .io_start = { 0x1a98500 },
+       .num_dsi_phy = 1,
 };
 
index 213355a3e767a54d19498679cae2bb908992d7da..7bdb9de549680d3a11f704ec242f0fbaf0da6273 100644 (file)
@@ -192,4 +192,6 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
        },
+       .io_start = { 0x4700300, 0x5800300 },
+       .num_dsi_phy = 2,
 };
index 72360cd038c02f35f2e1fdc435fb5873ae03414c..5960628ceb93e47ea55bb0fba6173e98f618581b 100644 (file)
@@ -91,15 +91,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static struct drm_encoder *
-edp_connector_best_encoder(struct drm_connector *connector)
-{
-       struct edp_connector *edp_connector = to_edp_connector(connector);
-
-       DBG("");
-       return edp_connector->edp->encoder;
-}
-
 static const struct drm_connector_funcs edp_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = edp_connector_detect,
@@ -113,7 +104,6 @@ static const struct drm_connector_funcs edp_connector_funcs = {
 static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
        .get_modes = edp_connector_get_modes,
        .mode_valid = edp_connector_mode_valid,
-       .best_encoder = edp_connector_best_encoder,
 };
 
 /* initialize connector */
index 51b9ea552f9762c91c13f6a399b7314e2e9854c4..973720792236fe4dfd68327137dbc2202501058e 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_gpio.h>
 
+#include <sound/hdmi-codec.h>
 #include "hdmi.h"
 
 void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -434,6 +435,111 @@ static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
        return gpio;
 }
 
+/*
+ * HDMI audio codec callbacks
+ */
+static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
+                                   struct hdmi_codec_daifmt *daifmt,
+                                   struct hdmi_codec_params *params)
+{
+       struct hdmi *hdmi = dev_get_drvdata(dev);
+       unsigned int chan;
+       unsigned int channel_allocation = 0;
+       unsigned int rate;
+       unsigned int level_shift  = 0; /* 0dB */
+       bool down_mix = false;
+
+       dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+                params->sample_width, params->cea.channels);
+
+       switch (params->cea.channels) {
+       case 2:
+               /* FR and FL speakers */
+               channel_allocation  = 0;
+               chan = MSM_HDMI_AUDIO_CHANNEL_2;
+               break;
+       case 4:
+               /* FC, LFE, FR and FL speakers */
+               channel_allocation  = 0x3;
+               chan = MSM_HDMI_AUDIO_CHANNEL_4;
+               break;
+       case 6:
+               /* RR, RL, FC, LFE, FR and FL speakers */
+               channel_allocation  = 0x0B;
+               chan = MSM_HDMI_AUDIO_CHANNEL_6;
+               break;
+       case 8:
+               /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
+               channel_allocation  = 0x1F;
+               chan = MSM_HDMI_AUDIO_CHANNEL_8;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (params->sample_rate) {
+       case 32000:
+               rate = HDMI_SAMPLE_RATE_32KHZ;
+               break;
+       case 44100:
+               rate = HDMI_SAMPLE_RATE_44_1KHZ;
+               break;
+       case 48000:
+               rate = HDMI_SAMPLE_RATE_48KHZ;
+               break;
+       case 88200:
+               rate = HDMI_SAMPLE_RATE_88_2KHZ;
+               break;
+       case 96000:
+               rate = HDMI_SAMPLE_RATE_96KHZ;
+               break;
+       case 176400:
+               rate = HDMI_SAMPLE_RATE_176_4KHZ;
+               break;
+       case 192000:
+               rate = HDMI_SAMPLE_RATE_192KHZ;
+               break;
+       default:
+               dev_err(dev, "rate[%d] not supported!\n",
+                       params->sample_rate);
+               return -EINVAL;
+       }
+
+       msm_hdmi_audio_set_sample_rate(hdmi, rate);
+       msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
+                             level_shift, down_mix);
+
+       return 0;
+}
+
+static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+       struct hdmi *hdmi = dev_get_drvdata(dev);
+
+       msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
+}
+
+static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
+       .hw_params = msm_hdmi_audio_hw_params,
+       .audio_shutdown = msm_hdmi_audio_shutdown,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+       .ops = &msm_hdmi_audio_codec_ops,
+       .max_i2s_channels = 8,
+       .i2s = 1,
+};
+
+static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
+{
+       hdmi->audio_pdev = platform_device_register_data(dev,
+                                                        HDMI_CODEC_DRV_NAME,
+                                                        PLATFORM_DEVID_AUTO,
+                                                        &codec_data,
+                                                        sizeof(codec_data));
+       return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
+}
+
 static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        struct drm_device *drm = dev_get_drvdata(master);
@@ -441,7 +547,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
        static struct hdmi_platform_config *hdmi_cfg;
        struct hdmi *hdmi;
        struct device_node *of_node = dev->of_node;
-       int i;
+       int i, err;
 
        hdmi_cfg = (struct hdmi_platform_config *)
                        of_device_get_match_data(dev);
@@ -468,6 +574,12 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
                return PTR_ERR(hdmi);
        priv->hdmi = hdmi;
 
+       err = msm_hdmi_register_audio_driver(hdmi, dev);
+       if (err) {
+               DRM_ERROR("Failed to attach an audio codec: %d\n", err);
+               hdmi->audio_pdev = NULL;
+       }
+
        return 0;
 }
 
@@ -477,6 +589,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
        struct drm_device *drm = dev_get_drvdata(master);
        struct msm_drm_private *priv = drm->dev_private;
        if (priv->hdmi) {
+               if (priv->hdmi->audio_pdev)
+                       platform_device_unregister(priv->hdmi->audio_pdev);
+
                msm_hdmi_destroy(priv->hdmi);
                priv->hdmi = NULL;
        }
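
(Illustration only — not part of the patch.) The msm_hdmi_audio_hw_params() callback added above maps the CEA channel count onto one of the CEA-861 channel-allocation codes hard-coded in its switch. The same mapping as a tiny standalone table, with the speaker sets taken from the comments in the patch:

#include <stdio.h>

/* channel count -> CEA-861 channel allocation code, as in the switch above */
static int cea_channel_allocation(int channels)
{
	switch (channels) {
	case 2: return 0x00;	/* FL, FR */
	case 4: return 0x03;	/* FL, FR, LFE, FC */
	case 6: return 0x0b;	/* FL, FR, LFE, FC, RL, RR */
	case 8: return 0x1f;	/* FL, FR, LFE, FC, RL, RR, FLC, FRC */
	default: return -1;	/* unsupported, mirrors the -EINVAL path */
	}
}

int main(void)
{
	printf("6 channels -> CA 0x%02x\n", cea_channel_allocation(6));	/* 0x0b */
	return 0;
}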
index bc7ba0bdee072056526f3f1da8cc88c981ec1150..accc9a61611d35bf9718d66fe09ce6c6e40a549f 100644 (file)
@@ -50,6 +50,7 @@ struct hdmi_hdcp_ctrl;
 struct hdmi {
        struct drm_device *dev;
        struct platform_device *pdev;
+       struct platform_device *audio_pdev;
 
        const struct hdmi_platform_config *config;
 
@@ -210,6 +211,19 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
 /*
  * audio:
  */
+/* Supported HDMI Audio channels and rates */
+#define        MSM_HDMI_AUDIO_CHANNEL_2        0
+#define        MSM_HDMI_AUDIO_CHANNEL_4        1
+#define        MSM_HDMI_AUDIO_CHANNEL_6        2
+#define        MSM_HDMI_AUDIO_CHANNEL_8        3
+
+#define        HDMI_SAMPLE_RATE_32KHZ          0
+#define        HDMI_SAMPLE_RATE_44_1KHZ        1
+#define        HDMI_SAMPLE_RATE_48KHZ          2
+#define        HDMI_SAMPLE_RATE_88_2KHZ        3
+#define        HDMI_SAMPLE_RATE_96KHZ          4
+#define        HDMI_SAMPLE_RATE_176_4KHZ       5
+#define        HDMI_SAMPLE_RATE_192KHZ         6
 
 int msm_hdmi_audio_update(struct hdmi *hdmi);
 int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
index b15d72683112dc9bcf37f1cf1a8d5983a663c889..a2515b466ce51d771f7966e9e574deecd0bcdf19 100644 (file)
@@ -406,13 +406,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
        return 0;
 }
 
-static struct drm_encoder *
-msm_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
-       struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
-       return hdmi_connector->hdmi->encoder;
-}
-
 static const struct drm_connector_funcs hdmi_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = hdmi_connector_detect,
@@ -426,7 +419,6 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
 static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
        .get_modes = msm_hdmi_connector_get_modes,
        .mode_valid = msm_hdmi_connector_mode_valid,
-       .best_encoder = msm_hdmi_connector_best_encoder,
 };
 
 /* initialize connector */
index 0baaaaabd0027792fcba54d6f9ad0a157eada5ca..6e767979aab3fe5d258849b7aaddcb60f94f7d5a 100644 (file)
@@ -1430,7 +1430,7 @@ struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
 
 void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
 {
-       if (hdmi && hdmi->hdcp_ctrl) {
+       if (hdmi) {
                kfree(hdmi->hdcp_ctrl);
                hdmi->hdcp_ctrl = NULL;
        }
index 35ad78a1dc1c920f19692b0420368f852cad0096..24258e3025e31a3f8431eb78fcea5537a9dc2450 100644 (file)
@@ -23,7 +23,6 @@
 
 struct mdp4_dtv_encoder {
        struct drm_encoder base;
-       struct clk *src_clk;
        struct clk *hdmi_clk;
        struct clk *mdp_clk;
        unsigned long int pixclock;
@@ -179,7 +178,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
         */
        mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
 
-       clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
        clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
        clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
 
@@ -208,19 +206,21 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
 
        bs_set(mdp4_dtv_encoder, 1);
 
-       DBG("setting src_clk=%lu", pc);
+       DBG("setting mdp_clk=%lu", pc);
 
-       ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+       ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
        if (ret)
-               dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
-       clk_prepare_enable(mdp4_dtv_encoder->src_clk);
-       ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
-       if (ret)
-               dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+               dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+                       pc, ret);
+
        ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
        if (ret)
                dev_err(dev->dev, "failed to enable mdp_clk: %d\n", ret);
 
+       ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+       if (ret)
+               dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+
        mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
 
        mdp4_dtv_encoder->enabled = true;
@@ -235,7 +235,7 @@ static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
 long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
 {
        struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
-       return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
+       return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate);
 }
 
 /* initialize encoder */
@@ -257,13 +257,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
                         DRM_MODE_ENCODER_TMDS, NULL);
        drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
 
-       mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
-       if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
-               dev_err(dev->dev, "failed to get src_clk\n");
-               ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
-               goto fail;
-       }
-
        mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
        if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
                dev_err(dev->dev, "failed to get hdmi_clk\n");
@@ -271,9 +264,9 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
                goto fail;
        }
 
-       mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
+       mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
        if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
-               dev_err(dev->dev, "failed to get mdp_clk\n");
+               dev_err(dev->dev, "failed to get tv_clk\n");
                ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
                goto fail;
        }
index 67442d50a6c2161e5ea676c8b8e8983230f456fa..7b39e89fbc2b488adcfc90745732539e57669dad 100644 (file)
@@ -106,31 +106,27 @@ out:
 static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-       int i, ncrtcs = state->dev->mode_config.num_crtc;
+       int i;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
 
        mdp4_enable(mdp4_kms);
 
        /* see 119ecb7fd */
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-               if (!crtc)
-                       continue;
+       for_each_crtc_in_state(state, crtc, crtc_state, i)
                drm_crtc_vblank_get(crtc);
-       }
 }
 
 static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-       int i, ncrtcs = state->dev->mode_config.num_crtc;
+       int i;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
 
        /* see 119ecb7fd */
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-               if (!crtc)
-                       continue;
+       for_each_crtc_in_state(state, crtc, crtc_state, i)
                drm_crtc_vblank_put(crtc);
-       }
 
        mdp4_disable(mdp4_kms);
 }
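
(Illustration only — not part of the patch.) The two hunks above swap the open-coded scan over every mode_config CRTC for the for_each_crtc_in_state() iterator, which visits only the CRTCs actually carried in the atomic state. A rough standalone sketch of the bookkeeping the helper hides, using hypothetical minimal types rather than the real DRM structures:

#include <stdio.h>

struct crtc { const char *name; };

struct atomic_state {
	struct crtc *crtcs[4];	/* a slot is NULL when that CRTC is untouched */
};

int main(void)
{
	struct crtc c0 = { "crtc-0" }, c2 = { "crtc-2" };
	struct atomic_state state = { { &c0, NULL, &c2, NULL } };
	int i;

	/* old style: walk every slot and skip the NULLs by hand;
	 * the iterator macro hides exactly this pattern. */
	for (i = 0; i < 4; i++) {
		if (!state.crtcs[i])
			continue;
		printf("visit %s\n", state.crtcs[i]->name);
	}
	return 0;
}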
@@ -162,6 +158,7 @@ static const char * const iommu_ports[] = {
 static void mdp4_destroy(struct msm_kms *kms)
 {
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       struct device *dev = mdp4_kms->dev->dev;
        struct msm_mmu *mmu = mdp4_kms->mmu;
 
        if (mmu) {
@@ -171,8 +168,11 @@ static void mdp4_destroy(struct msm_kms *kms)
 
        if (mdp4_kms->blank_cursor_iova)
                msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
-       if (mdp4_kms->blank_cursor_bo)
-               drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+       drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+       if (mdp4_kms->rpm_enabled)
+               pm_runtime_disable(dev);
+
        kfree(mdp4_kms);
 }
 
@@ -440,7 +440,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
        struct mdp4_kms *mdp4_kms;
        struct msm_kms *kms = NULL;
        struct msm_mmu *mmu;
-       int ret;
+       int irq, ret;
 
        mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
        if (!mdp4_kms) {
@@ -461,6 +461,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                goto fail;
        }
 
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               ret = irq;
+               dev_err(dev->dev, "failed to get irq: %d\n", ret);
+               goto fail;
+       }
+
+       kms->irq = irq;
+
        /* NOTE: driver for this regulator still missing upstream; use
         * _get_exclusive() and ignore the error if it does not exist
         * (and hope that the bootloader left it on for us)
@@ -496,7 +505,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                goto fail;
        }
 
-       mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
+       mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
        if (IS_ERR(mdp4_kms->axi_clk)) {
                dev_err(dev->dev, "failed to get axi_clk\n");
                ret = PTR_ERR(mdp4_kms->axi_clk);
@@ -506,6 +515,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
        clk_set_rate(mdp4_kms->clk, config->max_clk);
        clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
 
+       pm_runtime_enable(dev->dev);
+       mdp4_kms->rpm_enabled = true;
+
        /* make sure things are off before attaching iommu (bootloader could
         * have left things on, in which case we'll start getting faults if
         * we don't disable):
index c5d045d5680d74f5ef91715a1c00759fe8288a13..25fb83997119c17e5b2617710a2324b7dae1d1bb 100644 (file)
@@ -47,6 +47,8 @@ struct mdp4_kms {
 
        struct mdp_irq error_handler;
 
+       bool rpm_enabled;
+
        /* empty/blank cursor bo to use when cursor is "disabled" */
        struct drm_gem_object *blank_cursor_bo;
        uint32_t blank_cursor_iova;
index 2648cd7631efc7738ea21e69aa205385a1160c26..353429b05733745977fc616944695962a6e76e62 100644 (file)
@@ -90,14 +90,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static struct drm_encoder *
-mdp4_lvds_connector_best_encoder(struct drm_connector *connector)
-{
-       struct mdp4_lvds_connector *mdp4_lvds_connector =
-                       to_mdp4_lvds_connector(connector);
-       return mdp4_lvds_connector->encoder;
-}
-
 static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = mdp4_lvds_connector_detect,
@@ -111,7 +103,6 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
 static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
        .get_modes = mdp4_lvds_connector_get_modes,
        .mode_valid = mdp4_lvds_connector_mode_valid,
-       .best_encoder = mdp4_lvds_connector_best_encoder,
 };
 
 /* initialize connector */
index b275ce11b24b48de40c9c2921a1eb3a1cff766d0..ca6ca30650a0ae6480380884600b435e2f06e0d7 100644 (file)
@@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml                 (    676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml            (  20915 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml            (  37194 bytes, from 2015-09-18 12:07:28)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml             (  27887 bytes, from 2015-10-22 16:34:52)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml            (    602 bytes, from 2015-10-22 16:35:02)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml         (   1686 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml         (    600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml           (  41472 bytes, from 2016-01-22 18:18:18)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml             (  10416 bytes, from 2015-05-20 20:03:14)
-
-Copyright (C) 2013-2015 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml   (  36965 bytes, from 2016-05-10 05:06:30)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml (   1572 bytes, from 2016-05-09 06:32:54)
+- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml      (   2849 bytes, from 2016-01-07 08:45:55)
+
+Copyright (C) 2013-2016 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
 
@@ -198,118 +190,109 @@ static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
 #define MDSS_HW_INTR_STATUS_INTR_HDMI                          0x00000100
 #define MDSS_HW_INTR_STATUS_INTR_EDP                           0x00001000
 
-static inline uint32_t __offset_MDP(uint32_t idx)
-{
-       switch (idx) {
-               case 0: return (mdp5_cfg->mdp.base[0]);
-               default: return INVALID_IDX(idx);
-       }
-}
-static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-
-static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
-#define MDP5_MDP_HW_VERSION_STEP__MASK                         0x0000ffff
-#define MDP5_MDP_HW_VERSION_STEP__SHIFT                                0
-static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
+#define REG_MDP5_HW_VERSION                                    0x00000000
+#define MDP5_HW_VERSION_STEP__MASK                             0x0000ffff
+#define MDP5_HW_VERSION_STEP__SHIFT                            0
+static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val)
 {
-       return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK;
+       return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK;
 }
-#define MDP5_MDP_HW_VERSION_MINOR__MASK                                0x0fff0000
-#define MDP5_MDP_HW_VERSION_MINOR__SHIFT                       16
-static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val)
+#define MDP5_HW_VERSION_MINOR__MASK                            0x0fff0000
+#define MDP5_HW_VERSION_MINOR__SHIFT                           16
+static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val)
 {
-       return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK;
+       return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK;
 }
-#define MDP5_MDP_HW_VERSION_MAJOR__MASK                                0xf0000000
-#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT                       28
-static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val)
+#define MDP5_HW_VERSION_MAJOR__MASK                            0xf0000000
+#define MDP5_HW_VERSION_MAJOR__SHIFT                           28
+static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val)
 {
-       return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK;
+       return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK;
 }
 
-static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); }
-#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK                     0x000000ff
-#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT                    0
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
+#define REG_MDP5_DISP_INTF_SEL                                 0x00000004
+#define MDP5_DISP_INTF_SEL_INTF0__MASK                         0x000000ff
+#define MDP5_DISP_INTF_SEL_INTF0__SHIFT                                0
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
 {
-       return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
+       return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
 }
-#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK                     0x0000ff00
-#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT                    8
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF1__MASK                         0x0000ff00
+#define MDP5_DISP_INTF_SEL_INTF1__SHIFT                                8
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
 {
-       return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
+       return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
 }
-#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK                     0x00ff0000
-#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT                    16
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF2__MASK                         0x00ff0000
+#define MDP5_DISP_INTF_SEL_INTF2__SHIFT                                16
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
 {
-       return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
+       return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
 }
-#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK                     0xff000000
-#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT                    24
-static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
+#define MDP5_DISP_INTF_SEL_INTF3__MASK                         0xff000000
+#define MDP5_DISP_INTF_SEL_INTF3__SHIFT                                24
+static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
 {
-       return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
+       return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
 }
 
-static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_EN                                       0x00000010
 
-static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_STATUS                                   0x00000014
 
-static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); }
+#define REG_MDP5_INTR_CLEAR                                    0x00000018
 
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_EN                                  0x0000001c
 
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_STATUS                              0x00000020
 
-static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); }
+#define REG_MDP5_HIST_INTR_CLEAR                               0x00000024
 
-static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); }
-#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN             0x00000001
+#define REG_MDP5_SPARE_0                                       0x00000028
+#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN                 0x00000001
 
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; }
 
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; }
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK                 0x000000ff
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT                        0
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
+static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK                     0x000000ff
+#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT                    0
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
 {
-       return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
+       return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
 }
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK                 0x0000ff00
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT                        8
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK                     0x0000ff00
+#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT                    8
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
 {
-       return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
+       return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
 }
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK                 0x00ff0000
-#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT                        16
-static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK                     0x00ff0000
+#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT                    16
+static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
 {
-       return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
+       return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
 }
 
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
+static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; }
 
-static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; }
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK                 0x000000ff
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT                        0
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
+static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; }
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK                     0x000000ff
+#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT                    0
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
 {
-       return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK;
+       return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
 }
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK                 0x0000ff00
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT                        8
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK                     0x0000ff00
+#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT                    8
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
 {
-       return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK;
+       return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
 }
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK                 0x00ff0000
-#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT                        16
-static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK                     0x00ff0000
+#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT                    16
+static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
 {
-       return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK;
+       return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
 }
 
 static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
@@ -322,35 +305,35 @@ static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
                default: return INVALID_IDX(idx);
        }
 }
-static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); }
+static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
 
-static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
+static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
 
-static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; }
-#define MDP5_MDP_IGC_LUT_REG_VAL__MASK                         0x00000fff
-#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT                                0
-static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val)
+static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
+#define MDP5_IGC_LUT_REG_VAL__MASK                             0x00000fff
+#define MDP5_IGC_LUT_REG_VAL__SHIFT                            0
+static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
 {
-       return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK;
+       return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
 }
-#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE                      0x02000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0                    0x10000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1                    0x20000000
-#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2                    0x40000000
+#define MDP5_IGC_LUT_REG_INDEX_UPDATE                          0x02000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0                                0x10000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1                                0x20000000
+#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2                                0x40000000
 
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_EN(uint32_t i0) { return 0x000002f4 + __offset_MDP(i0); }
+#define REG_MDP5_SPLIT_DPL_EN                                  0x000002f4
 
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_UPPER(uint32_t i0) { return 0x000002f8 + __offset_MDP(i0); }
-#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL                   0x00000002
-#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN          0x00000004
-#define MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX              0x00000010
-#define MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX              0x00000100
+#define REG_MDP5_SPLIT_DPL_UPPER                               0x000002f8
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL                       0x00000002
+#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN              0x00000004
+#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX                  0x00000010
+#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX                  0x00000100
 
-static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_LOWER(uint32_t i0) { return 0x000003f0 + __offset_MDP(i0); }
-#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL                   0x00000002
-#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN          0x00000004
-#define MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC                 0x00000010
-#define MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC                 0x00000100
+#define REG_MDP5_SPLIT_DPL_LOWER                               0x000003f0
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL                       0x00000002
+#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN              0x00000004
+#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC                     0x00000010
+#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC                     0x00000100
 
 static inline uint32_t __offset_CTL(uint32_t idx)
 {
index 57f73f0c120d614e792dd0a32472c7662125c386..ac9e4cde13804f78333d5d6e101b37511949a5e8 100644 (file)
@@ -26,7 +26,6 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
        .name = "msm8x74v1",
        .mdp = {
                .count = 1,
-               .base = { 0x00100 },
                .caps = MDP_CAP_SMP |
                        0,
        },
@@ -41,12 +40,12 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
        },
        .ctl = {
                .count = 5,
-               .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+               .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
                .flush_hw_mask = 0x0003ffff,
        },
        .pipe_vig = {
                .count = 3,
-               .base = { 0x01200, 0x01600, 0x01a00 },
+               .base = { 0x01100, 0x01500, 0x01900 },
                .caps = MDP_PIPE_CAP_HFLIP |
                        MDP_PIPE_CAP_VFLIP |
                        MDP_PIPE_CAP_SCALE |
@@ -55,7 +54,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
        },
        .pipe_rgb = {
                .count = 3,
-               .base = { 0x01e00, 0x02200, 0x02600 },
+               .base = { 0x01d00, 0x02100, 0x02500 },
                .caps = MDP_PIPE_CAP_HFLIP |
                        MDP_PIPE_CAP_VFLIP |
                        MDP_PIPE_CAP_SCALE |
@@ -63,26 +62,26 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
        },
        .pipe_dma = {
                .count = 2,
-               .base = { 0x02a00, 0x02e00 },
+               .base = { 0x02900, 0x02d00 },
                .caps = MDP_PIPE_CAP_HFLIP |
                        MDP_PIPE_CAP_VFLIP |
                        0,
        },
        .lm = {
                .count = 5,
-               .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+               .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
                .nb_stages = 5,
        },
        .dspp = {
                .count = 3,
-               .base = { 0x04600, 0x04a00, 0x04e00 },
+               .base = { 0x04500, 0x04900, 0x04d00 },
        },
        .pp = {
                .count = 3,
-               .base = { 0x21b00, 0x21c00, 0x21d00 },
+               .base = { 0x21a00, 0x21b00, 0x21c00 },
        },
        .intf = {
-               .base = { 0x21100, 0x21300, 0x21500, 0x21700 },
+               .base = { 0x21000, 0x21200, 0x21400, 0x21600 },
                .connect = {
                        [0] = INTF_eDP,
                        [1] = INTF_DSI,
@@ -97,7 +96,6 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
        .name = "msm8x74",
        .mdp = {
                .count = 1,
-               .base = { 0x00100 },
                .caps = MDP_CAP_SMP |
                        0,
        },
@@ -112,48 +110,48 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
        },
        .ctl = {
                .count = 5,
-               .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+               .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
                .flush_hw_mask = 0x0003ffff,
        },
        .pipe_vig = {
                .count = 3,
-               .base = { 0x01200, 0x01600, 0x01a00 },
+               .base = { 0x01100, 0x01500, 0x01900 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
                                MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
                                MDP_PIPE_CAP_DECIMATION,
        },
        .pipe_rgb = {
                .count = 3,
-               .base = { 0x01e00, 0x02200, 0x02600 },
+               .base = { 0x01d00, 0x02100, 0x02500 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
                                MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
        },
        .pipe_dma = {
                .count = 2,
-               .base = { 0x02a00, 0x02e00 },
+               .base = { 0x02900, 0x02d00 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
        },
        .lm = {
                .count = 5,
-               .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+               .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
                .nb_stages = 5,
                .max_width = 2048,
                .max_height = 0xFFFF,
        },
        .dspp = {
                .count = 3,
-               .base = { 0x04600, 0x04a00, 0x04e00 },
+               .base = { 0x04500, 0x04900, 0x04d00 },
        },
        .ad = {
                .count = 2,
-               .base = { 0x13100, 0x13300 },
+               .base = { 0x13000, 0x13200 },
        },
        .pp = {
                .count = 3,
-               .base = { 0x12d00, 0x12e00, 0x12f00 },
+               .base = { 0x12c00, 0x12d00, 0x12e00 },
        },
        .intf = {
-               .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+               .base = { 0x12400, 0x12600, 0x12800, 0x12a00 },
                .connect = {
                        [0] = INTF_eDP,
                        [1] = INTF_DSI,
@@ -168,7 +166,6 @@ const struct mdp5_cfg_hw apq8084_config = {
        .name = "apq8084",
        .mdp = {
                .count = 1,
-               .base = { 0x00100 },
                .caps = MDP_CAP_SMP |
                        0,
        },
@@ -190,49 +187,49 @@ const struct mdp5_cfg_hw apq8084_config = {
        },
        .ctl = {
                .count = 5,
-               .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+               .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
                .flush_hw_mask = 0x003fffff,
        },
        .pipe_vig = {
                .count = 4,
-               .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+               .base = { 0x01100, 0x01500, 0x01900, 0x01d00 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
                                MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
                                MDP_PIPE_CAP_DECIMATION,
        },
        .pipe_rgb = {
                .count = 4,
-               .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+               .base = { 0x02100, 0x02500, 0x02900, 0x02d00 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
                                MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
        },
        .pipe_dma = {
                .count = 2,
-               .base = { 0x03200, 0x03600 },
+               .base = { 0x03100, 0x03500 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
        },
        .lm = {
                .count = 6,
-               .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+               .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
                .nb_stages = 5,
                .max_width = 2048,
                .max_height = 0xFFFF,
        },
        .dspp = {
                .count = 4,
-               .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+               .base = { 0x05100, 0x05500, 0x05900, 0x05d00 },
 
        },
        .ad = {
                .count = 3,
-               .base = { 0x13500, 0x13700, 0x13900 },
+               .base = { 0x13400, 0x13600, 0x13800 },
        },
        .pp = {
                .count = 4,
-               .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
+               .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 },
        },
        .intf = {
-               .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+               .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 },
                .connect = {
                        [0] = INTF_eDP,
                        [1] = INTF_DSI,
@@ -247,7 +244,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
        .name = "msm8x16",
        .mdp = {
                .count = 1,
-               .base = { 0x01000 },
+               .base = { 0x0 },
                .caps = MDP_CAP_SMP |
                        0,
        },
@@ -261,41 +258,41 @@ const struct mdp5_cfg_hw msm8x16_config = {
        },
        .ctl = {
                .count = 5,
-               .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+               .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
                .flush_hw_mask = 0x4003ffff,
        },
        .pipe_vig = {
                .count = 1,
-               .base = { 0x05000 },
+               .base = { 0x04000 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
                                MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
                                MDP_PIPE_CAP_DECIMATION,
        },
        .pipe_rgb = {
                .count = 2,
-               .base = { 0x15000, 0x17000 },
+               .base = { 0x14000, 0x16000 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
                                MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
        },
        .pipe_dma = {
                .count = 1,
-               .base = { 0x25000 },
+               .base = { 0x24000 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
        },
        .lm = {
                .count = 2, /* LM0 and LM3 */
-               .base = { 0x45000, 0x48000 },
+               .base = { 0x44000, 0x47000 },
                .nb_stages = 5,
                .max_width = 2048,
                .max_height = 0xFFFF,
        },
        .dspp = {
                .count = 1,
-               .base = { 0x55000 },
+               .base = { 0x54000 },
 
        },
        .intf = {
-               .base = { 0x00000, 0x6b800 },
+               .base = { 0x00000, 0x6a800 },
                .connect = {
                        [0] = INTF_DISABLED,
                        [1] = INTF_DSI,
@@ -308,7 +305,6 @@ const struct mdp5_cfg_hw msm8x94_config = {
        .name = "msm8x94",
        .mdp = {
                .count = 1,
-               .base = { 0x01000 },
                .caps = MDP_CAP_SMP |
                        0,
        },
@@ -330,49 +326,49 @@ const struct mdp5_cfg_hw msm8x94_config = {
        },
        .ctl = {
                .count = 5,
-               .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+               .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
                .flush_hw_mask = 0xf0ffffff,
        },
        .pipe_vig = {
                .count = 4,
-               .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+               .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
                                MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
                                MDP_PIPE_CAP_DECIMATION,
        },
        .pipe_rgb = {
                .count = 4,
-               .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+               .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
                                MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
        },
        .pipe_dma = {
                .count = 2,
-               .base = { 0x25000, 0x27000 },
+               .base = { 0x24000, 0x26000 },
                .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
        },
        .lm = {
                .count = 6,
-               .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+               .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
                .nb_stages = 8,
                .max_width = 2048,
                .max_height = 0xFFFF,
        },
        .dspp = {
                .count = 4,
-               .base = { 0x55000, 0x57000, 0x59000, 0x5b000 },
+               .base = { 0x54000, 0x56000, 0x58000, 0x5a000 },
 
        },
        .ad = {
                .count = 3,
-               .base = { 0x79000, 0x79800, 0x7a000 },
+               .base = { 0x78000, 0x78800, 0x79000 },
        },
        .pp = {
                .count = 4,
-               .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+               .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
        },
        .intf = {
-               .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+               .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
                .connect = {
                        [0] = INTF_DISABLED,
                        [1] = INTF_DSI,
@@ -387,19 +383,18 @@ const struct mdp5_cfg_hw msm8x96_config = {
        .name = "msm8x96",
        .mdp = {
                .count = 1,
-               .base = { 0x01000 },
                .caps = MDP_CAP_DSC |
                        MDP_CAP_CDM |
                        0,
        },
        .ctl = {
                .count = 5,
-               .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 },
+               .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
                .flush_hw_mask = 0xf4ffffff,
        },
        .pipe_vig = {
                .count = 4,
-               .base = { 0x05000, 0x07000, 0x09000, 0x0b000 },
+               .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
                .caps = MDP_PIPE_CAP_HFLIP      |
                        MDP_PIPE_CAP_VFLIP      |
                        MDP_PIPE_CAP_SCALE      |
@@ -410,7 +405,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
        },
        .pipe_rgb = {
                .count = 4,
-               .base = { 0x15000, 0x17000, 0x19000, 0x1b000 },
+               .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
                .caps = MDP_PIPE_CAP_HFLIP      |
                        MDP_PIPE_CAP_VFLIP      |
                        MDP_PIPE_CAP_SCALE      |
@@ -420,7 +415,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
        },
        .pipe_dma = {
                .count = 2,
-               .base = { 0x25000, 0x27000 },
+               .base = { 0x24000, 0x26000 },
                .caps = MDP_PIPE_CAP_HFLIP      |
                        MDP_PIPE_CAP_VFLIP      |
                        MDP_PIPE_CAP_SW_PIX_EXT |
@@ -428,33 +423,33 @@ const struct mdp5_cfg_hw msm8x96_config = {
        },
        .lm = {
                .count = 6,
-               .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 },
+               .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
                .nb_stages = 8,
                .max_width = 2560,
                .max_height = 0xFFFF,
        },
        .dspp = {
                .count = 2,
-               .base = { 0x55000, 0x57000 },
+               .base = { 0x54000, 0x56000 },
        },
        .ad = {
                .count = 3,
-               .base = { 0x79000, 0x79800, 0x7a000 },
+               .base = { 0x78000, 0x78800, 0x79000 },
        },
        .pp = {
                .count = 4,
-               .base = { 0x71000, 0x71800, 0x72000, 0x72800 },
+               .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
        },
        .cdm = {
                .count = 1,
-               .base = { 0x7a200 },
+               .base = { 0x79200 },
        },
        .dsc = {
                .count = 2,
-               .base = { 0x81000, 0x81400 },
+               .base = { 0x80000, 0x80400 },
        },
        .intf = {
-               .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 },
+               .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
                .connect = {
                        [0] = INTF_DISABLED,
                        [1] = INTF_DSI,
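
Every base offset in these tables drops by 0x1000 because the register definitions are now relative to the MDP5 block itself rather than to the enclosing MDSS wrapper, whose own registers occupied the first 0x1000 bytes of the old mapping. A minimal sketch of how a per-block base combines with a register offset, assuming mdp5_write() adds its offset to mdp5_kms->mmio; the ctl_write() helper here is illustrative, not the driver's actual code:

    static inline void ctl_write(struct mdp5_kms *mdp5_kms, int n,
                                 u32 reg, u32 data)
    {
            const struct mdp5_cfg_hw *hw = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

            /* On msm8x96, hw->ctl.base[n] is now 0x01000 + n * 0x200;
             * 'reg' is an offset within a single CTL instance. */
            mdp5_write(mdp5_kms, hw->ctl.base[n] + reg, data);
    }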
index 69094cb2810372f5a2db8a33e18c265eaf50e24d..c627ab6d00617dc357fc51e2fab74a7565ab1058 100644
@@ -272,22 +272,22 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
         * start signal for the slave encoder
         */
        if (intf_num == 1)
-               data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
+               data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
        else if (intf_num == 2)
-               data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
+               data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
        else
                return -EINVAL;
 
        /* Smart Panel, Sync mode */
-       data |= MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL;
+       data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
 
        /* Make sure clocks are on when connectors call this function. */
        mdp5_enable(mdp5_kms);
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), data);
+       mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
 
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0),
-                       MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL);
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+       mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
+                  MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
+       mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
        mdp5_disable(mdp5_kms);
 
        return 0;
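
The macro rename follows from the same remapping: the generated headers used to model the MDP as an indexed sub-block of MDSS, so every register macro took an instance index, with 0 the only value ever passed. Before and after, using a write taken from this hunk:

    /* before: MDP registers were instance-indexed within the MDSS space */
    mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);

    /* after: mmio maps the MDP5 block directly, so the macros are flat */
    mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);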
index 88fe256c1931a5767f08158669d62e376f8ee1f3..fa2be7ce9468768eb6737a12ec29d11fc52f49f0 100644
@@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct plane_state pstates[STAGE_MAX + 1];
        const struct mdp5_cfg_hw *hw_cfg;
+       const struct drm_plane_state *pstate;
        int cnt = 0, i;
 
        DBG("%s: check", mdp5_crtc->name);
@@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
         * and that we don't have conflicting mixer stages:
         */
        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-       drm_atomic_crtc_state_for_each_plane(plane, state) {
-               struct drm_plane_state *pstate;
+       drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
                if (cnt >= (hw_cfg->lm.nb_stages)) {
                        dev_err(dev->dev, "too many planes!\n");
                        return -EINVAL;
                }
 
-               pstate = state->state->plane_states[drm_plane_index(plane)];
 
-               /* plane might not have changed, in which case take
-                * current state:
-                */
-               if (!pstate)
-                       pstate = plane->state;
                pstates[cnt].plane = plane;
                pstates[cnt].state = to_mdp5_plane_state(pstate);
 
@@ -496,8 +490,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_gem_object *cursor_bo, *old_bo = NULL;
        uint32_t blendcfg, cursor_addr, stride;
-       int ret, bpp, lm;
-       unsigned int depth;
+       int ret, lm;
        enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
        uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
        uint32_t roi_w, roi_h;
@@ -527,8 +520,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
                return -EINVAL;
 
        lm = mdp5_crtc->lm;
-       drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
-       stride = width * (bpp >> 3);
+       stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
 
        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        old_bo = mdp5_crtc->cursor.scanout_bo;
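
drm_format_plane_cpp() returns the bytes per pixel ("cpp") of one plane of a framebuffer format, which makes the old depth/bpp round trip unnecessary. A worked example for the ARGB8888 cursor:

    /* ARGB8888 is a single-plane format with 4 bytes per pixel, so: */
    stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
    /* e.g. width = 64  ->  stride = 64 * 4 = 256 bytes per scanline */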
index 4e81ca4f964afbb2ccecab23185227375a9f9069..d021edc3b307e3f5b37fdd3523a72fa8dec48ae9 100644
@@ -118,31 +118,31 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms,
        u32 intf_sel;
 
        spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
-       intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0));
+       intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
 
        switch (intf->num) {
        case 0:
-               intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK;
-               intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type);
+               intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
+               intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
                break;
        case 1:
-               intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK;
-               intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type);
+               intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
+               intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
                break;
        case 2:
-               intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK;
-               intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type);
+               intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
+               intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
                break;
        case 3:
-               intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK;
-               intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type);
+               intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
+               intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
                break;
        default:
                BUG();
                break;
        }
 
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel);
+       mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
        spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 }
 
@@ -557,7 +557,7 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
        if (!enable) {
                ctlx->pair = NULL;
                ctly->pair = NULL;
-               mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+               mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
                return 0;
        } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
                dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
@@ -570,8 +570,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
        ctlx->pair = ctly;
        ctly->pair = ctlx;
 
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
-               MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+       mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
+                  MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
 
        return 0;
 }
index 1d95f9fd9dc7ac4474e630c3a1219d770f9842b0..fe0c22230883281f260db69a58d3637071979364 100644
@@ -322,18 +322,18 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
         * to use the master's enable signal for the slave encoder.
         */
        if (intf_num == 1)
-               data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
+               data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
        else if (intf_num == 2)
-               data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
+               data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
        else
                return -EINVAL;
 
        /* Make sure clocks are on when connectors call this function. */
        mdp5_enable(mdp5_kms);
        /* Dumb Panel, Sync mode */
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+       mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
+       mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
+       mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
 
        mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
 
index 73bc3e312fd415619b628bb24d8abb4e33bde49a..d53e5510fd7cbcf03695e718ace36b1f661c9db6 100644
@@ -15,7 +15,6 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/irqdomain.h>
 #include <linux/irq.h>
 
 #include "msm_drv.h"
@@ -24,9 +23,9 @@
 void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
                uint32_t old_irqmask)
 {
-       mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0),
-               irqmask ^ (irqmask & old_irqmask));
-       mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask);
+       mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR,
+                  irqmask ^ (irqmask & old_irqmask));
+       mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
 }
 
 static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
@@ -38,8 +37,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
        mdp5_enable(mdp5_kms);
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff);
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
+       mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
+       mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
        mdp5_disable(mdp5_kms);
 }
 
@@ -55,7 +54,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
                        MDP5_IRQ_INTF2_UNDER_RUN |
                        MDP5_IRQ_INTF3_UNDER_RUN;
 
+       mdp5_enable(mdp5_kms);
        mdp_irq_register(mdp_kms, error_handler);
+       mdp5_disable(mdp5_kms);
 
        return 0;
 }
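
The new enable/disable bracket is needed because registering the error handler immediately reprograms the hardware interrupt mask, and those register writes require the MDP clocks to be running. The assumed call chain (a sketch, not verbatim driver code):

    /* mdp_irq_register(mdp_kms, error_handler)
     *   -> updates the shared irq mask and invokes the set_irqmask hook,
     *      here mdp5_set_irqmask(), which does:
     *        mdp5_write(.., REG_MDP5_INTR_CLEAR, ..);
     *        mdp5_write(.., REG_MDP5_INTR_EN, ..);
     * so the clocks must stay on for the duration of the call.
     */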
@@ -64,21 +65,22 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
        mdp5_enable(mdp5_kms);
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000);
+       mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
        mdp5_disable(mdp5_kms);
 }
 
-static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
+irqreturn_t mdp5_irq(struct msm_kms *kms)
 {
+       struct mdp_kms *mdp_kms = to_mdp_kms(kms);
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
        struct drm_device *dev = mdp5_kms->dev;
        struct msm_drm_private *priv = dev->dev_private;
        unsigned int id;
        uint32_t status, enable;
 
-       enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0));
-       status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable;
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status);
+       enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
+       status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;
+       mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
 
        VERB("status=%08x", status);
 
@@ -87,29 +89,6 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
        for (id = 0; id < priv->num_crtcs; id++)
                if (status & mdp5_crtc_vblank(priv->crtcs[id]))
                        drm_handle_vblank(dev, id);
-}
-
-irqreturn_t mdp5_irq(struct msm_kms *kms)
-{
-       struct mdp_kms *mdp_kms = to_mdp_kms(kms);
-       struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
-       uint32_t intr;
-
-       intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
-
-       VERB("intr=%08x", intr);
-
-       if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
-               mdp5_irq_mdp(mdp_kms);
-               intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
-       }
-
-       while (intr) {
-               irq_hw_number_t hwirq = fls(intr) - 1;
-               generic_handle_irq(irq_find_mapping(
-                               mdp5_kms->irqcontroller.domain, hwirq));
-               intr &= ~(1 << hwirq);
-       }
 
        return IRQ_HANDLED;
 }
@@ -135,81 +114,3 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
                        mdp5_crtc_vblank(crtc), false);
        mdp5_disable(mdp5_kms);
 }
-
-/*
- * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
- * can register to get their irq's delivered
- */
-
-#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
-               MDSS_HW_INTR_STATUS_INTR_DSI1 | \
-               MDSS_HW_INTR_STATUS_INTR_HDMI | \
-               MDSS_HW_INTR_STATUS_INTR_EDP)
-
-static void mdp5_hw_mask_irq(struct irq_data *irqd)
-{
-       struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
-       smp_mb__before_atomic();
-       clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
-       smp_mb__after_atomic();
-}
-
-static void mdp5_hw_unmask_irq(struct irq_data *irqd)
-{
-       struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
-       smp_mb__before_atomic();
-       set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
-       smp_mb__after_atomic();
-}
-
-static struct irq_chip mdp5_hw_irq_chip = {
-       .name           = "mdp5",
-       .irq_mask       = mdp5_hw_mask_irq,
-       .irq_unmask     = mdp5_hw_unmask_irq,
-};
-
-static int mdp5_hw_irqdomain_map(struct irq_domain *d,
-               unsigned int irq, irq_hw_number_t hwirq)
-{
-       struct mdp5_kms *mdp5_kms = d->host_data;
-
-       if (!(VALID_IRQS & (1 << hwirq)))
-               return -EPERM;
-
-       irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
-       irq_set_chip_data(irq, mdp5_kms);
-
-       return 0;
-}
-
-static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
-       .map = mdp5_hw_irqdomain_map,
-       .xlate = irq_domain_xlate_onecell,
-};
-
-
-int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
-{
-       struct device *dev = mdp5_kms->dev->dev;
-       struct irq_domain *d;
-
-       d = irq_domain_add_linear(dev->of_node, 32,
-                       &mdp5_hw_irqdomain_ops, mdp5_kms);
-       if (!d) {
-               dev_err(dev, "mdp5 irq domain add failed\n");
-               return -ENXIO;
-       }
-
-       mdp5_kms->irqcontroller.enabled_mask = 0;
-       mdp5_kms->irqcontroller.domain = d;
-
-       return 0;
-}
-
-void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
-{
-       if (mdp5_kms->irqcontroller.domain) {
-               irq_domain_remove(mdp5_kms->irqcontroller.domain);
-               mdp5_kms->irqcontroller.domain = NULL;
-       }
-}
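
With the wrapper-level dispatch removed here, mdp5_irq() handles MDP interrupts only; the fls()-based fan-out over MDSS_HW_INTR_STATUS reappears, nearly verbatim, in the new mdp5_mdss.c added later in this patch (see mdss_irq() below). The dispatch idiom, for reference, where domain stands for the MDSS irq domain:

    /* peel set bits off the status word, highest first, and hand each
     * one to the Linux irq mapped for that hw line: */
    while (intr) {
            irq_hw_number_t hwirq = fls(intr) - 1;

            generic_handle_irq(irq_find_mapping(domain, hwirq));
            intr &= ~(1 << hwirq);
    }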
index 484b4d15e71d372c1cef0405876c2d2574819e4a..ed7143d35b25d0c3075bdcc652c840f07958ea56 100644
@@ -16,6 +16,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/of_irq.h>
 
 #include "msm_drv.h"
 #include "msm_mmu.h"
@@ -28,10 +29,11 @@ static const char *iommu_ports[] = {
 static int mdp5_hw_init(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-       struct drm_device *dev = mdp5_kms->dev;
+       struct platform_device *pdev = mdp5_kms->pdev;
        unsigned long flags;
 
-       pm_runtime_get_sync(dev->dev);
+       pm_runtime_get_sync(&pdev->dev);
+       mdp5_enable(mdp5_kms);
 
        /* Magic unknown register writes:
         *
@@ -58,12 +60,13 @@ static int mdp5_hw_init(struct msm_kms *kms)
         */
 
        spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
-       mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0);
+       mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
        spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 
        mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
 
-       pm_runtime_put_sync(dev->dev);
+       mdp5_disable(mdp5_kms);
+       pm_runtime_put_sync(&pdev->dev);
 
        return 0;
 }
@@ -78,17 +81,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
 {
        int i;
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-       int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
-
-       for (i = 0; i < nplanes; i++) {
-               struct drm_plane *plane = state->planes[i];
-               struct drm_plane_state *plane_state = state->plane_states[i];
-
-               if (!plane)
-                       continue;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
 
+       for_each_plane_in_state(state, plane, plane_state, i)
                mdp5_plane_complete_commit(plane, plane_state);
-       }
 
        mdp5_disable(mdp5_kms);
 }
@@ -117,26 +114,15 @@ static int mdp5_set_split_display(struct msm_kms *kms,
                return mdp5_encoder_set_split_display(encoder, slave_encoder);
 }
 
-static void mdp5_destroy(struct msm_kms *kms)
+static void mdp5_kms_destroy(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
        struct msm_mmu *mmu = mdp5_kms->mmu;
 
-       mdp5_irq_domain_fini(mdp5_kms);
-
        if (mmu) {
                mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
                mmu->funcs->destroy(mmu);
        }
-
-       if (mdp5_kms->ctlm)
-               mdp5_ctlm_destroy(mdp5_kms->ctlm);
-       if (mdp5_kms->smp)
-               mdp5_smp_destroy(mdp5_kms->smp);
-       if (mdp5_kms->cfg)
-               mdp5_cfg_destroy(mdp5_kms->cfg);
-
-       kfree(mdp5_kms);
 }
 
 static const struct mdp_kms_funcs kms_funcs = {
@@ -154,7 +140,7 @@ static const struct mdp_kms_funcs kms_funcs = {
                .get_format      = mdp_get_format,
                .round_pixclk    = mdp5_round_pixclk,
                .set_split_display = mdp5_set_split_display,
-               .destroy         = mdp5_destroy,
+               .destroy         = mdp5_kms_destroy,
        },
        .set_irqmask         = mdp5_set_irqmask,
 };
@@ -351,13 +337,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 
        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 
-       /* register our interrupt-controller for hdmi/eDP/dsi/etc
-        * to use for irqs routed through mdp:
-        */
-       ret = mdp5_irq_domain_init(mdp5_kms);
-       if (ret)
-               goto fail;
-
        /* construct CRTCs and their private planes: */
        for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
                struct drm_plane *plane;
@@ -425,17 +404,17 @@ fail:
        return ret;
 }
 
-static void read_hw_revision(struct mdp5_kms *mdp5_kms,
-               uint32_t *major, uint32_t *minor)
+static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
+                                u32 *major, u32 *minor)
 {
-       uint32_t version;
+       u32 version;
 
        mdp5_enable(mdp5_kms);
-       version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION);
+       version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
        mdp5_disable(mdp5_kms);
 
-       *major = FIELD(version, MDSS_HW_VERSION_MAJOR);
-       *minor = FIELD(version, MDSS_HW_VERSION_MINOR);
+       *major = FIELD(version, MDP5_HW_VERSION_MAJOR);
+       *minor = FIELD(version, MDP5_HW_VERSION_MINOR);
 
        DBG("MDP5 version v%d.%d", *major, *minor);
 }
@@ -580,51 +559,146 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
 
 struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 {
-       struct platform_device *pdev = dev->platformdev;
-       struct mdp5_cfg *config;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct platform_device *pdev;
        struct mdp5_kms *mdp5_kms;
-       struct msm_kms *kms = NULL;
+       struct mdp5_cfg *config;
+       struct msm_kms *kms;
        struct msm_mmu *mmu;
-       uint32_t major, minor;
-       int i, ret;
+       int irq, i, ret;
 
-       mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
-       if (!mdp5_kms) {
-               dev_err(dev->dev, "failed to allocate kms\n");
-               ret = -ENOMEM;
+       /* priv->kms would have been populated by the MDP5 driver */
+       kms = priv->kms;
+       if (!kms)
+               return NULL;
+
+       mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+
+       mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+
+       pdev = mdp5_kms->pdev;
+
+       irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+       if (!irq) {
+               ret = -EINVAL;
+               dev_err(&pdev->dev, "failed to get irq\n");
                goto fail;
        }
 
-       spin_lock_init(&mdp5_kms->resource_lock);
+       kms->irq = irq;
 
-       mdp_kms_init(&mdp5_kms->base, &kms_funcs);
+       config = mdp5_cfg_get_config(mdp5_kms->cfg);
 
-       kms = &mdp5_kms->base.base;
+       /* make sure things are off before attaching iommu (bootloader could
+        * have left things on, in which case we'll start getting faults if
+        * we don't disable):
+        */
+       mdp5_enable(mdp5_kms);
+       for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
+               if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
+                   !config->hw->intf.base[i])
+                       continue;
+               mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
 
-       mdp5_kms->dev = dev;
+               mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
+       }
+       mdp5_disable(mdp5_kms);
+       mdelay(16);
 
-       /* mdp5_kms->mmio actually represents the MDSS base address */
-       mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
-       if (IS_ERR(mdp5_kms->mmio)) {
-               ret = PTR_ERR(mdp5_kms->mmio);
+       if (config->platform.iommu) {
+               mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+               if (IS_ERR(mmu)) {
+                       ret = PTR_ERR(mmu);
+                       dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
+                       iommu_domain_free(config->platform.iommu);
+                       goto fail;
+               }
+
+               ret = mmu->funcs->attach(mmu, iommu_ports,
+                               ARRAY_SIZE(iommu_ports));
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+                               ret);
+                       mmu->funcs->destroy(mmu);
+                       goto fail;
+               }
+       } else {
+               dev_info(&pdev->dev,
+                        "no iommu, fallback to phys contig buffers for scanout\n");
+               mmu = NULL;
+       }
+       mdp5_kms->mmu = mmu;
+
+       mdp5_kms->id = msm_register_mmu(dev, mmu);
+       if (mdp5_kms->id < 0) {
+               ret = mdp5_kms->id;
+               dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
                goto fail;
        }
 
-       mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
-       if (IS_ERR(mdp5_kms->vbif)) {
-               ret = PTR_ERR(mdp5_kms->vbif);
+       ret = modeset_init(mdp5_kms);
+       if (ret) {
+               dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
                goto fail;
        }
 
-       mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
-       if (IS_ERR(mdp5_kms->vdd)) {
-               ret = PTR_ERR(mdp5_kms->vdd);
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+       dev->mode_config.max_width = config->hw->lm.max_width;
+       dev->mode_config.max_height = config->hw->lm.max_height;
+
+       dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
+       dev->driver->get_scanout_position = mdp5_get_scanoutpos;
+       dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
+       dev->max_vblank_count = 0xffffffff;
+       dev->vblank_disable_immediate = true;
+
+       return kms;
+fail:
+       if (kms)
+               mdp5_kms_destroy(kms);
+       return ERR_PTR(ret);
+}
+
+static void mdp5_destroy(struct platform_device *pdev)
+{
+       struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+
+       if (mdp5_kms->ctlm)
+               mdp5_ctlm_destroy(mdp5_kms->ctlm);
+       if (mdp5_kms->smp)
+               mdp5_smp_destroy(mdp5_kms->smp);
+       if (mdp5_kms->cfg)
+               mdp5_cfg_destroy(mdp5_kms->cfg);
+
+       if (mdp5_kms->rpm_enabled)
+               pm_runtime_disable(&pdev->dev);
+}
+
+static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       struct mdp5_kms *mdp5_kms;
+       struct mdp5_cfg *config;
+       u32 major, minor;
+       int ret;
+
+       mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
+       if (!mdp5_kms) {
+               ret = -ENOMEM;
                goto fail;
        }
 
-       ret = regulator_enable(mdp5_kms->vdd);
-       if (ret) {
-               dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+       platform_set_drvdata(pdev, mdp5_kms);
+
+       spin_lock_init(&mdp5_kms->resource_lock);
+
+       mdp5_kms->dev = dev;
+       mdp5_kms->pdev = pdev;
+
+       mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
+       if (IS_ERR(mdp5_kms->mmio)) {
+               ret = PTR_ERR(mdp5_kms->mmio);
                goto fail;
        }
 
@@ -633,9 +707,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
        if (ret)
                goto fail;
        ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
-       if (ret)
-               goto fail;
-       ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src", true);
        if (ret)
                goto fail;
        ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
@@ -652,9 +723,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
         * rate first, then figure out hw revision, and then set a
         * more optimal rate:
         */
-       clk_set_rate(mdp5_kms->src_clk, 200000000);
+       clk_set_rate(mdp5_kms->core_clk, 200000000);
 
-       read_hw_revision(mdp5_kms, &major, &minor);
+       pm_runtime_enable(&pdev->dev);
+       mdp5_kms->rpm_enabled = true;
+
+       read_mdp_hw_revision(mdp5_kms, &major, &minor);
 
        mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
        if (IS_ERR(mdp5_kms->cfg)) {
@@ -667,7 +741,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
        mdp5_kms->caps = config->hw->mdp.caps;
 
        /* TODO: compute core clock rate at runtime */
-       clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
+       clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);
 
        /*
         * Some chipsets have a Shared Memory Pool (SMP), while others
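
With core_clk_src gone, rates are set on core_clk directly. The bring-up order mdp5_init() relies on, sketched from this and the preceding hunk:

    /* 1. clk_set_rate(core_clk, 200000000)  - safe rate for register access
     * 2. pm_runtime_enable()
     * 3. read_mdp_hw_revision()             - brackets its register read
     *                                         with mdp5_enable()/disable()
     * 4. mdp5_cfg_init(major, minor)        - picks the per-SoC config
     * 5. clk_set_rate(core_clk, config->hw->max_clk)
     */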
@@ -690,73 +764,76 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
                goto fail;
        }
 
-       /* make sure things are off before attaching iommu (bootloader could
-        * have left things on, in which case we'll start getting faults if
-        * we don't disable):
-        */
-       mdp5_enable(mdp5_kms);
-       for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
-               if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
-                               !config->hw->intf.base[i])
-                       continue;
-               mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
+       /* set uninit-ed kms */
+       priv->kms = &mdp5_kms->base.base;
 
-               mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
-       }
-       mdp5_disable(mdp5_kms);
-       mdelay(16);
+       return 0;
+fail:
+       mdp5_destroy(pdev);
+       return ret;
+}
 
-       if (config->platform.iommu) {
-               mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
-               if (IS_ERR(mmu)) {
-                       ret = PTR_ERR(mmu);
-                       dev_err(dev->dev, "failed to init iommu: %d\n", ret);
-                       iommu_domain_free(config->platform.iommu);
-                       goto fail;
-               }
+static int mdp5_bind(struct device *dev, struct device *master, void *data)
+{
+       struct drm_device *ddev = dev_get_drvdata(master);
+       struct platform_device *pdev = to_platform_device(dev);
 
-               ret = mmu->funcs->attach(mmu, iommu_ports,
-                               ARRAY_SIZE(iommu_ports));
-               if (ret) {
-                       dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
-                       mmu->funcs->destroy(mmu);
-                       goto fail;
-               }
-       } else {
-               dev_info(dev->dev, "no iommu, fallback to phys "
-                               "contig buffers for scanout\n");
-               mmu = NULL;
-       }
-       mdp5_kms->mmu = mmu;
+       DBG("");
 
-       mdp5_kms->id = msm_register_mmu(dev, mmu);
-       if (mdp5_kms->id < 0) {
-               ret = mdp5_kms->id;
-               dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
-               goto fail;
-       }
+       return mdp5_init(pdev, ddev);
+}
 
-       ret = modeset_init(mdp5_kms);
-       if (ret) {
-               dev_err(dev->dev, "modeset_init failed: %d\n", ret);
-               goto fail;
-       }
+static void mdp5_unbind(struct device *dev, struct device *master,
+                       void *data)
+{
+       struct platform_device *pdev = to_platform_device(dev);
 
-       dev->mode_config.min_width = 0;
-       dev->mode_config.min_height = 0;
-       dev->mode_config.max_width = config->hw->lm.max_width;
-       dev->mode_config.max_height = config->hw->lm.max_height;
+       mdp5_destroy(pdev);
+}
 
-       dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
-       dev->driver->get_scanout_position = mdp5_get_scanoutpos;
-       dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
-       dev->max_vblank_count = 0xffffffff;
-       dev->vblank_disable_immediate = true;
+static const struct component_ops mdp5_ops = {
+       .bind   = mdp5_bind,
+       .unbind = mdp5_unbind,
+};
 
-       return kms;
+static int mdp5_dev_probe(struct platform_device *pdev)
+{
+       DBG("");
+       return component_add(&pdev->dev, &mdp5_ops);
+}
 
-fail:
-       if (kms)
-               mdp5_destroy(kms);
-       return ERR_PTR(ret);
+static int mdp5_dev_remove(struct platform_device *pdev)
+{
+       DBG("");
+       component_del(&pdev->dev, &mdp5_ops);
+       return 0;
+}
+
+static const struct of_device_id mdp5_dt_match[] = {
+       { .compatible = "qcom,mdp5", },
+       /* to support downstream DT files */
+       { .compatible = "qcom,mdss_mdp", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, mdp5_dt_match);
+
+static struct platform_driver mdp5_driver = {
+       .probe = mdp5_dev_probe,
+       .remove = mdp5_dev_remove,
+       .driver = {
+               .name = "msm_mdp",
+               .of_match_table = mdp5_dt_match,
+       },
+};
+
+void __init msm_mdp_register(void)
+{
+       DBG("");
+       platform_driver_register(&mdp5_driver);
+}
+
+void __exit msm_mdp_unregister(void)
+{
+       DBG("");
+       platform_driver_unregister(&mdp5_driver);
 }
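
Taken together, the probe-time flow under the component model looks like this (an assumed ordering pieced together from the hunks in this patch):

    /* msm_drm_register()
     *   -> msm_mdp_register()           registers the "msm_mdp" driver
     * msm_pdev_probe() on the MDSS master device
     *   -> add_display_components()     populates children, matches MDP5
     *   -> component_master_add_with_match()
     * all matches present -> msm_drm_init()
     *   -> msm_mdss_init()              maps MDSS, sets up the irq domain
     *   -> component_bind_all() -> mdp5_bind() -> mdp5_init()
     *                                   sets priv->kms
     *   -> mdp5_kms_init()              picks up priv->kms, finishes init
     */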
index 9a25898239d3d06d62519f1d990f10be081da559..03738927be1010501f1275b5286712c92e1a23ed 100644
@@ -31,6 +31,8 @@ struct mdp5_kms {
 
        struct drm_device *dev;
 
+       struct platform_device *pdev;
+
        struct mdp5_cfg_handler *cfg;
        uint32_t caps;  /* MDP capabilities (MDP_CAP_XXX bits) */
 
@@ -43,29 +45,23 @@ struct mdp5_kms {
        struct mdp5_ctl_manager *ctlm;
 
        /* io/register spaces: */
-       void __iomem *mmio, *vbif;
-
-       struct regulator *vdd;
+       void __iomem *mmio;
 
        struct clk *axi_clk;
        struct clk *ahb_clk;
-       struct clk *src_clk;
        struct clk *core_clk;
        struct clk *lut_clk;
        struct clk *vsync_clk;
 
        /*
         * lock to protect access to global resources: ie., following register:
-        *      - REG_MDP5_MDP_DISP_INTF_SEL
+        *      - REG_MDP5_DISP_INTF_SEL
         */
        spinlock_t resource_lock;
 
-       struct mdp_irq error_handler;
+       bool rpm_enabled;
 
-       struct {
-               volatile unsigned long enabled_mask;
-               struct irq_domain *domain;
-       } irqcontroller;
+       struct mdp_irq error_handler;
 };
 #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
new file mode 100644
index 0000000..d444a69
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+
+#include "msm_drv.h"
+#include "mdp5_kms.h"
+
+/*
+ * If needed, this can become more specific: something like struct mdp5_mdss,
+ * which contains a 'struct msm_mdss base' member.
+ */
+struct msm_mdss {
+       struct drm_device *dev;
+
+       void __iomem *mmio, *vbif;
+
+       struct regulator *vdd;
+
+       struct {
+               volatile unsigned long enabled_mask;
+               struct irq_domain *domain;
+       } irqcontroller;
+};
+
+static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+{
+       msm_writel(data, mdss->mmio + reg);
+}
+
+static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+{
+       return msm_readl(mdss->mmio + reg);
+}
+
+static irqreturn_t mdss_irq(int irq, void *arg)
+{
+       struct msm_mdss *mdss = arg;
+       u32 intr;
+
+       intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+
+       VERB("intr=%08x", intr);
+
+       while (intr) {
+               irq_hw_number_t hwirq = fls(intr) - 1;
+
+               generic_handle_irq(irq_find_mapping(
+                               mdss->irqcontroller.domain, hwirq));
+               intr &= ~(1 << hwirq);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
+ * can register to get their irqs delivered
+ */
+
+#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_MDP | \
+               MDSS_HW_INTR_STATUS_INTR_DSI0 | \
+               MDSS_HW_INTR_STATUS_INTR_DSI1 | \
+               MDSS_HW_INTR_STATUS_INTR_HDMI | \
+               MDSS_HW_INTR_STATUS_INTR_EDP)
+
+static void mdss_hw_mask_irq(struct irq_data *irqd)
+{
+       struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+
+       smp_mb__before_atomic();
+       clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+       smp_mb__after_atomic();
+}
+
+static void mdss_hw_unmask_irq(struct irq_data *irqd)
+{
+       struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+
+       smp_mb__before_atomic();
+       set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+       smp_mb__after_atomic();
+}
+
+static struct irq_chip mdss_hw_irq_chip = {
+       .name           = "mdss",
+       .irq_mask       = mdss_hw_mask_irq,
+       .irq_unmask     = mdss_hw_unmask_irq,
+};
+
+static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
+                                irq_hw_number_t hwirq)
+{
+       struct msm_mdss *mdss = d->host_data;
+
+       if (!(VALID_IRQS & (1 << hwirq)))
+               return -EPERM;
+
+       irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
+       irq_set_chip_data(irq, mdss);
+
+       return 0;
+}
+
+static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+       .map = mdss_hw_irqdomain_map,
+       .xlate = irq_domain_xlate_onecell,
+};
+
+
+static int mdss_irq_domain_init(struct msm_mdss *mdss)
+{
+       struct device *dev = mdss->dev->dev;
+       struct irq_domain *d;
+
+       d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
+                                 mdss);
+       if (!d) {
+               dev_err(dev, "mdss irq domain add failed\n");
+               return -ENXIO;
+       }
+
+       mdss->irqcontroller.enabled_mask = 0;
+       mdss->irqcontroller.domain = d;
+
+       return 0;
+}
+
+void msm_mdss_destroy(struct drm_device *dev)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       struct msm_mdss *mdss = priv->mdss;
+
+       if (!mdss)
+               return;
+
+       irq_domain_remove(mdss->irqcontroller.domain);
+       mdss->irqcontroller.domain = NULL;
+
+       regulator_disable(mdss->vdd);
+
+       pm_runtime_put_sync(dev->dev);
+
+       pm_runtime_disable(dev->dev);
+}
+
+int msm_mdss_init(struct drm_device *dev)
+{
+       struct platform_device *pdev = dev->platformdev;
+       struct msm_drm_private *priv = dev->dev_private;
+       struct msm_mdss *mdss;
+       int ret;
+
+       DBG("");
+
+       if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
+               return 0;
+
+       mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
+       if (!mdss) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       mdss->dev = dev;
+
+       mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+       if (IS_ERR(mdss->mmio)) {
+               ret = PTR_ERR(mdss->mmio);
+               goto fail;
+       }
+
+       mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+       if (IS_ERR(mdss->vbif)) {
+               ret = PTR_ERR(mdss->vbif);
+               goto fail;
+       }
+
+       /* Regulator to enable GDSCs in downstream kernels */
+       mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+       if (IS_ERR(mdss->vdd)) {
+               ret = PTR_ERR(mdss->vdd);
+               goto fail;
+       }
+
+       ret = regulator_enable(mdss->vdd);
+       if (ret) {
+               dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+                       ret);
+               goto fail;
+       }
+
+       ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+                              mdss_irq, 0, "mdss_isr", mdss);
+       if (ret) {
+               dev_err(dev->dev, "failed to init irq: %d\n", ret);
+               goto fail_irq;
+       }
+
+       ret = mdss_irq_domain_init(mdss);
+       if (ret) {
+               dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+               goto fail_irq;
+       }
+
+       priv->mdss = mdss;
+
+       pm_runtime_enable(dev->dev);
+
+       /*
+        * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
+        * domain. Remove this once runtime PM is adapted for all the devices.
+        */
+       pm_runtime_get_sync(dev->dev);
+
+       return 0;
+fail_irq:
+       regulator_disable(mdss->vdd);
+fail:
+       return ret;
+}
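
A sub-block reaches this controller through a normal DT interrupt reference: the hwirq number is the bit position in MDSS_HW_INTR_STATUS (the .xlate hook is irq_domain_xlate_onecell), and mdss_hw_irqdomain_map() refuses anything outside VALID_IRQS. A hypothetical consumer fragment, shown as a comment (the node name and bit value are illustrative only):

    /* hdmi@... {
     *         interrupt-parent = <&mdss>;
     *         interrupts = <8>;   // hypothetical: HDMI's bit in HW_INTR_STATUS
     * };
     *
     * irq_of_parse_and_map() on such a node resolves through
     * mdss->irqcontroller.domain and ends up in mdss_hw_irq_chip.
     */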
index 6f425c25d9fe4f18407edc6480b328fa779f439c..27d7b55b52c981d13a1cc1063f165b21be735ec9 100644
@@ -42,7 +42,7 @@
  *
  *     configured:
  *     The block is allocated to some client, and assigned to that
- *     client in MDP5_MDP_SMP_ALLOC registers.
+ *     client in MDP5_SMP_ALLOC registers.
  *
  *     inuse:
  *     The block is being actively used by a client.
@@ -59,7 +59,7 @@
  *     mdp5_smp_commit.
  *
  *  2) mdp5_smp_configure():
- *     As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
+ *     As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
  *     are configured for the union(pending, inuse)
  *     Current pending is copied to configured.
  *     It is assumed that mdp5_smp_request and mdp5_smp_configure not run
@@ -311,25 +311,25 @@ static void update_smp_state(struct mdp5_smp *smp,
                int idx = blk / 3;
                int fld = blk % 3;
 
-               val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx));
+               val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
 
                switch (fld) {
                case 0:
-                       val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK;
-                       val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid);
+                       val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
+                       val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
                        break;
                case 1:
-                       val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK;
-                       val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid);
+                       val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
+                       val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
                        break;
                case 2:
-                       val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK;
-                       val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid);
+                       val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
+                       val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
                        break;
                }
 
-               mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val);
-               mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val);
+               mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
+               mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
        }
 }
 
index e3892c263f2791b82a6a93fc4e2100710631265a..4a8a6f1f1151a7a824ce0da479e8cd3cb956e9a6 100644
@@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
                struct drm_atomic_state *old_state)
 {
        struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
        struct msm_drm_private *priv = old_state->dev->dev_private;
        struct msm_kms *kms = priv->kms;
-       int ncrtcs = old_state->dev->mode_config.num_crtc;
        int i;
 
-       for (i = 0; i < ncrtcs; i++) {
-               crtc = old_state->crtcs[i];
-
-               if (!crtc)
-                       continue;
-
+       for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
                if (!crtc->state->enable)
                        continue;
 
@@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev,
                struct drm_atomic_state *state, bool nonblock)
 {
        struct msm_drm_private *priv = dev->dev_private;
-       int nplanes = dev->mode_config.num_total_plane;
-       int ncrtcs = dev->mode_config.num_crtc;
        struct msm_commit *c;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
        int i, ret;
 
        ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev,
        /*
         * Figure out what crtcs we have:
         */
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-               if (!crtc)
-                       continue;
-               c->crtc_mask |= (1 << drm_crtc_index(crtc));
-       }
+       for_each_crtc_in_state(state, crtc, crtc_state, i)
+               c->crtc_mask |= drm_crtc_mask(crtc);
 
        /*
         * Figure out what fence to wait for:
         */
-       for (i = 0; i < nplanes; i++) {
-               struct drm_plane *plane = state->planes[i];
-               struct drm_plane_state *new_state = state->plane_states[i];
-
-               if (!plane)
-                       continue;
-
-               if ((plane->state->fb != new_state->fb) && new_state->fb) {
-                       struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
+       for_each_plane_in_state(state, plane, plane_state, i) {
+               if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
+                       struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
                        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-                       new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+                       plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
                }
        }
 
@@ -251,7 +238,7 @@ int msm_atomic_commit(struct drm_device *dev,
         * the software side now.
         */
 
-       drm_atomic_helper_swap_state(dev, state);
+       drm_atomic_helper_swap_state(state, true);
 
        /*
         * Everything below can be run asynchronously without the need to grab
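
The rewritten loops above all use the 4.7-era state iterators, which skip empty slots internally and hand back the queued state directly, so the NULL checks and the plane_states[] indexing disappear. The shape of the pattern:

    struct drm_plane *plane;
    struct drm_plane_state *plane_state;
    int i;

    /* visits only the planes actually carried in 'state'; plane_state
     * is the new (to-be-committed) state for that plane */
    for_each_plane_in_state(state, plane, plane_state, i) {
            /* ... */
    }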
index 9c654092ef7891c8e3edab6e4492373adb72caa7..26f859ec24b3888ac6b5d45ab01405c9b9a31cbb 100644
 #include "msm_gpu.h"
 #include "msm_kms.h"
 
+
+/*
+ * MSM driver version:
+ * - 1.0.0 - initial interface
+ * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
+ */
+#define MSM_VERSION_MAJOR      1
+#define MSM_VERSION_MINOR      1
+#define MSM_VERSION_PATCHLEVEL 0
+
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
        struct msm_drm_private *priv = dev->dev_private;
@@ -195,9 +205,9 @@ static int msm_drm_uninit(struct device *dev)
                kfree(vbl_ev);
        }
 
-       drm_kms_helper_poll_fini(ddev);
+       msm_gem_shrinker_cleanup(ddev);
 
-       drm_connector_unregister_all(ddev);
+       drm_kms_helper_poll_fini(ddev);
 
        drm_dev_unregister(ddev);
 
@@ -217,10 +227,8 @@ static int msm_drm_uninit(struct device *dev)
        flush_workqueue(priv->atomic_wq);
        destroy_workqueue(priv->atomic_wq);
 
-       if (kms) {
-               pm_runtime_disable(dev);
+       if (kms)
                kms->funcs->destroy(kms);
-       }
 
        if (gpu) {
                mutex_lock(&ddev->struct_mutex);
@@ -239,6 +247,8 @@ static int msm_drm_uninit(struct device *dev)
 
        component_unbind_all(dev, ddev);
 
+       msm_mdss_destroy(ddev);
+
        ddev->dev_private = NULL;
        drm_dev_unref(ddev);
 
@@ -284,6 +294,7 @@ static int msm_init_vram(struct drm_device *dev)
        if (node) {
                struct resource r;
                ret = of_address_to_resource(node, 0, &r);
+               of_node_put(node);
                if (ret)
                        return ret;
                size = r.end - r.start;
@@ -352,6 +363,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        }
 
        ddev->dev_private = priv;
+       priv->dev = ddev;
+
+       ret = msm_mdss_init(ddev);
+       if (ret) {
+               kfree(priv);
+               drm_dev_unref(ddev);
+               return ret;
+       }
 
        priv->wq = alloc_ordered_workqueue("msm", 0);
        priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
@@ -367,6 +386,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        /* Bind all our sub-components: */
        ret = component_bind_all(dev, ddev);
        if (ret) {
+               msm_mdss_destroy(ddev);
                kfree(priv);
                drm_dev_unref(ddev);
                return ret;
@@ -376,9 +396,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        if (ret)
                goto fail;
 
+       msm_gem_shrinker_init(ddev);
+
        switch (get_mdp_ver(pdev)) {
        case 4:
                kms = mdp4_kms_init(ddev);
+               priv->kms = kms;
                break;
        case 5:
                kms = mdp5_kms_init(ddev);
@@ -400,10 +423,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                goto fail;
        }
 
-       priv->kms = kms;
-
        if (kms) {
-               pm_runtime_enable(dev);
                ret = kms->funcs->hw_init(kms);
                if (ret) {
                        dev_err(dev, "kms hw init failed: %d\n", ret);
@@ -419,24 +439,20 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                goto fail;
        }
 
-       pm_runtime_get_sync(dev);
-       ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
-       pm_runtime_put_sync(dev);
-       if (ret < 0) {
-               dev_err(dev, "failed to install IRQ handler\n");
-               goto fail;
+       if (kms) {
+               pm_runtime_get_sync(dev);
+               ret = drm_irq_install(ddev, kms->irq);
+               pm_runtime_put_sync(dev);
+               if (ret < 0) {
+                       dev_err(dev, "failed to install IRQ handler\n");
+                       goto fail;
+               }
        }
 
        ret = drm_dev_register(ddev, 0);
        if (ret)
                goto fail;
 
-       ret = drm_connector_register_all(ddev);
-       if (ret) {
-               dev_err(dev, "failed to register connectors\n");
-               goto fail;
-       }
-
        drm_mode_config_reset(ddev);
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -690,6 +706,44 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
        return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
 }
 
+static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_msm_gem_madvise *args = data;
+       struct drm_gem_object *obj;
+       int ret;
+
+       switch (args->madv) {
+       case MSM_MADV_DONTNEED:
+       case MSM_MADV_WILLNEED:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       obj = drm_gem_object_lookup(file, args->handle);
+       if (!obj) {
+               ret = -ENOENT;
+               goto unlock;
+       }
+
+       ret = msm_gem_madvise(obj, args->madv);
+       if (ret >= 0) {
+               args->retained = ret;
+               ret = 0;
+       }
+
+       drm_gem_object_unreference(obj);
+
+unlock:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
+}
+
 static const struct drm_ioctl_desc msm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
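
From userspace the new ioctl is advisory: MSM_MADV_DONTNEED marks a buffer's backing pages reclaimable by the new shrinker, and MSM_MADV_WILLNEED pins them again, with 'retained' reporting whether the contents survived. A hedged usage sketch, assuming libdrm's drmIoctl() and the struct layout from the MSM uapi header:

    struct drm_msm_gem_madvise req = {
            .handle = bo_handle,           /* GEM handle of the buffer */
            .madv   = MSM_MADV_WILLNEED,
    };

    if (drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req) == 0 &&
        !req.retained) {
            /* purged while DONTNEED: buffer contents must be re-created */
    }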
@@ -698,6 +752,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static const struct vm_operations_struct vm_ops = {
@@ -730,7 +785,6 @@ static struct drm_driver msm_driver = {
        .open               = msm_open,
        .preclose           = msm_preclose,
        .lastclose          = msm_lastclose,
-       .set_busid          = drm_platform_set_busid,
        .irq_handler        = msm_irq,
        .irq_preinstall     = msm_irq_preinstall,
        .irq_postinstall    = msm_irq_postinstall,
@@ -764,8 +818,9 @@ static struct drm_driver msm_driver = {
        .name               = "msm",
        .desc               = "MSM Snapdragon DRM",
        .date               = "20130625",
-       .major              = 1,
-       .minor              = 0,
+       .major              = MSM_VERSION_MAJOR,
+       .minor              = MSM_VERSION_MINOR,
+       .patchlevel         = MSM_VERSION_PATCHLEVEL,
 };
 
 #ifdef CONFIG_PM_SLEEP
@@ -805,22 +860,146 @@ static int compare_of(struct device *dev, void *data)
        return dev->of_node == data;
 }
 
-static int add_components(struct device *dev, struct component_match **matchptr,
-               const char *name)
+/*
+ * Identify what components need to be added by parsing what remote-endpoints
+ * our MDP output ports are connected to. In the case of LVDS on MDP4, there
+ * is no external component that we need to add since LVDS is within MDP4
+ * itself.
+ */
+static int add_components_mdp(struct device *mdp_dev,
+                             struct component_match **matchptr)
 {
-       struct device_node *np = dev->of_node;
-       unsigned i;
+       struct device_node *np = mdp_dev->of_node;
+       struct device_node *ep_node;
+       struct device *master_dev;
+
+       /*
+        * on MDP4 based platforms, the MDP platform device is the component
+        * master that adds other display interface components to itself.
+        *
+        * on MDP5 based platforms, the MDSS platform device is the component
+        * master that adds MDP5 and other display interface components to
+        * itself.
+        */
+       if (of_device_is_compatible(np, "qcom,mdp4"))
+               master_dev = mdp_dev;
+       else
+               master_dev = mdp_dev->parent;
 
-       for (i = 0; ; i++) {
-               struct device_node *node;
+       for_each_endpoint_of_node(np, ep_node) {
+               struct device_node *intf;
+               struct of_endpoint ep;
+               int ret;
 
-               node = of_parse_phandle(np, name, i);
-               if (!node)
-                       break;
+               ret = of_graph_parse_endpoint(ep_node, &ep);
+               if (ret) {
+                       dev_err(mdp_dev, "unable to parse port endpoint\n");
+                       of_node_put(ep_node);
+                       return ret;
+               }
+
+               /*
+                * The LCDC/LVDS port on MDP4 is a special case where the
+                * remote-endpoint isn't a component that we need to add
+                */
+               if (of_device_is_compatible(np, "qcom,mdp4") &&
+                   ep.port == 0) {
+                       of_node_put(ep_node);
+                       continue;
+               }
+
+               /*
+                * It's okay if some of the ports don't have a remote endpoint
+                * specified. It just means that the port isn't connected to
+                * any external interface.
+                */
+               intf = of_graph_get_remote_port_parent(ep_node);
+               if (!intf) {
+                       of_node_put(ep_node);
+                       continue;
+               }
+
+               component_match_add(master_dev, matchptr, compare_of, intf);
+
+               of_node_put(intf);
+               of_node_put(ep_node);
+       }
+
+       return 0;
+}
+
+static int compare_name_mdp(struct device *dev, void *data)
+{
+       return (strstr(dev_name(dev), "mdp") != NULL);
+}
+
+static int add_display_components(struct device *dev,
+                                 struct component_match **matchptr)
+{
+       struct device *mdp_dev;
+       int ret;
+
+       /*
+        * MDP5 based devices don't have a flat hierarchy. There is a top level
+        * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
+        * children devices, find the MDP5 node, and then add the interfaces
+        * to our components list.
+        */
+       if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+               ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+               if (ret) {
+                       dev_err(dev, "failed to populate children devices\n");
+                       return ret;
+               }
+
+               mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
+               if (!mdp_dev) {
+                       dev_err(dev, "failed to find MDSS MDP node\n");
+                       of_platform_depopulate(dev);
+                       return -ENODEV;
+               }
+
+               put_device(mdp_dev);
 
-               component_match_add(dev, matchptr, compare_of, node);
+               /* add the MDP component itself */
+               component_match_add(dev, matchptr, compare_of,
+                                   mdp_dev->of_node);
+       } else {
+               /* MDP4 */
+               mdp_dev = dev;
        }
 
+       ret = add_components_mdp(mdp_dev, matchptr);
+       if (ret)
+               of_platform_depopulate(dev);
+
+       return ret;
+}
+
+/*
+ * We don't know what's the best binding to link the gpu with the drm device.
+ * For now, we just hunt for all the possible gpus that we support, and add them
+ * as components.
+ */
+static const struct of_device_id msm_gpu_match[] = {
+       { .compatible = "qcom,adreno-3xx" },
+       { .compatible = "qcom,kgsl-3d0" },
+       { },
+};
+
+static int add_gpu_components(struct device *dev,
+                             struct component_match **matchptr)
+{
+       struct device_node *np;
+
+       np = of_find_matching_node(NULL, msm_gpu_match);
+       if (!np)
+               return 0;
+
+       component_match_add(dev, matchptr, compare_of, np);
+
+       of_node_put(np);
+
        return 0;
 }
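Both add_display_components() and add_gpu_components() rely on a compare_of() callback that is not part of these hunks. For reference, a minimal sketch of such a callback, assuming the usual component/OF matching pattern rather than quoting the driver's exact code:

        static int compare_of(struct device *dev, void *data)
        {
                /* match a candidate device against the of_node recorded
                 * in the component match entry */
                return dev->of_node == data;
        }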
 
@@ -846,9 +1025,15 @@ static const struct component_master_ops msm_drm_ops = {
 static int msm_pdev_probe(struct platform_device *pdev)
 {
        struct component_match *match = NULL;
+       int ret;
 
-       add_components(&pdev->dev, &match, "connectors");
-       add_components(&pdev->dev, &match, "gpus");
+       ret = add_display_components(&pdev->dev, &match);
+       if (ret)
+               return ret;
+
+       ret = add_gpu_components(&pdev->dev, &match);
+       if (ret)
+               return ret;
 
        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
@@ -857,20 +1042,14 @@ static int msm_pdev_probe(struct platform_device *pdev)
 static int msm_pdev_remove(struct platform_device *pdev)
 {
        component_master_del(&pdev->dev, &msm_drm_ops);
+       of_platform_depopulate(&pdev->dev);
 
        return 0;
 }
 
-static const struct platform_device_id msm_id[] = {
-       { "mdp", 0 },
-       { }
-};
-
 static const struct of_device_id dt_match[] = {
-       { .compatible = "qcom,mdp4", .data = (void *) 4 },      /* mdp4 */
-       { .compatible = "qcom,mdp5", .data = (void *) 5 },      /* mdp5 */
-       /* to support downstream DT files */
-       { .compatible = "qcom,mdss_mdp", .data = (void *) 5 },  /* mdp5 */
+       { .compatible = "qcom,mdp4", .data = (void *)4 },       /* MDP4 */
+       { .compatible = "qcom,mdss", .data = (void *)5 },       /* MDP5 MDSS */
        {}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
@@ -883,12 +1062,12 @@ static struct platform_driver msm_platform_driver = {
                .of_match_table = dt_match,
                .pm     = &msm_pm_ops,
        },
-       .id_table   = msm_id,
 };
 
 static int __init msm_drm_register(void)
 {
        DBG("init");
+       msm_mdp_register();
        msm_dsi_register();
        msm_edp_register();
        msm_hdmi_register();
@@ -904,6 +1083,7 @@ static void __exit msm_drm_unregister(void)
        adreno_unregister();
        msm_edp_unregister();
        msm_dsi_unregister();
+       msm_mdp_unregister();
 }
 
 module_init(msm_drm_register);
index 5b2963f32291bd0e4c1423c893090fbdf63212dc..b4bc7f1ef717f19d4708108bc83eb40c864fdccc 100644 (file)
@@ -46,6 +46,7 @@
 struct msm_kms;
 struct msm_gpu;
 struct msm_mmu;
+struct msm_mdss;
 struct msm_rd_state;
 struct msm_perf_state;
 struct msm_gem_submit;
@@ -77,11 +78,16 @@ struct msm_vblank_ctrl {
 
 struct msm_drm_private {
 
+       struct drm_device *dev;
+
        struct msm_kms *kms;
 
        /* subordinate devices, if present: */
        struct platform_device *gpu_pdev;
 
+       /* top level MDSS wrapper device (for MDP5 only) */
+       struct msm_mdss *mdss;
+
        /* possibly this should be in the kms component, but it is
         * shared by both mdp4 and mdp5..
         */
@@ -147,6 +153,9 @@ struct msm_drm_private {
                struct drm_mm mm;
        } vram;
 
+       struct notifier_block vmap_notifier;
+       struct shrinker shrinker;
+
        struct msm_vblank_ctrl vblank_ctrl;
 };
 
@@ -165,6 +174,9 @@ void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file);
 
+void msm_gem_shrinker_init(struct drm_device *dev);
+void msm_gem_shrinker_cleanup(struct drm_device *dev);
+
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
                        struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -189,8 +201,13 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
                struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
-void *msm_gem_vaddr(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
+void *msm_gem_get_vaddr(struct drm_gem_object *obj);
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
+void msm_gem_put_vaddr(struct drm_gem_object *obj);
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
+void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_vunmap(struct drm_gem_object *obj);
 int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -257,6 +274,9 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
 }
 #endif
 
+void __init msm_mdp_register(void);
+void __exit msm_mdp_unregister(void);
+
 #ifdef CONFIG_DEBUG_FS
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
index 461dc8b873f04738cb43c31df0e025a6a89fde7b..95cf8fe72ee503dbdf2295c31c8c24a3576cd582 100644 (file)
@@ -49,24 +49,16 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
 
        for (i = 0; i < n; i++) {
                struct drm_gem_object *bo = msm_fb->planes[i];
-               if (bo)
-                       drm_gem_object_unreference_unlocked(bo);
+
+               drm_gem_object_unreference_unlocked(bo);
        }
 
        kfree(msm_fb);
 }
 
-static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
-               struct drm_file *file_priv, unsigned flags, unsigned color,
-               struct drm_clip_rect *clips, unsigned num_clips)
-{
-       return 0;
-}
-
 static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
        .create_handle = msm_framebuffer_create_handle,
        .destroy = msm_framebuffer_destroy,
-       .dirty = msm_framebuffer_dirty,
 };
 
 #ifdef CONFIG_DEBUG_FS
index d9759bf3482ed54c19692f7ba108075b17529320..ffd4a338ca12e27aab8cc4a8437f84817e1b7e23 100644 (file)
@@ -158,7 +158,11 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
        dev->mode_config.fb_base = paddr;
 
-       fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+       fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
+       if (IS_ERR(fbi->screen_base)) {
+               ret = PTR_ERR(fbi->screen_base);
+               goto fail_unlock;
+       }
        fbi->screen_size = fbdev->bo->size;
        fbi->fix.smem_start = paddr;
        fbi->fix.smem_len = fbdev->bo->size;
@@ -184,21 +188,7 @@ fail:
        return ret;
 }
 
-static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
-               u16 red, u16 green, u16 blue, int regno)
-{
-       DBG("fbdev: set gamma");
-}
-
-static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
-               u16 *red, u16 *green, u16 *blue, int regno)
-{
-       DBG("fbdev: get gamma");
-}
-
 static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
-       .gamma_set = msm_crtc_fb_gamma_set,
-       .gamma_get = msm_crtc_fb_gamma_get,
        .fb_probe = msm_fbdev_create,
 };
 
@@ -261,6 +251,7 @@ void msm_fbdev_free(struct drm_device *dev)
 
        /* this will free the backing object */
        if (fbdev->fb) {
+               msm_gem_put_vaddr(fbdev->bo);
                drm_framebuffer_unregister_private(fbdev->fb);
                drm_framebuffer_remove(fbdev->fb);
        }
index 7daf4054dd2b78ea634641bf4554fa6cb3d3a444..6cd4af443139cbdf858a0ba24baeddb01292b785 100644 (file)
@@ -276,6 +276,26 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
        return offset;
 }
 
+static void
+put_iova(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct msm_drm_private *priv = obj->dev->dev_private;
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       int id;
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+       for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+               struct msm_mmu *mmu = priv->mmus[id];
+               if (mmu && msm_obj->domain[id].iova) {
+                       uint32_t offset = msm_obj->domain[id].iova;
+                       mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
+                       msm_obj->domain[id].iova = 0;
+               }
+       }
+}
+
 /* should be called under struct_mutex.. although it can be called
  * from atomic context without struct_mutex to acquire an extra
  * iova ref if you know one is already held.
@@ -388,7 +408,7 @@ fail:
        return ret;
 }
 
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -398,19 +418,94 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
                        return ERR_CAST(pages);
                msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+               if (msm_obj->vaddr == NULL)
+                       return ERR_PTR(-ENOMEM);
        }
+       msm_obj->vmap_count++;
        return msm_obj->vaddr;
 }
 
-void *msm_gem_vaddr(struct drm_gem_object *obj)
+void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 {
        void *ret;
        mutex_lock(&obj->dev->struct_mutex);
-       ret = msm_gem_vaddr_locked(obj);
+       ret = msm_gem_get_vaddr_locked(obj);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
 }
 
+void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+       WARN_ON(msm_obj->vmap_count < 1);
+       msm_obj->vmap_count--;
+}
+
+void msm_gem_put_vaddr(struct drm_gem_object *obj)
+{
+       mutex_lock(&obj->dev->struct_mutex);
+       msm_gem_put_vaddr_locked(obj);
+       mutex_unlock(&obj->dev->struct_mutex);
+}
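With the get/put rename, the kernel mapping becomes a refcounted resource: every msm_gem_get_vaddr() must be balanced by a msm_gem_put_vaddr() so that vmap_count can drop back to zero and the vmap shrinker may reclaim the mapping. A minimal sketch of the expected calling pattern, from a hypothetical caller:

        void *ptr = msm_gem_get_vaddr(obj);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
        /* ... CPU access to the buffer contents ... */
        msm_gem_put_vaddr(obj);        /* mapping is now reclaimable */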
+
+/* Update madvise status; returns true if the object's backing pages have
+ * not been purged, false otherwise.
+ */
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+       if (msm_obj->madv != __MSM_MADV_PURGED)
+               msm_obj->madv = madv;
+
+       return (msm_obj->madv != __MSM_MADV_PURGED);
+}
+
+void msm_gem_purge(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       WARN_ON(!is_purgeable(msm_obj));
+       WARN_ON(obj->import_attach);
+
+       put_iova(obj);
+
+       msm_gem_vunmap(obj);
+
+       put_pages(obj);
+
+       msm_obj->madv = __MSM_MADV_PURGED;
+
+       drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+       drm_gem_free_mmap_offset(obj);
+
+       /* Our goal here is to return as much of the memory as
+        * possible back to the system, as we are called from OOM.
+        * To do this we must instruct the shmfs to drop all of its
+        * backing pages, *now*.
+        */
+       shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
+
+       invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
+                       0, (loff_t)-1);
+}
+
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+       if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
+               return;
+
+       vunmap(msm_obj->vaddr);
+       msm_obj->vaddr = NULL;
+}
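Taken together, madvise and purge give the driver a purgeable-buffer flow: an object marked MSM_MADV_DONTNEED may lose its backing pages under memory pressure, so a caller has to re-mark it WILLNEED and check the return value before trusting its contents again. A sketch of that lifecycle for a hypothetical in-kernel caller, assuming struct_mutex is held:

        /* buffer contents are disposable from here on */
        msm_gem_madvise(obj, MSM_MADV_DONTNEED);

        /* ... later, before reusing the buffer ... */
        if (!msm_gem_madvise(obj, MSM_MADV_WILLNEED)) {
                /* the shrinker purged the pages in the meantime;
                 * the contents are gone and must be regenerated */
        }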
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive)
@@ -462,6 +557,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
                struct msm_gpu *gpu, bool exclusive, struct fence *fence)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
        msm_obj->gpu = gpu;
        if (exclusive)
                reservation_object_add_excl_fence(msm_obj->resv, fence);
@@ -530,13 +626,27 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
        struct reservation_object_list *fobj;
        struct fence *fence;
        uint64_t off = drm_vma_node_start(&obj->vma_node);
+       const char *madv;
 
        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
-       seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
+       switch (msm_obj->madv) {
+       case __MSM_MADV_PURGED:
+               madv = " purged";
+               break;
+       case MSM_MADV_DONTNEED:
+               madv = " purgeable";
+               break;
+       case MSM_MADV_WILLNEED:
+       default:
+               madv = "";
+               break;
+       }
+
+       seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        obj->name, obj->refcount.refcount.counter,
-                       off, msm_obj->vaddr, obj->size);
+                       off, msm_obj->vaddr, obj->size, madv);
 
        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
@@ -576,9 +686,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 void msm_gem_free_object(struct drm_gem_object *obj)
 {
        struct drm_device *dev = obj->dev;
-       struct msm_drm_private *priv = obj->dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       int id;
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
@@ -587,13 +695,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
        list_del(&msm_obj->mm_list);
 
-       for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
-               struct msm_mmu *mmu = priv->mmus[id];
-               if (mmu && msm_obj->domain[id].iova) {
-                       uint32_t offset = msm_obj->domain[id].iova;
-                       mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
-               }
-       }
+       put_iova(obj);
 
        if (obj->import_attach) {
                if (msm_obj->vaddr)
@@ -607,7 +709,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 
                drm_prime_gem_destroy(obj, msm_obj->sgt);
        } else {
-               vunmap(msm_obj->vaddr);
+               msm_gem_vunmap(obj);
                put_pages(obj);
        }
 
@@ -686,6 +788,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
                msm_obj->vram_node = (void *)&msm_obj[1];
 
        msm_obj->flags = flags;
+       msm_obj->madv = MSM_MADV_WILLNEED;
 
        if (resv) {
                msm_obj->resv = resv;
@@ -727,9 +830,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
        return obj;
 
 fail:
-       if (obj)
-               drm_gem_object_unreference(obj);
-
+       drm_gem_object_unreference(obj);
        return ERR_PTR(ret);
 }
 
@@ -772,8 +873,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
        return obj;
 
 fail:
-       if (obj)
-               drm_gem_object_unreference_unlocked(obj);
-
+       drm_gem_object_unreference_unlocked(obj);
        return ERR_PTR(ret);
 }
index 9facd4b6ffd9366a5a8105bf4a0c3d33f4a439f8..b2f13cfe945edd359a151083c1ee5d15f4a16a65 100644 (file)
@@ -29,6 +29,16 @@ struct msm_gem_object {
 
        uint32_t flags;
 
+       /**
+        * Advice: are the backing pages purgeable?
+        */
+       uint8_t madv;
+
+       /**
+        * count of active vmap'ing
+        */
+       uint8_t vmap_count;
+
        /* An object is either:
         *  inactive - on priv->inactive_list
         *  active   - on one of the gpu's active_list..  well, at
@@ -72,7 +82,16 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
        return msm_obj->gpu != NULL;
 }
 
-#define MAX_CMDS 4
+static inline bool is_purgeable(struct msm_gem_object *msm_obj)
+{
+       return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
+                       !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
+}
+
+static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
+{
+       return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
+}
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
  * associated with the cmdstream submission for synchronization (and
@@ -95,7 +114,7 @@ struct msm_gem_submit {
                uint32_t size;  /* in dwords */
                uint32_t iova;
                uint32_t idx;   /* cmdstream buffer idx in bos[] */
-       } cmd[MAX_CMDS];
+       } *cmd;  /* array of size nr_cmds */
        struct {
                uint32_t flags;
                struct msm_gem_object *obj;
index 6b90890faffec93f8586b5f2a1e251338d2a3102..60bb290700cef9c32fc2ca0dd2db229a6a7ffedf 100644 (file)
@@ -33,12 +33,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 
 void *msm_gem_prime_vmap(struct drm_gem_object *obj)
 {
-       return msm_gem_vaddr(obj);
+       return msm_gem_get_vaddr(obj);
 }
 
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
-       /* TODO msm_gem_vunmap() */
+       msm_gem_put_vaddr(obj);
 }
 
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
new file mode 100644 (file)
index 0000000..283d284
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+
+static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+{
+       if (!mutex_is_locked(mutex))
+               return false;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+       return mutex->owner == task;
+#else
+       /* Since UP may be pre-empted, we cannot assume that we own the lock */
+       return false;
+#endif
+}
+
+static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+{
+       if (!mutex_trylock(&dev->struct_mutex)) {
+               if (!mutex_is_locked_by(&dev->struct_mutex, current))
+                       return false;
+               *unlock = false;
+       } else {
+               *unlock = true;
+       }
+
+       return true;
+}
+
+static unsigned long
+msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+       struct msm_drm_private *priv =
+               container_of(shrinker, struct msm_drm_private, shrinker);
+       struct drm_device *dev = priv->dev;
+       struct msm_gem_object *msm_obj;
+       unsigned long count = 0;
+       bool unlock;
+
+       if (!msm_gem_shrinker_lock(dev, &unlock))
+               return 0;
+
+       list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+               if (is_purgeable(msm_obj))
+                       count += msm_obj->base.size >> PAGE_SHIFT;
+       }
+
+       if (unlock)
+               mutex_unlock(&dev->struct_mutex);
+
+       return count;
+}
+
+static unsigned long
+msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+       struct msm_drm_private *priv =
+               container_of(shrinker, struct msm_drm_private, shrinker);
+       struct drm_device *dev = priv->dev;
+       struct msm_gem_object *msm_obj;
+       unsigned long freed = 0;
+       bool unlock;
+
+       if (!msm_gem_shrinker_lock(dev, &unlock))
+               return SHRINK_STOP;
+
+       list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+               if (freed >= sc->nr_to_scan)
+                       break;
+               if (is_purgeable(msm_obj)) {
+                       msm_gem_purge(&msm_obj->base);
+                       freed += msm_obj->base.size >> PAGE_SHIFT;
+               }
+       }
+
+       if (unlock)
+               mutex_unlock(&dev->struct_mutex);
+
+       if (freed > 0)
+               pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+
+       return freed;
+}
+
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+       struct msm_drm_private *priv =
+               container_of(nb, struct msm_drm_private, vmap_notifier);
+       struct drm_device *dev = priv->dev;
+       struct msm_gem_object *msm_obj;
+       unsigned unmapped = 0;
+       bool unlock;
+
+       if (!msm_gem_shrinker_lock(dev, &unlock))
+               return NOTIFY_DONE;
+
+       list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+               if (is_vunmapable(msm_obj)) {
+                       msm_gem_vunmap(&msm_obj->base);
+                       /* Since we don't know any better, let's bail after a
+                        * few, and if necessary the shrinker will be invoked
+                        * again.  Seems better than unmapping *everything*.
+                        */
+                       if (++unmapped >= 15)
+                               break;
+               }
+       }
+
+       if (unlock)
+               mutex_unlock(&dev->struct_mutex);
+
+       *(unsigned long *)ptr += unmapped;
+
+       if (unmapped > 0)
+               pr_info_ratelimited("Purging %u vmaps\n", unmapped);
+
+       return NOTIFY_DONE;
+}
+
+/**
+ * msm_gem_shrinker_init - Initialize msm shrinker
+ * @dev: drm device
+ *
+ * This function registers and sets up the msm shrinker.
+ */
+void msm_gem_shrinker_init(struct drm_device *dev)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       priv->shrinker.count_objects = msm_gem_shrinker_count;
+       priv->shrinker.scan_objects = msm_gem_shrinker_scan;
+       priv->shrinker.seeks = DEFAULT_SEEKS;
+       WARN_ON(register_shrinker(&priv->shrinker));
+
+       priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
+       WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
+}
+
+/**
+ * msm_gem_shrinker_cleanup - Clean up msm shrinker
+ * @dev: drm device
+ *
+ * This function unregisters the msm shrinker.
+ */
+void msm_gem_shrinker_cleanup(struct drm_device *dev)
+{
+       struct msm_drm_private *priv = dev->dev_private;
+       WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+       unregister_shrinker(&priv->shrinker);
+}
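For orientation, the shrinker core drives these two hooks in a count-then-scan sequence. Roughly, as a sketch of the expected calling convention rather than code from this patch:

        struct shrink_control sc = { .nr_to_scan = 0 };
        unsigned long nr, freed;

        nr = shrinker->count_objects(shrinker, &sc);
        if (nr) {
                sc.nr_to_scan = batch;  /* batch size chosen by the MM core */
                freed = shrinker->scan_objects(shrinker, &sc);
                /* a SHRINK_STOP return tells the core to back off */
        }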
index b89ca5174863e7ab73c48117605ded1d56626e19..9766f9ae4b7d9a2593550bf7d013f988b61de46d 100644 (file)
 #define BO_PINNED   0x2000
 
 static struct msm_gem_submit *submit_create(struct drm_device *dev,
-               struct msm_gpu *gpu, int nr)
+               struct msm_gpu *gpu, int nr_bos, int nr_cmds)
 {
        struct msm_gem_submit *submit;
-       int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
+       int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
+                       (nr_cmds * sizeof(*submit->cmd));
 
        submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        if (!submit)
@@ -40,12 +41,15 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 
        submit->dev = dev;
        submit->gpu = gpu;
+       submit->fence = NULL;
        submit->pid = get_pid(task_pid(current));
+       submit->cmd = (void *)&submit->bos[nr_bos];
 
        /* initially, until copy_from_user() and bo lookup succeeds: */
        submit->nr_bos = 0;
        submit->nr_cmds = 0;
 
+       INIT_LIST_HEAD(&submit->node);
        INIT_LIST_HEAD(&submit->bo_list);
        ww_acquire_init(&submit->ticket, &reservation_ww_class);
 
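The reworked submit_create() drops the fixed MAX_CMDS limit by folding both trailing arrays into the one allocation, with submit->cmd pointing just past bos[nr_bos]. A sketch of the resulting layout, restating the hunk above:

        /*
         * A single kmalloc() now covers everything:
         *
         *   [ struct msm_gem_submit ][ bos[0..nr_bos-1] ][ cmd[0..nr_cmds-1] ]
         *                              ^                   ^
         *                              submit->bos         submit->cmd
         */
        submit->cmd = (void *)&submit->bos[nr_bos];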
@@ -75,6 +79,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                void __user *userptr =
                        u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
 
+               /* make sure we don't have garbage flags, in case we hit
+                * an error path before the flags are initialized:
+                */
+               submit->bos[i].flags = 0;
+
                ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
                if (ret) {
                        ret = -EFAULT;
@@ -272,7 +281,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
        /* For now, just map the entire thing.  Eventually we probably want
         * to do it page-by-page, w/ kmap() if not vmap()d..
         */
-       ptr = msm_gem_vaddr_locked(&obj->base);
+       ptr = msm_gem_get_vaddr_locked(&obj->base);
 
        if (IS_ERR(ptr)) {
                ret = PTR_ERR(ptr);
@@ -325,6 +334,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
                last_offset = off;
        }
 
+       msm_gem_put_vaddr_locked(&obj->base);
+
        return 0;
 }
 
@@ -362,14 +373,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        if (args->pipe != MSM_PIPE_3D0)
                return -EINVAL;
 
-       if (args->nr_cmds > MAX_CMDS)
-               return -EINVAL;
-
-       submit = submit_create(dev, gpu, args->nr_bos);
-       if (!submit)
-               return -ENOMEM;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
 
-       mutex_lock(&dev->struct_mutex);
+       submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
+       if (!submit) {
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
 
        ret = submit_lookup_objects(submit, args, file);
        if (ret)
@@ -455,6 +467,7 @@ out:
        submit_cleanup(submit);
        if (ret)
                msm_gem_submit_free(submit);
+out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
index a7a0b6d9b057c88a849a4a80ed86e30adb3fb6b4..3a294d0da3a00e8222afe8134b1fe894fb1aa3d2 100644 (file)
@@ -59,10 +59,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
                return -EINVAL;
 
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-               u32 pa = sg_phys(sg) - sg->offset;
+               dma_addr_t pa = sg_phys(sg) - sg->offset;
                size_t bytes = sg->length + sg->offset;
 
-               VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+               VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
 
                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
                if (unmapped < bytes)
                        return unmapped;
 
-               VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+               VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
 
                BUG_ON(!PAGE_ALIGNED(bytes));
 
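The switch from u32 to dma_addr_t for the page address matters on systems whose physical addresses are wider than 32 bits (e.g. ARM LPAE), where a u32 would silently truncate the upper bits. A small illustrative sketch of the hazard being fixed, not code from the patch:

        dma_addr_t pa = sg_phys(sg) - sg->offset;  /* full-width address */
        u32 truncated = (u32)pa;                   /* may drop bits 32+ on LPAE */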
index e32222c3d44f3710a2c6c6068daad543c13247da..40e41e5cdbc6fad17d970cb1b645ba90cf0cbe75 100644 (file)
@@ -61,10 +61,8 @@ struct msm_kms_funcs {
 struct msm_kms {
        const struct msm_kms_funcs *funcs;
 
-       /* irq handling: */
-       bool in_irq;
-       struct list_head irq_list;    /* list of mdp4_irq */
-       uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+       /* irq number to be passed on to drm_irq_install */
+       int irq;
 };
 
 static inline void msm_kms_init(struct msm_kms *kms,
@@ -75,5 +73,7 @@ static inline void msm_kms_init(struct msm_kms *kms,
 
 struct msm_kms *mdp4_kms_init(struct drm_device *dev);
 struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+int msm_mdss_init(struct drm_device *dev);
+void msm_mdss_destroy(struct drm_device *dev);
 
 #endif /* __MSM_KMS_H__ */
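The new kms->irq field replaces the open-coded irq bookkeeping; the core driver is expected to hand it straight to drm_irq_install(). A sketch of that hand-off, assuming companion msm_drv.c code that is not part of this hunk:

        /* in the driver load path, once the kms implementation has set kms->irq */
        ret = drm_irq_install(ddev, kms->irq);
        if (ret < 0)
                dev_err(dev, "failed to install IRQ handler\n");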
index 830857c47c86147614dfb64ac962a5247b6acc14..17fe4e53e0d163dbb262bf0607ff832acdcea2dd 100644 (file)
@@ -132,7 +132,7 @@ static ssize_t perf_read(struct file *file, char __user *buf,
                size_t sz, loff_t *ppos)
 {
        struct msm_perf_state *perf = file->private_data;
-       int n = 0, ret;
+       int n = 0, ret = 0;
 
        mutex_lock(&perf->read_lock);
 
@@ -143,9 +143,10 @@ static ssize_t perf_read(struct file *file, char __user *buf,
        }
 
        n = min((int)sz, perf->buftot - perf->bufpos);
-       ret = copy_to_user(buf, &perf->buf[perf->bufpos], n);
-       if (ret)
+       if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
+               ret = -EFAULT;
                goto out;
+       }
 
        perf->bufpos += n;
        *ppos += n;
index b48f73ac63896a087938ae6225836a5d30d1a482..3a5fdfcd67aeb7d29e315ff54a1eda27bf42372a 100644 (file)
  * This bypasses drm_debugfs_create_files() mainly because we need to use
  * our own fops for a bit more control.  In particular, we don't want to
  * do anything if userspace doesn't have the debugfs file open.
+ *
+ * The module-param "rd_full", which defaults to false, enables snapshotting
+ * all (non-written) buffers in the submit, rather than just cmdstream bo's.
+ * This is useful to capture the contents of (for example) vbo's or textures,
+ * or shader programs (if not emitted inline in cmdstream).
  */
 
 #ifdef CONFIG_DEBUG_FS
 #include "msm_gpu.h"
 #include "msm_gem.h"
 
+static bool rd_full = false;
+MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
+module_param_named(rd_full, rd_full, bool, 0600);
+
 enum rd_sect_type {
        RD_NONE,
        RD_TEST,       /* ascii text */
@@ -140,9 +149,10 @@ static ssize_t rd_read(struct file *file, char __user *buf,
                goto out;
 
        n = min_t(int, sz, circ_count_to_end(&rd->fifo));
-       ret = copy_to_user(buf, fptr, n);
-       if (ret)
+       if (copy_to_user(buf, fptr, n)) {
+               ret = -EFAULT;
                goto out;
+       }
 
        fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
        *ppos += n;
@@ -277,6 +287,31 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor)
        kfree(rd);
 }
 
+static void snapshot_buf(struct msm_rd_state *rd,
+               struct msm_gem_submit *submit, int idx,
+               uint32_t iova, uint32_t size)
+{
+       struct msm_gem_object *obj = submit->bos[idx].obj;
+       const char *buf;
+
+       buf = msm_gem_get_vaddr_locked(&obj->base);
+       if (IS_ERR(buf))
+               return;
+
+       if (iova) {
+               buf += iova - submit->bos[idx].iova;
+       } else {
+               iova = submit->bos[idx].iova;
+               size = obj->base.size;
+       }
+
+       rd_write_section(rd, RD_GPUADDR,
+                       (uint32_t[2]){ iova, size }, 8);
+       rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
+
+       msm_gem_put_vaddr_locked(&obj->base);
+}
+
 /* called under struct_mutex */
 void msm_rd_dump_submit(struct msm_gem_submit *submit)
 {
@@ -300,24 +335,27 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
 
        rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
 
-       /* could be nice to have an option (module-param?) to snapshot
-        * all the bo's associated with the submit.  Handy to see vtx
-        * buffers, etc.  For now just the cmdstream bo's is enough.
-        */
+       if (rd_full) {
+               for (i = 0; i < submit->nr_bos; i++) {
+                       /* buffers that are written to probably don't start out
+                        * with anything interesting:
+                        */
+                       if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+                               continue;
+
+                       snapshot_buf(rd, submit, i, 0, 0);
+               }
+       }
 
        for (i = 0; i < submit->nr_cmds; i++) {
-               uint32_t idx  = submit->cmd[i].idx;
                uint32_t iova = submit->cmd[i].iova;
                uint32_t szd  = submit->cmd[i].size; /* in dwords */
-               struct msm_gem_object *obj = submit->bos[idx].obj;
-               const char *buf = msm_gem_vaddr_locked(&obj->base);
 
-               buf += iova - submit->bos[idx].iova;
-
-               rd_write_section(rd, RD_GPUADDR,
-                               (uint32_t[2]){ iova, szd * 4 }, 8);
-               rd_write_section(rd, RD_BUFFER_CONTENTS,
-                               buf, szd * 4);
+               /* snapshot cmdstream bo's (if we haven't already): */
+               if (!rd_full) {
+                       snapshot_buf(rd, submit, submit->cmd[i].idx,
+                                       submit->cmd[i].iova, szd * 4);
+               }
 
                switch (submit->cmd[i].type) {
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
index 1f14b908b22136117eb2b37179d05abfedce2b13..f326cf6a32e64473461e0b7859101f1a404c0318 100644 (file)
@@ -39,7 +39,11 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
                goto fail;
        }
 
-       ring->start = msm_gem_vaddr_locked(ring->bo);
+       ring->start = msm_gem_get_vaddr_locked(ring->bo);
+       if (IS_ERR(ring->start)) {
+               ret = PTR_ERR(ring->start);
+               goto fail;
+       }
        ring->end   = ring->start + (size / 4);
        ring->cur   = ring->start;
 
@@ -55,7 +59,9 @@ fail:
 
 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
 {
-       if (ring->bo)
+       if (ring->bo) {
+               msm_gem_put_vaddr(ring->bo);
                drm_gem_object_unreference_unlocked(ring->bo);
+       }
        kfree(ring);
 }
index 5ab13e7939db7d0085aecd41ac18be7a8ee5782f..2922a82cba8e4dd8e872ae7aae383279a32366b2 100644 (file)
@@ -3,13 +3,7 @@ config DRM_NOUVEAU
        depends on DRM && PCI
         select FW_LOADER
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
-       select FB
-       select FRAMEBUFFER_CONSOLE if !EXPERT
        select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
        select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
        select X86_PLATFORM_DEVICES if ACPI && X86
index 6f318c54da3347577555b6a636d7132214d4be6d..0cb7a18cde2615e8a074cea9da9359111e74ecc6 100644 (file)
@@ -785,14 +785,14 @@ nv_crtc_disable(struct drm_crtc *crtc)
        nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
 }
 
-static void
-nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
+static int
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
                  uint32_t size)
 {
-       int end = (start + size > 256) ? 256 : start + size, i;
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       int i;
 
-       for (i = start; i < end; i++) {
+       for (i = 0; i < size; i++) {
                nv_crtc->lut.r[i] = r[i];
                nv_crtc->lut.g[i] = g[i];
                nv_crtc->lut.b[i] = b[i];
@@ -805,10 +805,12 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
         */
        if (!nv_crtc->base.primary->fb) {
                nv_crtc->lut.depth = 0;
-               return;
+               return 0;
        }
 
        nv_crtc_gamma_load(crtc);
+
+       return 0;
 }
 
 static int
index aea81a547e8514621ac9aaaa05f402fbe39f3233..34c0f2f675480127c0b9f5a8c3ddf18ba0a4a5b6 100644 (file)
@@ -125,18 +125,8 @@ nv04_display_destroy(struct drm_device *dev)
        struct nv04_display *disp = nv04_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_encoder *encoder;
-       struct drm_crtc *crtc;
        struct nouveau_crtc *nv_crtc;
 
-       /* Turn every CRTC off. */
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct drm_mode_set modeset = {
-                       .crtc = crtc,
-               };
-
-               drm_mode_set_config_internal(&modeset);
-       }
-
        /* Restore state */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
                encoder->enc_restore(&encoder->base.base);
index a665b78b2af5ee6f3ecef85883586061b1161c3c..434d1e29f279051fbe7086267828a5b03da0e968 100644 (file)
@@ -749,13 +749,8 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
 
                /* Disable the crtc to ensure a full modeset is
                 * performed whenever it's turned on again. */
-               if (crtc) {
-                       struct drm_mode_set modeset = {
-                               .crtc = crtc,
-                       };
-
-                       drm_mode_set_config_internal(&modeset);
-               }
+               if (crtc)
+                       drm_crtc_force_disable(crtc);
        }
 
        return 0;
index 331620a52afa69e8575728d1f4e1d6093a3d39dc..287a7d6fa4804d9dd0abe901d322f1fa3ed5ebd1 100644 (file)
@@ -29,6 +29,7 @@ struct nv_device_info_v0 {
 #define NV_DEVICE_INFO_V0_FERMI                                            0x07
 #define NV_DEVICE_INFO_V0_KEPLER                                           0x08
 #define NV_DEVICE_INFO_V0_MAXWELL                                          0x09
+#define NV_DEVICE_INFO_V0_PASCAL                                           0x0a
        __u8  family;
        __u8  pad06[2];
        __u64 ram_size;
index 982aad8fa645f5f13562a31c1fa5085cbb83e4cb..e6e9537537cf96e7eba26b32b44f6ed600e8b87e 100644 (file)
@@ -39,6 +39,7 @@
 #define KEPLER_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000a06f
 #define KEPLER_CHANNEL_GPFIFO_B                       /* cla06f.h */ 0x0000a16f
 #define MAXWELL_CHANNEL_GPFIFO_A                      /* cla06f.h */ 0x0000b06f
+#define PASCAL_CHANNEL_GPFIFO_A                       /* cla06f.h */ 0x0000c06f
 
 #define NV50_DISP                                     /* cl5070.h */ 0x00005070
 #define G82_DISP                                      /* cl5070.h */ 0x00008270
@@ -50,6 +51,8 @@
 #define GK110_DISP                                    /* cl5070.h */ 0x00009270
 #define GM107_DISP                                    /* cl5070.h */ 0x00009470
 #define GM200_DISP                                    /* cl5070.h */ 0x00009570
+#define GP100_DISP                                    /* cl5070.h */ 0x00009770
+#define GP104_DISP                                    /* cl5070.h */ 0x00009870
 
 #define NV31_MPEG                                                    0x00003174
 #define G82_MPEG                                                     0x00008274
@@ -86,6 +89,8 @@
 #define GK110_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000927d
 #define GM107_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000947d
 #define GM200_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000957d
+#define GP100_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000977d
+#define GP104_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000987d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                 /* cl507e.h */ 0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                  /* cl507e.h */ 0x0000827e
 #define MAXWELL_A                                     /* cl9097.h */ 0x0000b097
 #define MAXWELL_B                                     /* cl9097.h */ 0x0000b197
 
+#define PASCAL_A                                      /* cl9097.h */ 0x0000c097
+
 #define NV74_BSP                                                     0x000074b0
 
 #define GT212_MSVLD                                                  0x000085b1
 #define FERMI_DMA                                                    0x000090b5
 #define KEPLER_DMA_COPY_A                                            0x0000a0b5
 #define MAXWELL_DMA_COPY_A                                           0x0000b0b5
+#define PASCAL_DMA_COPY_A                                            0x0000c0b5
+#define PASCAL_DMA_COPY_B                                            0x0000c1b5
 
 #define FERMI_DECOMPRESS                                             0x000090b8
 
 #define KEPLER_COMPUTE_B                                             0x0000a1c0
 #define MAXWELL_COMPUTE_A                                            0x0000b0c0
 #define MAXWELL_COMPUTE_B                                            0x0000b1c0
+#define PASCAL_COMPUTE_A                                             0x0000c0c0
 
 #define NV74_CIPHER                                                  0x000074c1
 #endif
index c612dc1f1eb4ef651faa4ec398904a894b0ac814..7ea8aa7ca40859b0bcdf4cca65f57e4ba1caacd9 100644 (file)
@@ -16,9 +16,9 @@ enum nvkm_devidx {
        NVKM_SUBDEV_MC,
        NVKM_SUBDEV_BUS,
        NVKM_SUBDEV_TIMER,
+       NVKM_SUBDEV_INSTMEM,
        NVKM_SUBDEV_FB,
        NVKM_SUBDEV_LTC,
-       NVKM_SUBDEV_INSTMEM,
        NVKM_SUBDEV_MMU,
        NVKM_SUBDEV_BAR,
        NVKM_SUBDEV_PMU,
@@ -33,7 +33,10 @@ enum nvkm_devidx {
        NVKM_ENGINE_CE0,
        NVKM_ENGINE_CE1,
        NVKM_ENGINE_CE2,
-       NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2,
+       NVKM_ENGINE_CE3,
+       NVKM_ENGINE_CE4,
+       NVKM_ENGINE_CE5,
+       NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5,
 
        NVKM_ENGINE_CIPHER,
        NVKM_ENGINE_DISP,
@@ -50,7 +53,8 @@ enum nvkm_devidx {
 
        NVKM_ENGINE_NVENC0,
        NVKM_ENGINE_NVENC1,
-       NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1,
+       NVKM_ENGINE_NVENC2,
+       NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
 
        NVKM_ENGINE_NVDEC,
        NVKM_ENGINE_PM,
@@ -102,6 +106,7 @@ struct nvkm_device {
                NV_C0    = 0xc0,
                NV_E0    = 0xe0,
                GM100    = 0x110,
+               GP100    = 0x130,
        } card_type;
        u32 chipset;
        u8  chiprev;
@@ -136,7 +141,7 @@ struct nvkm_device {
        struct nvkm_volt *volt;
 
        struct nvkm_engine *bsp;
-       struct nvkm_engine *ce[3];
+       struct nvkm_engine *ce[6];
        struct nvkm_engine *cipher;
        struct nvkm_disp *disp;
        struct nvkm_dma *dma;
@@ -149,7 +154,7 @@ struct nvkm_device {
        struct nvkm_engine *mspdec;
        struct nvkm_engine *msppp;
        struct nvkm_engine *msvld;
-       struct nvkm_engine *nvenc[2];
+       struct nvkm_engine *nvenc[3];
        struct nvkm_engine *nvdec;
        struct nvkm_pm *pm;
        struct nvkm_engine *sec;
@@ -170,7 +175,6 @@ struct nvkm_device_func {
        void (*fini)(struct nvkm_device *, bool suspend);
        resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
        resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
-       bool cpu_coherent;
 };
 
 struct nvkm_device_quirk {
@@ -206,7 +210,7 @@ struct nvkm_device_chip {
        int (*volt    )(struct nvkm_device *, int idx, struct nvkm_volt **);
 
        int (*bsp     )(struct nvkm_device *, int idx, struct nvkm_engine **);
-       int (*ce[3]   )(struct nvkm_device *, int idx, struct nvkm_engine **);
+       int (*ce[6]   )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*cipher  )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*disp    )(struct nvkm_device *, int idx, struct nvkm_disp **);
        int (*dma     )(struct nvkm_device *, int idx, struct nvkm_dma **);
@@ -219,7 +223,7 @@ struct nvkm_device_chip {
        int (*mspdec  )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*msppp   )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*msvld   )(struct nvkm_device *, int idx, struct nvkm_engine **);
-       int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **);
+       int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*nvdec   )(struct nvkm_device *, int idx, struct nvkm_engine **);
        int (*pm      )(struct nvkm_device *, int idx, struct nvkm_pm **);
        int (*sec     )(struct nvkm_device *, int idx, struct nvkm_engine **);
index b5370cb56e3c2c4fbb539ffd9f0156c569af38c1..e5c9b6268dcca59efdaab12e5922b3a5e0ac2b2b 100644 (file)
@@ -28,6 +28,7 @@ struct nvkm_device_tegra {
        } iommu;
 
        int gpu_speedo;
+       int gpu_speedo_id;
 };
 
 struct nvkm_device_tegra_func {
index 594d719ba41e81edd21f8b079d8f6bfbd85a7a6a..d3d26a1e215dacc0d94d180bd4cdea4f89ebb48a 100644 (file)
@@ -7,4 +7,6 @@ int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
 #endif
index d4fdce27b2978173bab089f54004ba1f4f5392f1..e82049667ce495f534921611d0ef55e9aea454be 100644 (file)
@@ -32,4 +32,6 @@ int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
index 15ddfcf5e8dbf2e6d1a459a361436625099e7d43..ed92fec5292c5b41d7de5b2e4ed01ce0c04e2feb 100644 (file)
@@ -66,4 +66,5 @@ int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
 #endif
index 6515f5810a265e334a7c3d21af9c365bf3dcea19..89cf993078283f8bca3c5016329f9e5404806c93 100644 (file)
@@ -42,4 +42,5 @@ int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
 #endif
index e39a1fea930beefa28d0ab5ac2400cb7181952ad..a72f3290528a4a0c8bddca75c3d8f3b6e186c95c 100644 (file)
@@ -7,6 +7,9 @@ struct nvkm_bios {
        u32 size;
        u8 *data;
 
+       u32 image0_size;
+       u32 imaged_addr;
+
        u32 bmp_offset;
        u32 bit_offset;
 
@@ -22,10 +25,9 @@ struct nvkm_bios {
 u8  nvbios_checksum(const u8 *data, int size);
 u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
 int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
-
-#define nvbios_rd08(b,o) (b)->data[(o)]
-#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)])
-#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
+u8  nvbios_rd08(struct nvkm_bios *, u32 addr);
+u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
+u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
 
 int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
 #endif
index db10c11f0595deabf3f3acbcac7299b658a388a8..c5a6ebd5a478691c90c61175adec8cb4df6c4600 100644 (file)
@@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
 
 struct nvbios_ocfg {
-       u16 match;
+       u8  proto;
+       u8  flags;
        u16 clkcmp[2];
 };
 
@@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
 u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
-u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
+u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
                      u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
 u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
 #endif
index 0a734fd06acf347aeee52368ee159c62d99a4e75..3a410275fa71165c3adb6f0a7765e896ff7ce1ad 100644 (file)
@@ -56,6 +56,8 @@ struct nvkm_fb {
                int regions;
        } tile;
 
+       u8 page;
+
        struct nvkm_memory *mmu_rd;
        struct nvkm_memory *mmu_wr;
 };
@@ -91,6 +93,8 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 
 #include <subdev/bios.h>
 #include <subdev/bios/ramcfg.h>
index c6b90b6543b3e8751b5effbfb0f888f2e210fbe5..cd755baf9cab709b1384b2f1a0177117a5258721 100644 (file)
@@ -38,4 +38,5 @@ int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
+int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
 #endif
index 2e80682b2da1a7305d65c5911c099697120bef68..27d25b18d85c0d1da40adb950ac9f55826953f5f 100644 (file)
@@ -7,11 +7,14 @@ struct nvkm_mc {
        struct nvkm_subdev subdev;
 };
 
-void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
-void nvkm_mc_intr_unarm(struct nvkm_mc *);
-void nvkm_mc_intr_rearm(struct nvkm_mc *);
-void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx);
-void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
+void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
+void nvkm_mc_intr(struct nvkm_device *, bool *handled);
+void nvkm_mc_intr_unarm(struct nvkm_device *);
+void nvkm_mc_intr_rearm(struct nvkm_device *);
+void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable);
+void nvkm_mc_unk260(struct nvkm_device *, u32 data);
 
 int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
@@ -24,4 +27,5 @@ int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 #endif
index ddb913889d7e2b69307b4d823fae72ce9e9697c8..e6523e2cea9fee49300e14b194dfe87f5e7ed94f 100644 (file)
@@ -47,6 +47,7 @@ int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
+int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
 
 /* pcie functions */
 int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
index c6edd95a5b69c4e113eb9599ad4dbc6bbebe53db..b04c38c07761e60554396cae8e51f9114058f86e 100644 (file)
@@ -43,9 +43,8 @@ struct nvkm_secboot {
        const struct nvkm_secboot_func *func;
        struct nvkm_subdev subdev;
 
+       enum nvkm_devidx devidx;
        u32 base;
-       u32 irq_mask;
-       u32 enable_mask;
 };
 #define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
 
index 8fb575a92c48f69336b844eb09575fbfcc73beb5..71ebbfd4484fc9b824cdb84cb07d30449c7576e7 100644 (file)
@@ -8,10 +8,11 @@ struct nvkm_top {
        struct list_head device;
 };
 
-u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx);
-u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs);
-enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault);
-enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn);
+u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
+u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
+u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
+enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
+enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
 
 int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
 #endif
index feff55cff05bbf994ffed76ecf9ca05eac3055b6..b765f4ffcde63ba284aeba11ba0d757624e91253 100644 (file)
@@ -12,6 +12,9 @@ struct nvkm_volt {
                u32 uv;
                u8 vid;
        } vid[256];
+
+       u32 max_uv;
+       u32 min_uv;
 };
 
 int nvkm_volt_get(struct nvkm_volt *);
index eb7de487a2b3f16936c70b183c4945d1e26072a8..7bd4683216d016dcbc78aa088a2b7bd67d4453a5 100644 (file)
@@ -100,6 +100,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
        case NV_DEVICE_INFO_V0_FERMI:
        case NV_DEVICE_INFO_V0_KEPLER:
        case NV_DEVICE_INFO_V0_MAXWELL:
+       case NV_DEVICE_INFO_V0_PASCAL:
                return NVIF_CLASS_SW_GF100;
        }
 
index 5e3f3e826476cc0b71b64bc12d615841191cebd7..528bdeffb339ecaaa887c97887dc80427a153c77 100644 (file)
@@ -209,8 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &drm->ttm.bdev;
 
-       if (!nvxx_device(&drm->device)->func->cpu_coherent)
-               nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+       nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
        nvbo->page_shift = 12;
        if (drm->client.vm) {
@@ -424,13 +423,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
        if (ret)
                return ret;
 
-       /*
-        * TTM buffers allocated using the DMA API already have a mapping, let's
-        * use it instead.
-        */
-       if (!nvbo->force_coherent)
-               ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
-                                 &nvbo->kmap);
+       ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
 
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
@@ -442,12 +435,7 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
        if (!nvbo)
                return;
 
-       /*
-        * TTM buffers allocated using the DMA API already had a coherent
-        * mapping which we used, no need to unmap.
-        */
-       if (!nvbo->force_coherent)
-               ttm_bo_kunmap(&nvbo->kmap);
+       ttm_bo_kunmap(&nvbo->kmap);
 }
 
 void
@@ -506,35 +494,13 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
        return 0;
 }
 
-static inline void *
-_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
-{
-       struct ttm_dma_tt *dma_tt;
-       u8 *m = mem;
-
-       index *= sz;
-
-       if (m) {
-               /* kmap'd address, return the corresponding offset */
-               m += index;
-       } else {
-               /* DMA-API mapping, lookup the right address */
-               dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
-               m = dma_tt->cpu_address[index / PAGE_SIZE];
-               m += index % PAGE_SIZE;
-       }
-
-       return m;
-}
-#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
-
 void
 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
 {
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 
-       mem = nouveau_bo_mem_index(nvbo, index, mem);
+       mem += index;
 
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
@@ -548,7 +514,7 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 
-       mem = nouveau_bo_mem_index(nvbo, index, mem);
+       mem += index;
 
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
@@ -562,7 +528,7 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
 
-       mem = nouveau_bo_mem_index(nvbo, index, mem);
+       mem += index;
 
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
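
With the DMA-API lookup path gone, the three accessors above index the kmap'd virtual address directly; on a u32 pointer, mem += index advances index * sizeof(u32) bytes, which is the same byte offset the removed _nouveau_bo_mem_index() helper computed by hand with index *= sz. A minimal standalone check of that scaling (plain C, no nouveau types):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t buf[8] = { 0 };
            uint32_t *mem = buf;

            /* 'mem += index' on a u32 pointer advances index * sizeof(u32)
             * bytes: 3 * 4 = 12 here. */
            mem += 3;
            assert((char *)mem - (char *)buf == 12);
            return 0;
    }
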
@@ -1082,7 +1048,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                                ret = ttm_bo_move_accel_cleanup(bo,
                                                                &fence->base,
                                                                evict,
-                                                               no_wait_gpu,
                                                                new_mem);
                                nouveau_fence_unref(&fence);
                        }
@@ -1104,6 +1069,10 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
                            struct ttm_mem_reg *, struct ttm_mem_reg *);
                int (*init)(struct nouveau_channel *, u32 handle);
        } _methods[] = {
+               {  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
+               {  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
+               {  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
+               {  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
                {  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
                {  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
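
The new 0xc1b5/0xc0b5 entries are prepended because the table sorts newest-first: the init code tries entries in order and settles on the first class the device accepts, so Pascal's copy classes must come before the Kepler-era ones. A hedged sketch of that first-match pattern (try_class() is a hypothetical stand-in for the real object constructor):

    /* Illustrative only: walk a newest-first class table and keep the
     * first entry the hardware accepts, mirroring how _methods[] is
     * consumed. */
    static int pick_class(const unsigned int *classes, int n,
                          int (*try_class)(unsigned int oclass))
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (try_class(classes[i]) == 0)
                            return i;       /* first supported entry wins */
            }
            return -1;                      /* nothing supported */
    }
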
@@ -1289,6 +1258,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
        struct nouveau_drm_tile *new_tile = NULL;
        int ret = 0;
 
+       ret = ttm_bo_wait(bo, intr, no_wait_gpu);
+       if (ret)
+               return ret;
+
        if (nvbo->pin_refcnt)
                NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
 
@@ -1324,7 +1297,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
        /* Fallback to software copy. */
        ret = ttm_bo_wait(bo, intr, no_wait_gpu);
        if (ret == 0)
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+               ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
 
 out:
        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1488,14 +1461,6 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
        dev = drm->dev;
        pdev = device->dev;
 
-       /*
-        * Objects matching this condition have been marked as force_coherent,
-        * so use the DMA API for them.
-        */
-       if (!nvxx_device(&drm->device)->func->cpu_coherent &&
-           ttm->caching_state == tt_uncached)
-               return ttm_dma_populate(ttm_dma, dev->dev);
-
 #if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
                return ttm_agp_tt_populate(ttm);
@@ -1553,16 +1518,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
        dev = drm->dev;
        pdev = device->dev;
 
-       /*
-        * Objects matching this condition have been marked as force_coherent,
-        * so use the DMA API for them.
-        */
-       if (!nvxx_device(&drm->device)->func->cpu_coherent &&
-           ttm->caching_state == tt_uncached) {
-               ttm_dma_unpopulate(ttm_dma, dev->dev);
-               return;
-       }
-
 #if IS_ENABLED(CONFIG_AGP)
        if (drm->agp.bridge) {
                ttm_agp_tt_unpopulate(ttm);
index b1d2527c56256372007537399f0abaa11d4f0d49..f9b3c811187e117aba807697d3e27b984f0d0bf3 100644
@@ -191,7 +191,8 @@ static int
 nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
                    u32 engine, struct nouveau_channel **pchan)
 {
-       static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A,
+       static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
+                                       MAXWELL_CHANNEL_GPFIFO_A,
                                        KEPLER_CHANNEL_GPFIFO_B,
                                        KEPLER_CHANNEL_GPFIFO_A,
                                        FERMI_CHANNEL_GPFIFO,
index 7c77f960c8b8662ea171e98be8522166221110f7..afbf557b23d4395f34c15a299a6b5e2e9510c924 100644
@@ -47,7 +47,7 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
 {
        struct nouveau_crtc *nv_crtc =
                container_of(notify, typeof(*nv_crtc), vblank);
-       drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index);
+       drm_crtc_handle_vblank(&nv_crtc->base);
        return NVIF_NOTIFY_KEEP;
 }
 
@@ -495,6 +495,8 @@ nouveau_display_create(struct drm_device *dev)
 
        if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
                static const u16 oclass[] = {
+                       GP104_DISP,
+                       GP100_DISP,
                        GM200_DISP,
                        GM107_DISP,
                        GK110_DISP,
@@ -554,6 +556,7 @@ nouveau_display_destroy(struct drm_device *dev)
        nouveau_display_vblank_fini(dev);
 
        drm_kms_helper_poll_fini(dev);
+       drm_crtc_force_disable_all(dev);
        drm_mode_config_cleanup(dev);
 
        if (disp->dtor)
@@ -760,12 +763,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        /* Initialize a page flip struct */
        *s = (struct nouveau_page_flip_state)
-               { { }, event, nouveau_crtc(crtc)->index,
-                 fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
+               { { }, event, crtc, fb->bits_per_pixel, fb->pitches[0],
                  new_bo->bo.offset };
 
        /* Keep vblanks on during flip, for the target crtc of this flip */
-       drm_vblank_get(dev, nouveau_crtc(crtc)->index);
+       drm_crtc_vblank_get(crtc);
 
        /* Emit a page flip */
        if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
@@ -810,7 +812,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        return 0;
 
 fail_unreserve:
-       drm_vblank_put(dev, nouveau_crtc(crtc)->index);
+       drm_crtc_vblank_put(crtc);
        ttm_bo_unreserve(&old_bo->bo);
 fail_unpin:
        mutex_unlock(&cli->mutex);
@@ -842,17 +844,17 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
        s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
        if (s->event) {
                if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
-                       drm_arm_vblank_event(dev, s->crtc, s->event);
+                       drm_crtc_arm_vblank_event(s->crtc, s->event);
                } else {
-                       drm_send_vblank_event(dev, s->crtc, s->event);
+                       drm_crtc_send_vblank_event(s->crtc, s->event);
 
                        /* Give up ownership of vblank for page-flipped crtc */
-                       drm_vblank_put(dev, s->crtc);
+                       drm_crtc_vblank_put(s->crtc);
                }
        }
        else {
                /* Give up ownership of vblank for page-flipped crtc */
-               drm_vblank_put(dev, s->crtc);
+               drm_crtc_vblank_put(s->crtc);
        }
 
        list_del(&s->head);
@@ -873,9 +875,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
 
        if (!nouveau_finish_page_flip(chan, &state)) {
                if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
-                       nv_set_crtc_base(drm->dev, state.crtc, state.offset +
-                                        state.y * state.pitch +
-                                        state.x * state.bpp / 8);
+                       nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
+                                        state.offset + state.crtc->y *
+                                        state.pitch + state.crtc->x *
+                                        state.bpp / 8);
                }
        }
 
index 24273bacd885520c4d007c40a5efd2105d842d1b..0420ee861ea4a5d2f70f631867640182104bacd5 100644
@@ -28,7 +28,8 @@ int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
 struct nouveau_page_flip_state {
        struct list_head head;
        struct drm_pending_vblank_event *event;
-       int crtc, bpp, pitch, x, y;
+       struct drm_crtc *crtc;
+       int bpp, pitch;
        u64 offset;
 };
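
Storing the drm_crtc pointer instead of an index lets nouveau_flip_complete() (above) pull x/y straight from the crtc and compute the new scanout base as offset + y * pitch + x * bpp / 8. A worked example of that arithmetic with hypothetical values, assuming a 1920x1080 XRGB8888 framebuffer panned to (16, 32):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t offset = 0x100000;     /* hypothetical VRAM offset */
            int bpp = 32, pitch = 7680;     /* 1920 px * 4 bytes per px */
            int x = 16, y = 32;

            /* y rows down plus x pixels in: 32*7680 + 16*4 = 245824 B  */
            assert(offset + (uint64_t)y * pitch + x * bpp / 8
                   == 0x100000 + 245824);
            return 0;
    }
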
 
index 11f8dd9c0edb8657111ee2f340c364daea6df783..66c1280c0f1f2854aa501f810c50a11e6149eaaa 100644
  * Authors: Ben Skeggs
  */
 
-#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 
 #include "drmP.h"
@@ -200,6 +198,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
                case KEPLER_CHANNEL_GPFIFO_A:
                case KEPLER_CHANNEL_GPFIFO_B:
                case MAXWELL_CHANNEL_GPFIFO_A:
+               case PASCAL_CHANNEL_GPFIFO_A:
                        ret = nvc0_fence_create(drm);
                        break;
                default:
@@ -315,16 +314,19 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
        bool boot = false;
        int ret;
 
-       /*
-        * apple-gmux is needed on dual GPU MacBook Pro
-        * to probe the panel if we're the inactive GPU.
-        */
-       if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
-           apple_gmux_present() && pdev != vga_default_device() &&
-           !vga_switcheroo_handler_flags())
+       if (vga_switcheroo_client_probe_defer(pdev))
                return -EPROBE_DEFER;
 
-       /* remove conflicting drivers (vesafb, efifb etc) */
+       /* We need to check that the chipset is supported before booting
+        * fbdev off the hardware, as there's no way to put it back.
+        */
+       ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+       if (ret)
+               return ret;
+
+       nvkm_device_del(&device);
+
+       /* Remove conflicting drivers (vesafb, efifb etc). */
        aper = alloc_apertures(3);
        if (!aper)
                return -ENOMEM;
@@ -438,6 +440,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
        nouveau_vga_init(drm);
 
        if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+               if (!nvxx_device(&drm->device)->mmu) {
+                       ret = -ENOSYS;
+                       goto fail_device;
+               }
+
                ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
                                  0x1000, NULL, &drm->client.vm);
                if (ret)
@@ -498,7 +505,11 @@ nouveau_drm_unload(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
 
-       pm_runtime_get_sync(dev->dev);
+       if (nouveau_runtime_pm != 0) {
+               pm_runtime_get_sync(dev->dev);
+               pm_runtime_forbid(dev->dev);
+       }
+
        nouveau_fbcon_fini(dev);
        nouveau_accel_fini(drm);
        nouveau_hwmon_fini(dev);
@@ -970,7 +981,7 @@ driver_stub = {
        .gem_prime_vmap = nouveau_gem_prime_vmap,
        .gem_prime_vunmap = nouveau_gem_prime_vunmap,
 
-       .gem_free_object = nouveau_gem_object_del,
+       .gem_free_object_unlocked = nouveau_gem_object_del,
        .gem_open_object = nouveau_gem_object_open,
        .gem_close_object = nouveau_gem_object_close,
 
@@ -1078,7 +1089,6 @@ nouveau_drm_init(void)
        driver_pci = driver_stub;
        driver_pci.set_busid = drm_pci_set_busid;
        driver_platform = driver_stub;
-       driver_platform.set_busid = drm_platform_set_busid;
 
        nouveau_display_options();
 
index 57aaf98a26f9c3d5bcfd91e351f29c2e89b7c276..d1f248fd35061993db2529c4b93e557a4331e829 100644
@@ -552,6 +552,8 @@ nouveau_fbcon_init(struct drm_device *dev)
        if (ret)
                goto fini;
 
+       if (fbcon->helper.fbdev)
+               fbcon->helper.fbdev->pixmap.buf_align = 4;
        return 0;
 
 fini:
index 2e3a62d38fe9d48503daf8d1450c5fcf256f1e02..64c4ce7115ad8902d9b79714d8a476d12d1e71c3 100644
@@ -57,7 +57,8 @@ struct nouveau_fence_priv {
        int  (*context_new)(struct nouveau_channel *);
        void (*context_del)(struct nouveau_channel *);
 
-       u32 contexts, context_base;
+       u32 contexts;
+       u64 context_base;
        bool uevent;
 };
 
index 1ff4166af26ece5b20927f89c14297eb853841be..71f764bf4cc6fbca2713f974a5ed535f030e84a8 100644
@@ -534,6 +534,40 @@ nouveau_hwmon_get_in0_input(struct device *d,
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO,
                          nouveau_hwmon_get_in0_input, NULL, 0);
 
+static ssize_t
+nouveau_hwmon_get_in0_min(struct device *d,
+                           struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nvkm_volt *volt = nvxx_volt(&drm->device);
+
+       if (!volt || !volt->min_uv)
+               return -ENODEV;
+
+       return sprintf(buf, "%i\n", volt->min_uv / 1000);
+}
+
+static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO,
+                         nouveau_hwmon_get_in0_min, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_in0_max(struct device *d,
+                           struct device_attribute *a, char *buf)
+{
+       struct drm_device *dev = dev_get_drvdata(d);
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nvkm_volt *volt = nvxx_volt(&drm->device);
+
+       if (!volt || !volt->max_uv)
+               return -ENODEV;
+
+       return sprintf(buf, "%i\n", volt->max_uv / 1000);
+}
+
+static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO,
+                         nouveau_hwmon_get_in0_max, NULL, 0);
+
 static ssize_t
 nouveau_hwmon_get_in0_label(struct device *d,
                            struct device_attribute *a, char *buf)
@@ -594,6 +628,8 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {
 
 static struct attribute *hwmon_in0_attributes[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
+       &sensor_dev_attr_in0_min.dev_attr.attr,
+       &sensor_dev_attr_in0_max.dev_attr.attr,
        &sensor_dev_attr_in0_label.dev_attr.attr,
        NULL
 };
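
The divide by 1000 in the two new show callbacks is the hwmon ABI conversion: nvkm tracks min_uv/max_uv in microvolts, while the sysfs in0_min/in0_max files are specified in millivolts. A toy check of the conversion with hypothetical voltages:

    #include <assert.h>

    int main(void)
    {
            unsigned int min_uv = 825000, max_uv = 1212500;  /* hypothetical */

            assert(min_uv / 1000 == 825);    /* in0_min reads "825\n"   */
            assert(max_uv / 1000 == 1212);   /* integer division floors */
            return 0;
    }
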
index bcee91497eb9778fb1c8c266369b33cdfe44a2b7..1825dbc331926c84de69e13606f38cf4c80a2bed 100644
@@ -164,6 +164,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
        case NV_DEVICE_INFO_V0_FERMI:
        case NV_DEVICE_INFO_V0_KEPLER:
        case NV_DEVICE_INFO_V0_MAXWELL:
+       case NV_DEVICE_INFO_V0_PASCAL:
                node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
                break;
        default:
index 675e9e077a95c6ec7a69a6891a7d6f16b8c6b139..08f9c6fa0f7f210d3e3fd5a0fbe8f11ff40b1972 100644
@@ -212,7 +212,6 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
        ntfy->p->base.event = &ntfy->p->e.base;
        ntfy->p->base.file_priv = f;
        ntfy->p->base.pid = current->pid;
-       ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree;
        ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
        ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
 
index 0f3e4bb411cc99c9052beb5b862dd9927888ad7e..7d9248b8c66479b7b72c51a6e856e25428eebb49 100644
@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        uint32_t fg;
        uint32_t bg;
        uint32_t dsize;
-       uint32_t width;
        uint32_t *data = (uint32_t *)image->data;
        int ret;
 
@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        if (ret)
                return ret;
 
-       width = ALIGN(image->width, 8);
-       dsize = ALIGN(width * image->height, 32) >> 5;
-
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
                fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
                         ((image->dx + image->width) & 0xffff));
        OUT_RING(chan, bg);
        OUT_RING(chan, fg);
-       OUT_RING(chan, (image->height << 16) | width);
+       OUT_RING(chan, (image->height << 16) | image->width);
        OUT_RING(chan, (image->height << 16) | image->width);
        OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
 
+       dsize = ALIGN(image->width * image->height, 32) >> 5;
        while (dsize) {
                int iter_len = dsize > 128 ? 128 : dsize;
 
index 3ffc2b0057bfeaa0dd1f34ed57f6131f457ab121..7d0edcbcfca7794b9f05c55dd3184ee3ebf90f2f 100644
@@ -297,6 +297,8 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
                .pushbuf = 0xb0007d00,
        };
        static const s32 oclass[] = {
+               GP104_DISP_CORE_CHANNEL_DMA,
+               GP100_DISP_CORE_CHANNEL_DMA,
                GM200_DISP_CORE_CHANNEL_DMA,
                GM107_DISP_CORE_CHANNEL_DMA,
                GK110_DISP_CORE_CHANNEL_DMA,
@@ -1346,21 +1348,22 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
        return 0;
 }
 
-static void
+static int
 nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-                   uint32_t start, uint32_t size)
+                   uint32_t size)
 {
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-       u32 end = min_t(u32, start + size, 256);
        u32 i;
 
-       for (i = start; i < end; i++) {
+       for (i = 0; i < size; i++) {
                nv_crtc->lut.r[i] = r[i];
                nv_crtc->lut.g[i] = g[i];
                nv_crtc->lut.b[i] = b[i];
        }
 
        nv50_crtc_lut_load(crtc);
+
+       return 0;
 }
 
 static void
index 33d9ee0fac4084918c002d2164977a53e03ae65a..1aeb698e97079bd2e13c515f610a77155497b3fb 100644
@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
        struct nouveau_channel *chan = drm->channel;
-       uint32_t width, dwords, *data = (uint32_t *)image->data;
+       uint32_t dwords, *data = (uint32_t *)image->data;
        uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        uint32_t *palette = info->pseudo_palette;
        int ret;
@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        if (ret)
                return ret;
 
-       width = ALIGN(image->width, 32);
-       dwords = (width * image->height) >> 5;
-
        BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        OUT_RING(chan, 0);
        OUT_RING(chan, image->dy);
 
+       dwords = ALIGN(image->width * image->height, 32) >> 5;
        while (dwords) {
                int push = dwords > 2047 ? 2047 : dwords;
 
index a0913359ac05a02046f9286254b98fa58a650ef4..839f4c8c18056c5bce7dbd8c1511db77784609d8 100644
@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        struct nouveau_fbdev *nfbdev = info->par;
        struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
        struct nouveau_channel *chan = drm->channel;
-       uint32_t width, dwords, *data = (uint32_t *)image->data;
+       uint32_t dwords, *data = (uint32_t *)image->data;
        uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
        uint32_t *palette = info->pseudo_palette;
        int ret;
@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        if (ret)
                return ret;
 
-       width = ALIGN(image->width, 32);
-       dwords = (width * image->height) >> 5;
-
        BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
        if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
            info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
        OUT_RING  (chan, 0);
        OUT_RING  (chan, image->dy);
 
+       dwords = ALIGN(image->width * image->height, 32) >> 5;
        while (dwords) {
                int push = dwords > 2047 ? 2047 : dwords;
 
index b18557858f19e25c8fbbad4b34d5ec58cd4a8d17..19044aba265eb165efa73c74d76f8f042c28873d 100644
@@ -57,6 +57,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
        [NVKM_ENGINE_CE0     ] = "ce0",
        [NVKM_ENGINE_CE1     ] = "ce1",
        [NVKM_ENGINE_CE2     ] = "ce2",
+       [NVKM_ENGINE_CE3     ] = "ce3",
+       [NVKM_ENGINE_CE4     ] = "ce4",
+       [NVKM_ENGINE_CE5     ] = "ce5",
        [NVKM_ENGINE_CIPHER  ] = "cipher",
        [NVKM_ENGINE_DISP    ] = "disp",
        [NVKM_ENGINE_DMAOBJ  ] = "dma",
@@ -71,6 +74,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
        [NVKM_ENGINE_MSVLD   ] = "msvld",
        [NVKM_ENGINE_NVENC0  ] = "nvenc0",
        [NVKM_ENGINE_NVENC1  ] = "nvenc1",
+       [NVKM_ENGINE_NVENC2  ] = "nvenc2",
        [NVKM_ENGINE_NVDEC   ] = "nvdec",
        [NVKM_ENGINE_PM      ] = "pm",
        [NVKM_ENGINE_SEC     ] = "sec",
@@ -105,7 +109,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
                }
        }
 
-       nvkm_mc_reset(device->mc, subdev->index);
+       nvkm_mc_reset(device, subdev->index);
 
        time = ktime_to_us(ktime_get()) - time;
        nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
index 9c19d59b47df60b0c5ec79661b096052cc259ddc..a4458a8eb30a7f5da77dee8960cf7aaf81e6914e 100644
@@ -3,3 +3,5 @@ nvkm-y += nvkm/engine/ce/gf100.o
 nvkm-y += nvkm/engine/ce/gk104.o
 nvkm-y += nvkm/engine/ce/gm107.o
 nvkm-y += nvkm/engine/ce/gm200.o
+nvkm-y += nvkm/engine/ce/gp100.o
+nvkm-y += nvkm/engine/ce/gp104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
new file mode 100644
index 0000000..c771045
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include <core/enum.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_enum
+gp100_ce_launcherr_report[] = {
+       { 0x0, "NO_ERR" },
+       { 0x1, "2D_LAYER_EXCEEDS_DEPTH" },
+       { 0x2, "INVALID_ALIGNMENT" },
+       { 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" },
+       { 0x4, "SRC_LINE_EXCEEDS_PITCH" },
+       { 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" },
+       { 0x6, "DST_LINE_EXCEEDS_PITCH" },
+       { 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" },
+       { 0x8, "BAD_SRC_PIXEL_COMP_REF" },
+       { 0x9, "INVALID_VALUE" },
+       { 0xa, "UNUSED_FIELD" },
+       { 0xb, "INVALID_OPERATION" },
+       { 0xc, "NO_RESOURCES" },
+       { 0xd, "INVALID_CONFIG" },
+       {}
+};
+
+static void
+gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
+{
+       struct nvkm_subdev *subdev = &ce->subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 stat = nvkm_rd32(device, 0x104418 + base);
+       const struct nvkm_enum *en =
+               nvkm_enum_find(gp100_ce_launcherr_report, stat & 0x0000000f);
+       nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : "");
+}
+
+void
+gp100_ce_intr(struct nvkm_engine *ce)
+{
+       const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
+       struct nvkm_subdev *subdev = &ce->subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 mask = nvkm_rd32(device, 0x10440c + base);
+       u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
+       if (intr & 0x00000001) { //XXX: guess
+               nvkm_warn(subdev, "BLOCKPIPE\n");
+               nvkm_wr32(device, 0x104410 + base, 0x00000001);
+               intr &= ~0x00000001;
+       }
+       if (intr & 0x00000002) { //XXX: guess
+               nvkm_warn(subdev, "NONBLOCKPIPE\n");
+               nvkm_wr32(device, 0x104410 + base, 0x00000002);
+               intr &= ~0x00000002;
+       }
+       if (intr & 0x00000004) {
+               gp100_ce_intr_launcherr(ce, base);
+               nvkm_wr32(device, 0x104410 + base, 0x00000004);
+               intr &= ~0x00000004;
+       }
+       if (intr) {
+               nvkm_warn(subdev, "intr %08x\n", intr);
+               nvkm_wr32(device, 0x104410 + base, intr);
+       }
+}
+
+static const struct nvkm_engine_func
+gp100_ce = {
+       .intr = gp100_ce_intr,
+       .sclass = {
+               { -1, -1, PASCAL_DMA_COPY_A },
+               {}
+       }
+};
+
+int
+gp100_ce_new(struct nvkm_device *device, int index,
+            struct nvkm_engine **pengine)
+{
+       return nvkm_engine_new_(&gp100_ce, device, index, true, pengine);
+}
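
gp100_ce_intr() above locates each copy engine's register block at a 0x80 stride from the CE0 base, then acknowledges handled bits by writing them back to 0x104410 + base. A quick check of the base arithmetic (the enum values here are hypothetical; only the difference from CE0 matters):

    #include <assert.h>

    int main(void)
    {
            unsigned int ce0 = 30, index = 32;        /* hypothetical enum values */
            unsigned int base = (index - ce0) * 0x80; /* CE2 -> +0x100            */

            assert(base == 0x100);
            assert(0x104410 + base == 0x104510);      /* CE2 interrupt register   */
            return 0;
    }
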
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
new file mode 100644
index 0000000..20e0197
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+#include <core/enum.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+gp104_ce = {
+       .intr = gp100_ce_intr,
+       .sclass = {
+               { -1, -1, PASCAL_DMA_COPY_B },
+               { -1, -1, PASCAL_DMA_COPY_A },
+               {}
+       }
+};
+
+int
+gp104_ce_new(struct nvkm_device *device, int index,
+            struct nvkm_engine **pengine)
+{
+       return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
+}
index e2fa8b1619437597df5df065e6792e24bd11f97c..2dce405976ad6a552855aef9ce61b63cb234dc1a 100644
@@ -4,4 +4,5 @@
 
 void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
 void gk104_ce_intr(struct nvkm_engine *);
+void gp100_ce_intr(struct nvkm_engine *);
 #endif
index 4572debcb0c9862f54fa5d7f4db436b885b2df42..7218a067a6c5e85764f1f330cb05b10c7e50132c 100644
@@ -2148,6 +2148,67 @@ nv12b_chipset = {
        .sw = gf100_sw_new,
 };
 
+static const struct nvkm_device_chip
+nv130_chipset = {
+       .name = "GP100",
+       .bar = gf100_bar_new,
+       .bios = nvkm_bios_new,
+       .bus = gf100_bus_new,
+       .devinit = gm200_devinit_new,
+       .fb = gp100_fb_new,
+       .fuse = gm107_fuse_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .ltc = gp100_ltc_new,
+       .mc = gp100_mc_new,
+       .mmu = gf100_mmu_new,
+       .secboot = gm200_secboot_new,
+       .pci = gp100_pci_new,
+       .timer = gk20a_timer_new,
+       .top = gk104_top_new,
+       .ce[0] = gp100_ce_new,
+       .ce[1] = gp100_ce_new,
+       .ce[2] = gp100_ce_new,
+       .ce[3] = gp100_ce_new,
+       .ce[4] = gp100_ce_new,
+       .ce[5] = gp100_ce_new,
+       .dma = gf119_dma_new,
+       .disp = gp100_disp_new,
+       .fifo = gp100_fifo_new,
+       .gr = gp100_gr_new,
+       .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv134_chipset = {
+       .name = "GP104",
+       .bar = gf100_bar_new,
+       .bios = nvkm_bios_new,
+       .bus = gf100_bus_new,
+       .devinit = gm200_devinit_new,
+       .fb = gp104_fb_new,
+       .fuse = gm107_fuse_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .ltc = gp100_ltc_new,
+       .mc = gp100_mc_new,
+       .mmu = gf100_mmu_new,
+       .pci = gp100_pci_new,
+       .timer = gk20a_timer_new,
+       .top = gk104_top_new,
+       .ce[0] = gp104_ce_new,
+       .ce[1] = gp104_ce_new,
+       .ce[2] = gp104_ce_new,
+       .ce[3] = gp104_ce_new,
+       .disp = gp104_disp_new,
+       .dma = gf119_dma_new,
+       .fifo = gp100_fifo_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -2221,6 +2282,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
        _(CE0    , device->ce[0]   ,  device->ce[0]);
        _(CE1    , device->ce[1]   ,  device->ce[1]);
        _(CE2    , device->ce[2]   ,  device->ce[2]);
+       _(CE3    , device->ce[3]   ,  device->ce[3]);
+       _(CE4    , device->ce[4]   ,  device->ce[4]);
+       _(CE5    , device->ce[5]   ,  device->ce[5]);
        _(CIPHER , device->cipher  ,  device->cipher);
        _(DISP   , device->disp    , &device->disp->engine);
        _(DMAOBJ , device->dma     , &device->dma->engine);
@@ -2235,6 +2299,7 @@ nvkm_device_engine(struct nvkm_device *device, int index)
        _(MSVLD  , device->msvld   ,  device->msvld);
        _(NVENC0 , device->nvenc[0],  device->nvenc[0]);
        _(NVENC1 , device->nvenc[1],  device->nvenc[1]);
+       _(NVENC2 , device->nvenc[2],  device->nvenc[2]);
        _(NVDEC  , device->nvdec   ,  device->nvdec);
        _(PM     , device->pm      , &device->pm->engine);
        _(SEC    , device->sec     ,  device->sec);
@@ -2492,6 +2557,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                        case 0x100: device->card_type = NV_E0; break;
                        case 0x110:
                        case 0x120: device->card_type = GM100; break;
+                       case 0x130: device->card_type = GP100; break;
                        default:
                                break;
                        }
@@ -2576,6 +2642,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x124: device->chip = &nv124_chipset; break;
                case 0x126: device->chip = &nv126_chipset; break;
                case 0x12b: device->chip = &nv12b_chipset; break;
+               case 0x130: device->chip = &nv130_chipset; break;
+               case 0x134: device->chip = &nv134_chipset; break;
                default:
                        nvdev_error(device, "unknown chipset (%08x)\n", boot0);
                        goto done;
@@ -2659,6 +2727,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                _(NVKM_ENGINE_CE0     ,    ce[0]);
                _(NVKM_ENGINE_CE1     ,    ce[1]);
                _(NVKM_ENGINE_CE2     ,    ce[2]);
+               _(NVKM_ENGINE_CE3     ,    ce[3]);
+               _(NVKM_ENGINE_CE4     ,    ce[4]);
+               _(NVKM_ENGINE_CE5     ,    ce[5]);
                _(NVKM_ENGINE_CIPHER  ,   cipher);
                _(NVKM_ENGINE_DISP    ,     disp);
                _(NVKM_ENGINE_DMAOBJ  ,      dma);
@@ -2673,6 +2744,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                _(NVKM_ENGINE_MSVLD   ,    msvld);
                _(NVKM_ENGINE_NVENC0  , nvenc[0]);
                _(NVKM_ENGINE_NVENC1  , nvenc[1]);
+               _(NVKM_ENGINE_NVENC2  , nvenc[2]);
                _(NVKM_ENGINE_NVDEC   ,    nvdec);
                _(NVKM_ENGINE_PM      ,       pm);
                _(NVKM_ENGINE_SEC     ,      sec);
index 18fab3973ce5f9b0c2a527d3fcfeb07b465288e5..b1b693219db3f70edbf29ae5f508141937e84e42 100644
@@ -1614,7 +1614,6 @@ nvkm_device_pci_func = {
        .fini = nvkm_device_pci_fini,
        .resource_addr = nvkm_device_pci_resource_addr,
        .resource_size = nvkm_device_pci_resource_size,
-       .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
 };
 
 int
index ec12efb4689a7ea8a50fd7ee8626f5df4440f0a3..939682f18788feda2d5b30b045140bc696e476db 100644
@@ -191,13 +191,11 @@ static irqreturn_t
 nvkm_device_tegra_intr(int irq, void *arg)
 {
        struct nvkm_device_tegra *tdev = arg;
-       struct nvkm_mc *mc = tdev->device.mc;
+       struct nvkm_device *device = &tdev->device;
        bool handled = false;
-       if (likely(mc)) {
-               nvkm_mc_intr_unarm(mc);
-               nvkm_mc_intr(mc, &handled);
-               nvkm_mc_intr_rearm(mc);
-       }
+       nvkm_mc_intr_unarm(device);
+       nvkm_mc_intr(device, &handled);
+       nvkm_mc_intr_rearm(device);
        return handled ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -247,7 +245,6 @@ nvkm_device_tegra_func = {
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
-       .cpu_coherent = false,
 };
 
 int
@@ -313,6 +310,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                goto remove;
 
        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
+       tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
        ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
                               NVKM_DEVICE_TEGRA, pdev->id, NULL,
                               cfg, dbg, detect, mmio, subdev_mask,
index 137066426ed7f9cbb37e24650a95e15225e02765..79a8f71cf7884dc3e2307e12b591a194f8ae841a 100644
@@ -102,6 +102,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
        case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
        case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
        case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+       case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
        default:
                args->v0.family = 0;
                break;
index a74c5dd27dc0f8774706bc41f81c7d43169aa339..77a52b54a31e3c5627adac7466e1cc96b4e74945 100644
@@ -10,6 +10,8 @@ nvkm-y += nvkm/engine/disp/gk104.o
 nvkm-y += nvkm/engine/disp/gk110.o
 nvkm-y += nvkm/engine/disp/gm107.o
 nvkm-y += nvkm/engine/disp/gm200.o
+nvkm-y += nvkm/engine/disp/gp100.o
+nvkm-y += nvkm/engine/disp/gp104.o
 
 nvkm-y += nvkm/engine/disp/outp.o
 nvkm-y += nvkm/engine/disp/outpdp.o
@@ -18,6 +20,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
 nvkm-y += nvkm/engine/disp/sornv50.o
 nvkm-y += nvkm/engine/disp/sorg94.o
 nvkm-y += nvkm/engine/disp/sorgf119.o
+nvkm-y += nvkm/engine/disp/sorgm107.o
 nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/dport.o
 
@@ -44,12 +47,15 @@ nvkm-y += nvkm/engine/disp/rootgk104.o
 nvkm-y += nvkm/engine/disp/rootgk110.o
 nvkm-y += nvkm/engine/disp/rootgm107.o
 nvkm-y += nvkm/engine/disp/rootgm200.o
+nvkm-y += nvkm/engine/disp/rootgp100.o
+nvkm-y += nvkm/engine/disp/rootgp104.o
 
 nvkm-y += nvkm/engine/disp/channv50.o
 nvkm-y += nvkm/engine/disp/changf119.o
 
 nvkm-y += nvkm/engine/disp/dmacnv50.o
 nvkm-y += nvkm/engine/disp/dmacgf119.o
+nvkm-y += nvkm/engine/disp/dmacgp104.o
 
 nvkm-y += nvkm/engine/disp/basenv50.o
 nvkm-y += nvkm/engine/disp/baseg84.o
@@ -58,6 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o
 nvkm-y += nvkm/engine/disp/basegf119.o
 nvkm-y += nvkm/engine/disp/basegk104.o
 nvkm-y += nvkm/engine/disp/basegk110.o
+nvkm-y += nvkm/engine/disp/basegp104.o
 
 nvkm-y += nvkm/engine/disp/corenv50.o
 nvkm-y += nvkm/engine/disp/coreg84.o
@@ -69,6 +76,8 @@ nvkm-y += nvkm/engine/disp/coregk104.o
 nvkm-y += nvkm/engine/disp/coregk110.o
 nvkm-y += nvkm/engine/disp/coregm107.o
 nvkm-y += nvkm/engine/disp/coregm200.o
+nvkm-y += nvkm/engine/disp/coregp100.o
+nvkm-y += nvkm/engine/disp/coregp104.o
 
 nvkm-y += nvkm/engine/disp/ovlynv50.o
 nvkm-y += nvkm/engine/disp/ovlyg84.o
@@ -76,6 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o
 nvkm-y += nvkm/engine/disp/ovlygt215.o
 nvkm-y += nvkm/engine/disp/ovlygf119.o
 nvkm-y += nvkm/engine/disp/ovlygk104.o
+nvkm-y += nvkm/engine/disp/ovlygp104.o
 
 nvkm-y += nvkm/engine/disp/piocnv50.o
 nvkm-y += nvkm/engine/disp/piocgf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
new file mode 100644
index 0000000..51688e3
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_base_oclass = {
+       .base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
+       .base.minver = 0,
+       .base.maxver = 0,
+       .ctor = nv50_disp_base_new,
+       .func = &gp104_disp_dmac_func,
+       .mthd = &gf119_disp_base_chan_mthd,
+       .chid = 1,
+};
index aee374884c960394490655c66055d35ec1fdd2a6..f5f683d9fd20225815518f4867c2e4a873248d9f 100644
@@ -85,6 +85,7 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
 extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
 
 extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
+extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
 
 struct nv50_disp_pioc_oclass {
        int (*ctor)(const struct nv50_disp_chan_func *,
index 6b1dc703dac7e77bdaf778d90bc1e46895fee49c..21fbf89b6319fba4f155acceee2a61c5b398b2cf 100644
@@ -171,7 +171,7 @@ gf119_disp_core_chan_mthd = {
        }
 };
 
-static void
+void
 gf119_disp_core_fini(struct nv50_disp_dmac *chan)
 {
        struct nv50_disp *disp = chan->base.root->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
new file mode 100644
index 0000000..d5dff66
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp100_disp_core_oclass = {
+       .base.oclass = GP100_DISP_CORE_CHANNEL_DMA,
+       .base.minver = 0,
+       .base.maxver = 0,
+       .ctor = nv50_disp_core_new,
+       .func = &gf119_disp_core_func,
+       .mthd = &gk104_disp_core_chan_mthd,
+       .chid = 0,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
new file mode 100644
index 0000000..6922f40
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+#include <nvif/class.h>
+
+static int
+gp104_disp_core_init(struct nv50_disp_dmac *chan)
+{
+       struct nv50_disp *disp = chan->base.root->disp;
+       struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+
+       /* enable error reporting */
+       nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
+
+       /* initialise channel for dma command submission */
+       nvkm_wr32(device, 0x611494, chan->push);
+       nvkm_wr32(device, 0x611498, 0x00010000);
+       nvkm_wr32(device, 0x61149c, 0x00000001);
+       nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
+       nvkm_wr32(device, 0x640000, 0x00000000);
+       nvkm_wr32(device, 0x610490, 0x01000013);
+
+       /* wait for it to go inactive */
+       if (nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
+                       break;
+       ) < 0) {
+               nvkm_error(subdev, "core init: %08x\n",
+                          nvkm_rd32(device, 0x610490));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+const struct nv50_disp_dmac_func
+gp104_disp_core_func = {
+       .init = gp104_disp_core_init,
+       .fini = gf119_disp_core_fini,
+       .bind = gf119_disp_dmac_bind,
+};
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_core_oclass = {
+       .base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
+       .base.minver = 0,
+       .base.maxver = 0,
+       .ctor = nv50_disp_core_new,
+       .func = &gp104_disp_core_func,
+       .mthd = &gk104_disp_core_chan_mthd,
+       .chid = 0,
+};
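
gp104_disp_core_init() above waits for the channel with the nvkm_msec() idiom: poll a register until a bit clears or a millisecond budget runs out, returning -EBUSY on timeout. A minimal userspace sketch of the same shape (poll_idle() is a hypothetical stand-in for reading 0x610490):

    #include <stdbool.h>
    #include <time.h>

    static bool wait_idle_ms(bool (*poll_idle)(void), long budget_ms)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            do {
                    if (poll_idle())
                            return true;            /* went idle in time     */
                    clock_gettime(CLOCK_MONOTONIC, &now);
            } while ((now.tv_sec - start.tv_sec) * 1000 +
                     (now.tv_nsec - start.tv_nsec) / 1000000 < budget_ms);
            return false;                           /* caller maps to -EBUSY */
    }
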
index 876b14549a587e32effac90da6f8b7d957afdb38..a57f7cef307a89fae14784ee369b9bc9733ab930 100644
@@ -36,7 +36,7 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
                                 chan->base.chid << 27 | 0x00000001);
 }
 
-static void
+void
 gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
 {
        struct nv50_disp *disp = chan->base.root->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
new file mode 100644
index 0000000..ad24c2c
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <subdev/timer.h>
+
+static int
+gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
+{
+       struct nv50_disp *disp = chan->base.root->disp;
+       struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       int chid = chan->base.chid;
+
+       /* enable error reporting */
+       nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
+
+       /* initialise channel for dma command submission */
+       nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
+       nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
+       nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
+       nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
+       nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
+       nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
+
+       /* wait for it to go inactive */
+       if (nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
+                       break;
+       ) < 0) {
+               nvkm_error(subdev, "ch %d init: %08x\n", chid,
+                          nvkm_rd32(device, 0x610490 + (chid * 0x10)));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+const struct nv50_disp_dmac_func
+gp104_disp_dmac_func = {
+       .init = gp104_disp_dmac_init,
+       .fini = gf119_disp_dmac_fini,
+       .bind = gf119_disp_dmac_bind,
+};
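
The per-channel register addressing in gp104_disp_dmac_init() above uses two strides: 0x10 bytes between channels for the 0x611494/0x610490 control blocks, and 0x1000 for the 0x640000 user area. A check of both computations, assuming chid=2:

    #include <assert.h>

    int main(void)
    {
            unsigned int chid = 2;                         /* hypothetical channel */

            assert(0x611494 + chid * 0x0010 == 0x6114b4);  /* push buffer ctrl  */
            assert(0x640000 + chid * 0x1000 == 0x642000);  /* channel user area */
            return 0;
    }
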
index fc84eb8b5c451e74722ecc0e73486dff7c953ec7..43ac0585785375423c3d287779fd910f55461469 100644
@@ -25,8 +25,12 @@ int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
 extern const struct nv50_disp_dmac_func nv50_disp_core_func;
 
 extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
+void gf119_disp_dmac_fini(struct nv50_disp_dmac *);
 int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
 extern const struct nv50_disp_dmac_func gf119_disp_core_func;
+void gf119_disp_core_fini(struct nv50_disp_dmac *);
+
+extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;
 
 struct nv50_disp_dmac_oclass {
        int (*ctor)(const struct nv50_disp_dmac_func *,
@@ -88,4 +92,10 @@ extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
 extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
 
 extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
+
+extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
+extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
+extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
 #endif
index f0314664349c3b2c39ef650e1ecdc636bde5aae1..29e84b241cca9510a84fca47c34ae56538d6c1c3 100644
@@ -79,8 +79,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
        list_for_each_entry(outp, &disp->base.outp, head) {
                if ((outp->info.hasht & 0xff) == type &&
                    (outp->info.hashm & mask) == mask) {
-                       *data = nvbios_outp_match(bios, outp->info.hasht,
-                                                       outp->info.hashm,
+                       *data = nvbios_outp_match(bios, outp->info.hasht, mask,
                                                  ver, hdr, cnt, len, info);
                        if (!*data)
                                return NULL;
@@ -155,25 +154,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
        if (!outp)
                return NULL;
 
+       *conf = (ctrl & 0x00000f00) >> 8;
        switch (outp->info.type) {
        case DCB_OUTPUT_TMDS:
-               *conf = (ctrl & 0x00000f00) >> 8;
                if (*conf == 5)
                        *conf |= 0x0100;
                break;
        case DCB_OUTPUT_LVDS:
-               *conf = disp->sor.lvdsconf;
+               *conf |= disp->sor.lvdsconf;
                break;
-       case DCB_OUTPUT_DP:
-               *conf = (ctrl & 0x00000f00) >> 8;
-               break;
-       case DCB_OUTPUT_ANALOG:
        default:
-               *conf = 0x00ff;
                break;
        }
 
-       data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+       data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+                                &ver, &hdr, &cnt, &len, &info2);
        if (data && id < 0xff) {
                data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
                if (data) {
@@ -418,7 +413,7 @@ gf119_disp_intr_supervisor(struct work_struct *work)
        nvkm_wr32(device, 0x6101d0, 0x80000000);
 }
 
-static void
+void
 gf119_disp_intr_error(struct nv50_disp *disp, int chid)
 {
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -466,7 +461,7 @@ gf119_disp_intr(struct nv50_disp *disp)
                u32 stat = nvkm_rd32(device, 0x61009c);
                int chid = ffs(stat) - 1;
                if (chid >= 0)
-                       gf119_disp_intr_error(disp, chid);
+                       disp->func->intr_error(disp, chid);
                intr &= ~0x00000002;
        }
 
@@ -510,6 +505,7 @@ gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
 static const struct nv50_disp_func
 gf119_disp = {
        .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_intr_supervisor,
        .root = &gf119_disp_root_oclass,
index a86384b8e388defac9284aec848212317abec765..37f145cf30d78345c2c599ca9174994fc164b81f 100644
@@ -27,6 +27,7 @@
 static const struct nv50_disp_func
 gk104_disp = {
        .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_intr_supervisor,
        .root = &gk104_disp_root_oclass,
index 0d574c7e594ae04f3d2829359e6e3a5a86cdddf2..e14ac946608ca7d39c870b12f770ab4f8450e6f4 100644
@@ -27,6 +27,7 @@
 static const struct nv50_disp_func
 gk110_disp = {
        .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_intr_supervisor,
        .root = &gk110_disp_root_oclass,
index b6944142d616f91fba4886521994b60ee50a18e0..2f2437cc5891553a876a9a2f02605a6b3de33cdb 100644
@@ -27,6 +27,7 @@
 static const struct nv50_disp_func
 gm107_disp = {
        .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_intr_supervisor,
        .root = &gm107_disp_root_oclass,
@@ -36,7 +37,7 @@ gm107_disp = {
        .outp.internal.crt = nv50_dac_output_new,
        .outp.internal.tmds = nv50_sor_output_new,
        .outp.internal.lvds = nv50_sor_output_new,
-       .outp.internal.dp = gf119_sor_dp_new,
+       .outp.internal.dp = gm107_sor_dp_new,
        .dac.nr = 3,
        .dac.power = nv50_dac_power,
        .dac.sense = nv50_dac_sense,
index 67eec86207194e3d7894e51037b6e81f2d4d86e4..9f368d4ee61e1fb6fe1bf404692714d84a525031 100644
@@ -27,6 +27,7 @@
 static const struct nv50_disp_func
 gm200_disp = {
        .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
        .uevent = &gf119_disp_chan_uevent,
        .super = gf119_disp_intr_supervisor,
        .root = &gm200_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
new file mode 100644
index 0000000..4f81bf3
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+gp100_disp = {
+       .intr = gf119_disp_intr,
+       .intr_error = gf119_disp_intr_error,
+       .uevent = &gf119_disp_chan_uevent,
+       .super = gf119_disp_intr_supervisor,
+       .root = &gp100_disp_root_oclass,
+       .head.vblank_init = gf119_disp_vblank_init,
+       .head.vblank_fini = gf119_disp_vblank_fini,
+       .head.scanoutpos = gf119_disp_root_scanoutpos,
+       .outp.internal.crt = nv50_dac_output_new,
+       .outp.internal.tmds = nv50_sor_output_new,
+       .outp.internal.lvds = nv50_sor_output_new,
+       .outp.internal.dp = gm200_sor_dp_new,
+       .dac.nr = 3,
+       .dac.power = nv50_dac_power,
+       .dac.sense = nv50_dac_sense,
+       .sor.nr = 4,
+       .sor.power = nv50_sor_power,
+       .sor.hda_eld = gf119_hda_eld,
+       .sor.hdmi = gk104_hdmi_ctrl,
+       .sor.magic = gm200_sor_magic,
+};
+
+int
+gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+       return gf119_disp_new_(&gp100_disp, device, index, pdisp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
new file mode 100644
index 0000000..3bf3380
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "rootnv50.h"
+
+static void
+gp104_disp_intr_error(struct nv50_disp *disp, int chid)
+{
+       struct nvkm_subdev *subdev = &disp->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12));
+       u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12));
+       u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12));
+
+       nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
+                  chid, (mthd & 0x0000ffc), data, mthd, unkn);
+
+       if (chid < ARRAY_SIZE(disp->chan)) {
+               switch (mthd & 0xffc) {
+               case 0x0080:
+                       nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       nvkm_wr32(device, 0x61009c, (1 << chid));
+       nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000);
+}
+
+static const struct nv50_disp_func
+gp104_disp = {
+       .intr = gf119_disp_intr,
+       .intr_error = gp104_disp_intr_error,
+       .uevent = &gf119_disp_chan_uevent,
+       .super = gf119_disp_intr_supervisor,
+       .root = &gp104_disp_root_oclass,
+       .head.vblank_init = gf119_disp_vblank_init,
+       .head.vblank_fini = gf119_disp_vblank_fini,
+       .head.scanoutpos = gf119_disp_root_scanoutpos,
+       .outp.internal.crt = nv50_dac_output_new,
+       .outp.internal.tmds = nv50_sor_output_new,
+       .outp.internal.lvds = nv50_sor_output_new,
+       .outp.internal.dp = gm200_sor_dp_new,
+       .dac.nr = 3,
+       .dac.power = nv50_dac_power,
+       .dac.sense = nv50_dac_sense,
+       .sor.nr = 4,
+       .sor.power = nv50_sor_power,
+       .sor.hda_eld = gf119_hda_eld,
+       .sor.hdmi = gk104_hdmi_ctrl,
+       .sor.magic = gm200_sor_magic,
+};
+
+int
+gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+       return gf119_disp_new_(&gp104_disp, device, index, pdisp);
+}
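gp104 cannot reuse gf119_disp_intr_error because its per-channel error-reporting registers moved to a new base. The layout assumed by the handler above, as a sketch (the helper name is illustrative, not from the driver):

static inline u32
gp104_disp_chan_error_base(int chid)
{
	/* three u32s per channel: +0x0 mthd, +0x4 data, +0x8 unkn */
	return 0x6111f0 + chid * 12;
}

The acknowledge sequence then clears the channel's bit in 0x61009c and writes 0x90000000 back to the mthd register.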
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 4226d2153b9c2a1f8072697fbb77c1b7838e9e09..fbb8c7dc18fd2b67a880432e9a0144e2ee65c57a 100644
@@ -32,6 +32,7 @@
 #include <subdev/bios/init.h>
 #include <subdev/bios/pll.h>
 #include <subdev/devinit.h>
+#include <subdev/timer.h>
 
 static const struct nvkm_disp_oclass *
 nv50_disp_root_(struct nvkm_disp *base)
@@ -269,8 +270,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
        list_for_each_entry(outp, &disp->base.outp, head) {
                if ((outp->info.hasht & 0xff) == type &&
                    (outp->info.hashm & mask) == mask) {
-                       *data = nvbios_outp_match(bios, outp->info.hasht,
-                                                       outp->info.hashm,
+                       *data = nvbios_outp_match(bios, outp->info.hasht, mask,
                                                  ver, hdr, cnt, len, info);
                        if (!*data)
                                return NULL;
@@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
        if (!outp)
                return NULL;
 
+       *conf = (ctrl & 0x00000f00) >> 8;
        if (outp->info.location == 0) {
                switch (outp->info.type) {
                case DCB_OUTPUT_TMDS:
-                       *conf = (ctrl & 0x00000f00) >> 8;
                        if (*conf == 5)
                                *conf |= 0x0100;
                        break;
                case DCB_OUTPUT_LVDS:
-                       *conf = disp->sor.lvdsconf;
+                       *conf |= disp->sor.lvdsconf;
                        break;
-               case DCB_OUTPUT_DP:
-                       *conf = (ctrl & 0x00000f00) >> 8;
-                       break;
-               case DCB_OUTPUT_ANALOG:
                default:
-                       *conf = 0x00ff;
                        break;
                }
        } else {
@@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
                pclk = pclk / 2;
        }
 
-       data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
+       data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
+                                &ver, &hdr, &cnt, &len, &info2);
        if (data && id < 0xff) {
                data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
                if (data) {
@@ -430,6 +426,134 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
        return outp;
 }
 
+static bool
+nv50_disp_dptmds_war(struct nvkm_device *device)
+{
+       switch (device->chipset) {
+       case 0x94:
+       case 0x96:
+       case 0x98:
+       case 0xaa:
+       case 0xac:
+               return true;
+       default:
+               break;
+       }
+       return false;
+}
+
+static bool
+nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp)
+{
+       struct nvkm_device *device = disp->base.engine.subdev.device;
+       const u32 soff = __ffs(outp->or) * 0x800;
+       if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) {
+               switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
+               case 0x00000000:
+               case 0x00030000:
+                       return true;
+               default:
+                       break;
+               }
+       }
+       return false;
+
+}
+
+static void
+nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp)
+{
+       struct nvkm_device *device = disp->base.engine.subdev.device;
+       const u32 soff = __ffs(outp->or) * 0x800;
+
+       if (!nv50_disp_dptmds_war_needed(disp, outp))
+               return;
+
+       nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
+       nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
+       nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);
+
+       nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
+       nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
+       nvkm_usec(device, 400, NVKM_DELAY);
+       nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
+       nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);
+
+       if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
+               u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
+               u32  pu_pc = seqctl & 0x0000000f;
+               nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
+       }
+}
+
+static void
+nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp)
+{
+       struct nvkm_device *device = disp->base.engine.subdev.device;
+       const u32 soff = __ffs(outp->or) * 0x800;
+       u32 sorpwr;
+
+       if (!nv50_disp_dptmds_war_needed(disp, outp))
+               return;
+
+       sorpwr = nvkm_rd32(device, 0x61c004 + soff);
+       if (sorpwr & 0x00000001) {
+               u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
+               u32  pd_pc = (seqctl & 0x00000f00) >> 8;
+               u32  pu_pc =  seqctl & 0x0000000f;
+
+               nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);
+
+               nvkm_msec(device, 2000,
+                       if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+                               break;
+               );
+               nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
+               nvkm_msec(device, 2000,
+                       if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
+                               break;
+               );
+
+               nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
+               nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
+       }
+
+       nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
+       nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);
+
+       if (sorpwr & 0x00000001) {
+               nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
+       }
+}
+
+static void
+nv50_disp_update_sppll1(struct nv50_disp *disp)
+{
+       struct nvkm_device *device = disp->base.engine.subdev.device;
+       bool used = false;
+       int sor;
+
+       if (!nv50_disp_dptmds_war(device))
+               return;
+
+       for (sor = 0; sor < disp->func->sor.nr; sor++) {
+               u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800));
+               switch (clksor & 0x03000000) {
+               case 0x02000000:
+               case 0x03000000:
+                       used = true;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       if (used)
+               return;
+
+       nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
+}
+
 static void
 nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
 {
@@ -683,6 +807,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
 
        nvkm_mask(device, hreg, 0x0000000f, hval);
        nvkm_mask(device, oreg, mask, oval);
+
+       nv50_disp_dptmds_war_2(disp, &outp->info);
 }
 
 /* If programming a TMDS output on a SOR that can also be configured for
@@ -724,6 +850,7 @@ nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
 
        if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
                nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
+       nv50_disp_dptmds_war_3(disp, &outp->info);
 }
 
 void
@@ -771,6 +898,7 @@ nv50_disp_intr_supervisor(struct work_struct *work)
                                continue;
                        nv50_disp_intr_unk40_0(disp, head);
                }
+               nv50_disp_update_sppll1(disp);
        }
 
        nvkm_wr32(device, 0x610030, 0x80000000);
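Taken together, the new helpers implement an ordered workaround for TMDS outputs on chipsets where the SOR shares the SPPLL1 clock source: _war_2 runs right after output mode programming in unk20_2, _war_3 runs at the end of unk40_0 once the link scripts have executed, and update_sppll1 powers the shared PLL back down when no SOR is left using it. The gating predicate, condensed into one sketch equivalent to nv50_disp_dptmds_war_needed() above:

static bool
dptmds_war_applies(struct nvkm_device *device, struct dcb_output *outp)
{
	const u32 soff = __ffs(outp->or) * 0x800;
	u32 mode;

	/* only G94/G96/G98/0xaa/0xac, and only TMDS outputs */
	if (outp->type != DCB_OUTPUT_TMDS || !nv50_disp_dptmds_war(device))
		return false;
	mode = nvkm_rd32(device, 0x614300 + soff) & 0x00030000;
	return mode == 0x00000000 || mode == 0x00030000;
}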
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index aecebd8717e5ccbdea2f11903faee88474d2163f..1e1de6bfe85a25eb00d881a5a266cd6fc1b36e49 100644
@@ -68,6 +68,7 @@ struct nv50_disp_func_outp {
 
 struct nv50_disp_func {
        void (*intr)(struct nv50_disp *);
+       void (*intr_error)(struct nv50_disp *, int chid);
 
        const struct nvkm_event_func *uevent;
        void (*super)(struct work_struct *);
@@ -114,4 +115,5 @@ void gf119_disp_vblank_init(struct nv50_disp *, int);
 void gf119_disp_vblank_fini(struct nv50_disp *, int);
 void gf119_disp_intr(struct nv50_disp *);
 void gf119_disp_intr_supervisor(struct work_struct *);
+void gf119_disp_intr_error(struct nv50_disp *, int);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
index e9067ba4e1790d1e70a5cce27035f30234bc8856..4e983f6d7032e897e033085a196405d7c20d9fec 100644
@@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
 int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
                     struct nvkm_output **);
 int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
+int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
 
-int  gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
-                     struct nvkm_output **);
+int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+                    struct nvkm_output **);
+int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
+
+int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
+                    struct nvkm_output **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
index 2e2dc0641ef2b37b3c422e1432a7676b14b09c0b..2f0220b39f3469ba146a73efd94307c6b281d3b7 100644
@@ -80,7 +80,7 @@ gk104_disp_ovly_mthd_base = {
        }
 };
 
-static const struct nv50_disp_chan_mthd
+const struct nv50_disp_chan_mthd
 gk104_disp_ovly_chan_mthd = {
        .name = "Overlay",
        .addr = 0x001000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
new file mode 100644
index 0000000..97e2dd2
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "dmacnv50.h"
+#include "rootnv50.h"
+
+#include <nvif/class.h>
+
+const struct nv50_disp_dmac_oclass
+gp104_disp_ovly_oclass = {
+       .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
+       .base.minver = 0,
+       .base.maxver = 0,
+       .ctor = nv50_disp_ovly_new,
+       .func = &gp104_disp_dmac_func,
+       .mthd = &gk104_disp_ovly_chan_mthd,
+       .chid = 5,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
new file mode 100644
index 0000000..ac8fdd7
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gp100_disp_root = {
+       .init = gf119_disp_root_init,
+       .fini = gf119_disp_root_fini,
+       .dmac = {
+               &gp100_disp_core_oclass,
+               &gk110_disp_base_oclass,
+               &gk104_disp_ovly_oclass,
+       },
+       .pioc = {
+               &gk104_disp_oimm_oclass,
+               &gk104_disp_curs_oclass,
+       },
+};
+
+static int
+gp100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+                   void *data, u32 size, struct nvkm_object **pobject)
+{
+       return nv50_disp_root_new_(&gp100_disp_root, disp, oclass,
+                                  data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gp100_disp_root_oclass = {
+       .base.oclass = GP100_DISP,
+       .base.minver = -1,
+       .base.maxver = -1,
+       .ctor = gp100_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
new file mode 100644
index 0000000..8443e04
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "rootnv50.h"
+#include "dmacnv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+gp104_disp_root = {
+       .init = gf119_disp_root_init,
+       .fini = gf119_disp_root_fini,
+       .dmac = {
+               &gp104_disp_core_oclass,
+               &gp104_disp_base_oclass,
+               &gp104_disp_ovly_oclass,
+       },
+       .pioc = {
+               &gk104_disp_oimm_oclass,
+               &gk104_disp_curs_oclass,
+       },
+};
+
+static int
+gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+                   void *data, u32 size, struct nvkm_object **pobject)
+{
+       return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
+                                  data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+gp104_disp_root_oclass = {
+       .base.oclass = GP104_DISP,
+       .base.minver = -1,
+       .base.maxver = -1,
+       .ctor = gp104_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index cb449ed8d92c45c6543a2249f60c471438d4ba66..ad00f1724b729c60ff749c8cf601533aa4f05d55 100644
@@ -40,4 +40,6 @@ extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
 extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
 extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
 extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
+extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index b4b41b13564344978517fad5c1a5622430fc1720..49bd5da194e1b368b0d069825e6fb03b03166684 100644
@@ -40,8 +40,8 @@ static int
 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
        struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-       const u32 loff = gf119_sor_loff(outp);
-       nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
+       const u32 soff = gf119_sor_soff(outp);
+       nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
        return 0;
 }
 
@@ -64,7 +64,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
        return 0;
 }
 
-static int
+int
 gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
                     int ln, int vs, int pe, int pc)
 {
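The loff-to-soff change above is a bug fix: the DP training-pattern field at 0x61c110 is a per-SOR register, while the link offset adds a further 0x80 for the second sublink and therefore hit the wrong register on dual-link SORs. The two offset schemes, sketched as assumed from the surrounding helpers (names illustrative):

static inline u32 sor_soff(int or)	/* per-SOR */
{
	return or * 0x800;
}

static inline u32 sor_loff(int or, bool second_link)	/* per-sublink */
{
	return sor_soff(or) + (second_link ? 0x80 : 0x00);
}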
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
new file mode 100644
index 0000000..37790b2
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "nv50.h"
+#include "outpdp.h"
+
+int
+gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
+{
+       struct nvkm_device *device = outp->base.disp->engine.subdev.device;
+       const u32 soff = outp->base.or * 0x800;
+       const u32 data = 0x01010101 * pattern;
+       if (outp->base.info.sorconf.link & 1)
+               nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
+       else
+               nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
+       return 0;
+}
+
+static const struct nvkm_output_dp_func
+gm107_sor_dp_func = {
+       .pattern = gm107_sor_dp_pattern,
+       .lnk_pwr = g94_sor_dp_lnk_pwr,
+       .lnk_ctl = gf119_sor_dp_lnk_ctl,
+       .drv_ctl = gf119_sor_dp_drv_ctl,
+};
+
+int
+gm107_sor_dp_new(struct nvkm_disp *disp, int index,
+                struct dcb_output *dcbE, struct nvkm_output **poutp)
+{
+       return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
+}
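From gm107 onward each sublink of a SOR has its own training-pattern register, which is why the pattern write above branches on bit 0 of sorconf.link. The register selection, condensed as a sketch (illustrative helper):

static inline u32
gm107_sor_dp_pattern_reg(struct nvkm_output_dp *outp)
{
	const u32 soff = outp->base.or * 0x800;
	return ((outp->base.info.sorconf.link & 1) ? 0x61c110 : 0x61c12c) + soff;
}

The next hunk makes gm200 reuse this implementation in place of its own identical copy.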
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 2cfbef9c344ff68ba9e71805cbbe911aed4ddf5e..c44fa7ea672ab20192c8498e418018492ad93252 100644
@@ -56,19 +56,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
        return lane * 0x08;
 }
 
-static int
-gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
-{
-       struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-       const u32 soff = gm200_sor_soff(outp);
-       const u32 data = 0x01010101 * pattern;
-       if (outp->base.info.sorconf.link & 1)
-               nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
-       else
-               nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
-       return 0;
-}
-
 static int
 gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
 {
@@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
 
 static const struct nvkm_output_dp_func
 gm200_sor_dp_func = {
-       .pattern = gm200_sor_dp_pattern,
+       .pattern = gm107_sor_dp_pattern,
        .lnk_pwr = gm200_sor_dp_lnk_pwr,
        .lnk_ctl = gf119_sor_dp_lnk_ctl,
        .drv_ctl = gm200_sor_dp_drv_ctl,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 65e5d291ecda000ce02e711779b62be285e5c67f..98651a43bc12fe5bec383eb88fee6b228e7f1e42 100644
@@ -13,6 +13,7 @@ nvkm-y += nvkm/engine/fifo/gk20a.o
 nvkm-y += nvkm/engine/fifo/gm107.o
 nvkm-y += nvkm/engine/fifo/gm200.o
 nvkm-y += nvkm/engine/fifo/gm20b.o
+nvkm-y += nvkm/engine/fifo/gp100.o
 
 nvkm-y += nvkm/engine/fifo/chan.o
 nvkm-y += nvkm/engine/fifo/channv50.o
@@ -31,3 +32,4 @@ nvkm-y += nvkm/engine/fifo/gpfifogf100.o
 nvkm-y += nvkm/engine/fifo/gpfifogk104.o
 nvkm-y += nvkm/engine/fifo/gpfifogk110.o
 nvkm-y += nvkm/engine/fifo/gpfifogm200.o
+nvkm-y += nvkm/engine/fifo/gpfifogp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index e06f4d46f802e9cb17015d70fbb3f31cdbe18446..230f64e5f73180dbc944a87296de2f801456316d 100644
@@ -27,4 +27,5 @@ int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
 extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
 extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass;
 extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
+extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 743f3a189f285dd6234a54efe6e5d50009fbf25e..103c0afaaa6d1414169276bc7af6b672bb7bedf9 100644
@@ -329,7 +329,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
        }
 
        if (eu == NULL) {
-               enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit);
+               enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
                if (engidx < NVKM_SUBDEV_NR) {
                        const char *src = nvkm_subdev_name[engidx];
                        char *dst = en;
@@ -589,7 +589,6 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
        struct gk104_fifo *fifo = gk104_fifo(base);
        struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
-       struct nvkm_top *top = device->top;
        int engn, runl, pbid, ret, i, j;
        enum nvkm_devidx engidx;
        u32 *map;
@@ -608,7 +607,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
 
        /* Determine runlist configuration from topology device info. */
        i = 0;
-       while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) {
+       while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
                /* Determine which PBDMA handles requests for this engine. */
                for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
                        if (map[j] & (1 << runl)) {
@@ -617,8 +616,8 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
                        }
                }
 
-               nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n",
-                          engn, runl, pbid);
+               nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
+                          engn, runl, pbid, nvkm_subdev_name[engidx]);
 
                fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
                fifo->engine[engn].runl = runl;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
new file mode 100644
index 0000000..eff83f7
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct nvkm_enum
+gp100_fifo_fault_engine[] = {
+       { 0x01, "DISPLAY" },
+       { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+       { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+       { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+       { 0x06, "HOST0" },
+       { 0x07, "HOST1" },
+       { 0x08, "HOST2" },
+       { 0x09, "HOST3" },
+       { 0x0a, "HOST4" },
+       { 0x0b, "HOST5" },
+       { 0x0c, "HOST6" },
+       { 0x0d, "HOST7" },
+       { 0x0e, "HOST8" },
+       { 0x0f, "HOST9" },
+       { 0x10, "HOST10" },
+       { 0x13, "PERF" },
+       { 0x17, "PMU" },
+       { 0x18, "PTP" },
+       { 0x1f, "PHYSICAL" },
+       {}
+};
+
+static const struct gk104_fifo_func
+gp100_fifo = {
+       .fault.engine = gp100_fifo_fault_engine,
+       .fault.reason = gk104_fifo_fault_reason,
+       .fault.hubclient = gk104_fifo_fault_hubclient,
+       .fault.gpcclient = gk104_fifo_fault_gpcclient,
+       .chan = {
+               &gp100_fifo_gpfifo_oclass,
+               NULL
+       },
+};
+
+int
+gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+       return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo);
+}
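The fault-engine table is consumed through nvkm's generic enum machinery; entries that carry a subdev/engine index (IFB, BAR1, BAR2) let the fault handler map a faulting unit back to its owning subdev. Usage sketch, assuming nvkm_enum_find() behaves as it does elsewhere in nvkm:

const struct nvkm_enum *eu = nvkm_enum_find(gp100_fifo_fault_engine, unit);
const char *name = eu ? eu->name : "(unknown)";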
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
new file mode 100644
index 0000000..1530a92
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "changk104.h"
+
+#include <nvif/class.h>
+
+const struct nvkm_fifo_chan_oclass
+gp100_fifo_gpfifo_oclass = {
+       .base.oclass = PASCAL_CHANNEL_GPFIFO_A,
+       .base.minver = 0,
+       .base.maxver = 0,
+       .ctor = gk104_fifo_gpfifo_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 290ed0db8047146b6f2015d39e9373908d064196..f1c494182248a5e6a65519a7b394dabff9f9ed72 100644
@@ -31,6 +31,7 @@ nvkm-y += nvkm/engine/gr/gk20a.o
 nvkm-y += nvkm/engine/gr/gm107.o
 nvkm-y += nvkm/engine/gr/gm200.o
 nvkm-y += nvkm/engine/gr/gm20b.o
+nvkm-y += nvkm/engine/gr/gp100.o
 
 nvkm-y += nvkm/engine/gr/ctxnv40.o
 nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -48,3 +49,4 @@ nvkm-y += nvkm/engine/gr/ctxgk20a.o
 nvkm-y += nvkm/engine/gr/ctxgm107.o
 nvkm-y += nvkm/engine/gr/ctxgm200.o
 nvkm-y += nvkm/engine/gr/ctxgm20b.o
+nvkm-y += nvkm/engine/gr/ctxgp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index b02d8f50ea6aff4549a27b3e3917da5b4731b883..bc77eea351a5c78351feeadbe244591fa6b6810a 100644
@@ -1240,7 +1240,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        const struct gf100_grctx_func *grctx = gr->func->grctx;
        u32 idle_timeout;
 
-       nvkm_mc_unk260(device->mc, 0);
+       nvkm_mc_unk260(device, 0);
 
        gf100_gr_mmio(gr, grctx->hub);
        gf100_gr_mmio(gr, grctx->gpc);
@@ -1264,7 +1264,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        gf100_gr_icmd(gr, grctx->icmd);
        nvkm_wr32(device, 0x404154, idle_timeout);
        gf100_gr_mthd(gr, grctx->mthd);
-       nvkm_mc_unk260(device->mc, 1);
+       nvkm_mc_unk260(device, 1);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index ac895edce164558fff1893405aef32f5cbf486ff..52048b5a527489b5d960c24a279f9d1972f23c10 100644
@@ -101,6 +101,8 @@ void gm200_grctx_generate_405b60(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gm20b_grctx;
 
+extern const struct gf100_grctx_func gp100_grctx;
+
 /* context init value lists */
 
 extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index f521de11a299f3cdaac3241fbb62f101ce653dc1..c925ade5880e708d6a2befde5cb2f26d02470fa2 100644
@@ -226,7 +226,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        u32 idle_timeout;
        int i;
 
-       nvkm_mc_unk260(device->mc, 0);
+       nvkm_mc_unk260(device, 0);
 
        gf100_gr_mmio(gr, grctx->hub);
        gf100_gr_mmio(gr, grctx->gpc);
@@ -253,7 +253,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        gf100_gr_icmd(gr, grctx->icmd);
        nvkm_wr32(device, 0x404154, idle_timeout);
        gf100_gr_mthd(gr, grctx->mthd);
-       nvkm_mc_unk260(device->mc, 1);
+       nvkm_mc_unk260(device, 1);
 }
 
 const struct gf100_grctx_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 9ba337778ef550ac0d7ab270f8f8f32105dba54b..c46b3fdf720315984a428beecdbf7d2976dba5c5 100644
@@ -950,7 +950,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        u32 idle_timeout;
        int i;
 
-       nvkm_mc_unk260(device->mc, 0);
+       nvkm_mc_unk260(device, 0);
 
        gf100_gr_mmio(gr, grctx->hub);
        gf100_gr_mmio(gr, grctx->gpc);
@@ -979,7 +979,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
        gf100_gr_icmd(gr, grctx->icmd);
        nvkm_wr32(device, 0x404154, idle_timeout);
        gf100_gr_mthd(gr, grctx->mthd);
-       nvkm_mc_unk260(device->mc, 1);
+       nvkm_mc_unk260(device, 1);
 
        nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
        nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
new file mode 100644
index 0000000..3d1ae7d
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+#include <subdev/fb.h>
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+static void
+gp100_grctx_generate_pagepool(struct gf100_grctx *info)
+{
+       const struct gf100_grctx_func *grctx = info->gr->func->grctx;
+       const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+       const int s = 8;
+       const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+       mmio_refn(info, 0x40800c, 0x00000000, s, b);
+       mmio_wr32(info, 0x408010, 0x80000000);
+       mmio_refn(info, 0x419004, 0x00000000, s, b);
+       mmio_wr32(info, 0x419008, 0x00000000);
+}
+
+static void
+gp100_grctx_generate_attrib(struct gf100_grctx *info)
+{
+       struct gf100_gr *gr = info->gr;
+       const struct gf100_grctx_func *grctx = gr->func->grctx;
+       const u32  alpha = grctx->alpha_nr;
+       const u32 attrib = grctx->attrib_nr;
+       const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
+       const u32   size = roundup(gr->tpc_total * pertpc, 0x80);
+       const u32 access = NV_MEM_ACCESS_RW;
+       const int s = 12;
+       const int b = mmio_vram(info, size, (1 << s), access);
+       const int max_batches = 0xffff;
+       u32 ao = 0;
+       u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
+       int gpc, ppc, n = 0;
+
+       mmio_refn(info, 0x418810, 0x80000000, s, b);
+       mmio_refn(info, 0x419848, 0x10000000, s, b);
+       mmio_refn(info, 0x419c2c, 0x10000000, s, b);
+       mmio_refn(info, 0x419b00, 0x00000000, s, b);
+       mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7);
+       mmio_wr32(info, 0x405830, attrib);
+       mmio_wr32(info, 0x40585c, alpha);
+       mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
+
+       for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+               for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+                       const u32 as =  alpha * gr->ppc_tpc_nr[gpc][ppc];
+                       const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
+                       const u32 u = 0x418ea0 + (n * 0x04);
+                       const u32 o = PPC_UNIT(gpc, ppc, 0);
+                       if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+                               continue;
+                       mmio_wr32(info, o + 0xc0, bs);
+                       mmio_wr32(info, o + 0xf4, bo);
+                       mmio_wr32(info, o + 0xf0, bs);
+                       bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+                       mmio_wr32(info, o + 0xe4, as);
+                       mmio_wr32(info, o + 0xf8, ao);
+                       ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+                       mmio_wr32(info, u, bs);
+               }
+       }
+
+       mmio_wr32(info, 0x418eec, 0x00000000);
+       mmio_wr32(info, 0x41befc, 0x00000000);
+}
+
+static void
+gp100_grctx_generate_405b60(struct gf100_gr *gr)
+{
+       struct nvkm_device *device = gr->base.engine.subdev.device;
+       const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
+       u32 dist[TPC_MAX / 4] = {};
+       u32 gpcs[GPC_MAX * 2] = {};
+       u8  tpcnr[GPC_MAX];
+       int tpc, gpc, i;
+
+       memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+
+       /* won't result in the same distribution as the binary driver where
+        * some of the gpcs have more tpcs than others, but this shall do
+        * for the moment.  the code for earlier gpus has this issue too.
+        */
+       for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
+               do {
+                       gpc = (gpc + 1) % gr->gpc_nr;
+               } while(!tpcnr[gpc]);
+               tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+               dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
+               gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
+       }
+
+       for (i = 0; i < dist_nr; i++)
+               nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
+       for (i = 0; i < gr->gpc_nr * 2; i++)
+               nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
+}
+
+static void
+gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
+{
+       struct nvkm_device *device = gr->base.engine.subdev.device;
+       const struct gf100_grctx_func *grctx = gr->func->grctx;
+       u32 idle_timeout, tmp;
+       int i;
+
+       gf100_gr_mmio(gr, gr->fuc_sw_ctx);
+
+       idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
+
+       grctx->pagepool(info);
+       grctx->bundle(info);
+       grctx->attrib(info);
+       grctx->unkn(gr);
+
+       gm200_grctx_generate_tpcid(gr);
+       gf100_grctx_generate_r406028(gr);
+       gk104_grctx_generate_r418bb8(gr);
+
+       for (i = 0; i < 8; i++)
+               nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
+       nvkm_wr32(device, 0x406500, 0x00000000);
+
+       nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
+
+       for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
+               tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5);
+       nvkm_wr32(device, 0x4041c4, tmp);
+
+       gp100_grctx_generate_405b60(gr);
+
+       gf100_gr_icmd(gr, gr->fuc_bundle);
+       nvkm_wr32(device, 0x404154, idle_timeout);
+       gf100_gr_mthd(gr, gr->fuc_method);
+}
+
+const struct gf100_grctx_func
+gp100_grctx = {
+       .main  = gp100_grctx_generate_main,
+       .unkn  = gk104_grctx_generate_unkn,
+       .bundle = gm107_grctx_generate_bundle,
+       .bundle_size = 0x3000,
+       .bundle_min_gpm_fifo_depth = 0x180,
+       .bundle_token_limit = 0x1080,
+       .pagepool = gp100_grctx_generate_pagepool,
+       .pagepool_size = 0x20000,
+       .attrib = gp100_grctx_generate_attrib,
+       .attrib_nr_max = 0x660,
+       .attrib_nr = 0x440,
+       .alpha_nr_max = 0xc00,
+       .alpha_nr = 0x800,
+};
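The attrib/alpha circular-buffer sizing in gp100_grctx_generate_attrib() follows directly from the constants in the table above: 0x20 bytes per (attrib + alpha) slot, per TPC, rounded up to 0x80. A back-of-envelope sketch using the gp100 defaults (tpc_total is whatever the GPU reports):

u32 pertpc = 0x20 * (0x660 + 0xc00);		/* attrib_nr_max + alpha_nr_max */
u32 size   = roundup(tpc_total * pertpc, 0x80);	/* VRAM bytes per context */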
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 9513badb8220e8dfa01f4fa5f129692507f90c01..157919c788e6f72261d6989a352c4f6fddf3a014 100644
@@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
 }
 
 static const struct nvkm_enum gf100_mp_warp_error[] = {
-       { 0x00, "NO_ERROR" },
-       { 0x01, "STACK_MISMATCH" },
+       { 0x01, "STACK_ERROR" },
+       { 0x02, "API_STACK_ERROR" },
+       { 0x03, "RET_EMPTY_STACK_ERROR" },
+       { 0x04, "PC_WRAP" },
        { 0x05, "MISALIGNED_PC" },
-       { 0x08, "MISALIGNED_GPR" },
-       { 0x09, "INVALID_OPCODE" },
-       { 0x0d, "GPR_OUT_OF_BOUNDS" },
-       { 0x0e, "MEM_OUT_OF_BOUNDS" },
-       { 0x0f, "UNALIGNED_MEM_ACCESS" },
+       { 0x06, "PC_OVERFLOW" },
+       { 0x07, "MISALIGNED_IMMC_ADDR" },
+       { 0x08, "MISALIGNED_REG" },
+       { 0x09, "ILLEGAL_INSTR_ENCODING" },
+       { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
+       { 0x0b, "ILLEGAL_INSTR_PARAM" },
+       { 0x0c, "INVALID_CONST_ADDR" },
+       { 0x0d, "OOR_REG" },
+       { 0x0e, "OOR_ADDR" },
+       { 0x0f, "MISALIGNED_ADDR" },
        { 0x10, "INVALID_ADDR_SPACE" },
-       { 0x11, "INVALID_PARAM" },
+       { 0x11, "ILLEGAL_INSTR_PARAM2" },
+       { 0x12, "INVALID_CONST_ADDR_LDC" },
+       { 0x13, "GEOMETRY_SM_ERROR" },
+       { 0x14, "DIVERGENT" },
+       { 0x15, "WARP_EXIT" },
        {}
 };
 
 static const struct nvkm_bitfield gf100_mp_global_error[] = {
+       { 0x00000001, "SM_TO_SM_FAULT" },
+       { 0x00000002, "L1_ERROR" },
        { 0x00000004, "MULTIPLE_WARP_ERRORS" },
-       { 0x00000008, "OUT_OF_STACK_SPACE" },
+       { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
+       { 0x00000010, "BPT_INT" },
+       { 0x00000020, "BPT_PAUSE" },
+       { 0x00000040, "SINGLE_STEP_COMPLETE" },
+       { 0x20000000, "ECC_SEC_ERROR" },
+       { 0x40000000, "ECC_DED_ERROR" },
+       { 0x80000000, "TIMEOUT" },
        {}
 };
 
@@ -1438,24 +1457,30 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
        struct nvkm_device *device = subdev->device;
        struct nvkm_secboot *sb = device->secboot;
        int i;
+       int ret = 0;
 
        if (gr->firmware) {
                /* load fuc microcode */
-               nvkm_mc_unk260(device->mc, 0);
+               nvkm_mc_unk260(device, 0);
 
                /* securely-managed falcons must be reset using secure boot */
                if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
-                       nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+                       ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
                else
                        gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
                                         &gr->fuc409d);
+               if (ret)
+                       return ret;
+
                if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
-                       nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+                       ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
                else
                        gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
                                         &gr->fuc41ad);
+               if (ret)
+                       return ret;
 
-               nvkm_mc_unk260(device->mc, 1);
+               nvkm_mc_unk260(device, 1);
 
                /* start both of them running */
                nvkm_wr32(device, 0x409840, 0xffffffff);
@@ -1557,7 +1582,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
        }
 
        /* load HUB microcode */
-       nvkm_mc_unk260(device->mc, 0);
+       nvkm_mc_unk260(device, 0);
        nvkm_wr32(device, 0x4091c0, 0x01000000);
        for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
                nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
@@ -1580,7 +1605,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
                        nvkm_wr32(device, 0x41a188, i >> 6);
                nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
        }
-       nvkm_mc_unk260(device->mc, 1);
+       nvkm_mc_unk260(device, 1);
 
        /* load register lists */
        gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 2b98abdb92703bec9b56f6834703c77c8d33510b..268b8d60ff73c286215e541be987c204e16a3469 100644
@@ -292,4 +292,6 @@ extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
 extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
 extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
 void gm107_gr_init_bios(struct gf100_gr *);
+
+void gm200_gr_init_gpc_mmu(struct gf100_gr *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 4ca8ed15191c6ea158ef1bf8ef1cb6ed4a770661..de8b806b88fd9dfd8cc1e1b864a3974db04ffbf7 100644
@@ -361,6 +361,5 @@ gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
        if (ret)
                return ret;
 
-
        return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 4dfa4513bb6c5a0d8ba45ce615f221d3883d06f1..6435f125757232a884c72477c44b19eef84f7d56 100644
@@ -38,7 +38,7 @@ gm200_gr_rops(struct gf100_gr *gr)
        return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
 }
 
-static void
+void
 gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
 {
        struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
new file mode 100644
index 0000000..26ad79d
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
+{
+       struct nvkm_device *device = gr->base.engine.subdev.device;
+       /*XXX: otherwise identical to gm200 aside from mask.. do everywhere? */
+       const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f;
+       nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+       nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+static int
+gp100_gr_init(struct gf100_gr *gr)
+{
+       struct nvkm_device *device = gr->base.engine.subdev.device;
+       const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
+       u32 data[TPC_MAX / 8] = {};
+       u8  tpcnr[GPC_MAX];
+       int gpc, tpc, rop;
+       int i;
+
+       gr->func->init_gpc_mmu(gr);
+
+       gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
+
+       nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
+
+       memset(data, 0x00, sizeof(data));
+       memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
+       for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
+               do {
+                       gpc = (gpc + 1) % gr->gpc_nr;
+               } while (!tpcnr[gpc]);
+               tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
+
+               data[i / 8] |= tpc << ((i % 8) * 4);
+       }
+
+       nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
+       nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
+       nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
+       nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
+
+       for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
+                         gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
+                                                        gr->tpc_total);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
+       }
+
+       nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+       nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+       nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+
+       gr->func->init_rop_active_fbps(gr);
+
+       nvkm_wr32(device, 0x400500, 0x00010001);
+       nvkm_wr32(device, 0x400100, 0xffffffff);
+       nvkm_wr32(device, 0x40013c, 0xffffffff);
+       nvkm_wr32(device, 0x400124, 0x00000002);
+       nvkm_wr32(device, 0x409c24, 0x000f0002);
+       nvkm_wr32(device, 0x405848, 0xc0000000);
+       nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001);
+       nvkm_wr32(device, 0x404000, 0xc0000000);
+       nvkm_wr32(device, 0x404600, 0xc0000000);
+       nvkm_wr32(device, 0x408030, 0xc0000000);
+       nvkm_wr32(device, 0x404490, 0xc0000000);
+       nvkm_wr32(device, 0x406018, 0xc0000000);
+       nvkm_wr32(device, 0x407020, 0x40000000);
+       nvkm_wr32(device, 0x405840, 0xc0000000);
+       nvkm_wr32(device, 0x405844, 0x00ffffff);
+       nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+
+       nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
+       nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
+
+       gr->func->init_ppc_exceptions(gr);
+
+       for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+               for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
+                       nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
+               }
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+               nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+       }
+
+       for (rop = 0; rop < gr->rop_nr; rop++) {
+               nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
+               nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
+               nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
+               nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
+       }
+
+       nvkm_wr32(device, 0x400108, 0xffffffff);
+       nvkm_wr32(device, 0x400138, 0xffffffff);
+       nvkm_wr32(device, 0x400118, 0xffffffff);
+       nvkm_wr32(device, 0x400130, 0xffffffff);
+       nvkm_wr32(device, 0x40011c, 0xffffffff);
+       nvkm_wr32(device, 0x400134, 0xffffffff);
+
+       gf100_gr_zbc_init(gr);
+
+       return gf100_gr_init_ctxctl(gr);
+}
+
+static const struct gf100_gr_func
+gp100_gr = {
+       .init = gp100_gr_init,
+       .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+       .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+       .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+       .rops = gm200_gr_rops,
+       .ppc_nr = 2,
+       .grctx = &gp100_grctx,
+       .sclass = {
+               { -1, -1, FERMI_TWOD_A },
+               { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+               { -1, -1, PASCAL_A, &gf100_fermi },
+               { -1, -1, PASCAL_COMPUTE_A },
+               {}
+       }
+};
+
+int
+gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+       return gm200_gr_new_(&gp100_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index e15b9627b07e12cacca0bb671813d9d9e3b58628..f3c30b2a788e8534fe72c23729779afd5e39d573 100644
 #include <subdev/bios.h>
 #include <subdev/bios/bmp.h>
 #include <subdev/bios/bit.h>
+#include <subdev/bios/image.h>
+
+static bool
+nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
+{
+       u32 p = *addr;
+
+       if (*addr > bios->image0_size && bios->imaged_addr) {
+               *addr -= bios->image0_size;
+               *addr += bios->imaged_addr;
+       }
+
+       if (unlikely(*addr + size >= bios->size)) {
+               nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
+               return false;
+       }
+
+       return true;
+}
+
+u8
+nvbios_rd08(struct nvkm_bios *bios, u32 addr)
+{
+       if (likely(nvbios_addr(bios, &addr, 1)))
+               return bios->data[addr];
+       return 0x00;
+}
+
+u16
+nvbios_rd16(struct nvkm_bios *bios, u32 addr)
+{
+       if (likely(nvbios_addr(bios, &addr, 2)))
+               return get_unaligned_le16(&bios->data[addr]);
+       return 0x0000;
+}
+
+u32
+nvbios_rd32(struct nvkm_bios *bios, u32 addr)
+{
+       if (likely(nvbios_addr(bios, &addr, 4)))
+               return get_unaligned_le32(&bios->data[addr]);
+       return 0x00000000;
+}
 
 u8
 nvbios_checksum(const u8 *data, int size)
@@ -100,8 +143,9 @@ int
 nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
 {
        struct nvkm_bios *bios;
+       struct nvbios_image image;
        struct bit_entry bit_i;
-       int ret;
+       int ret, idx = 0;
 
        if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
                return -ENOMEM;
@@ -111,6 +155,19 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
        if (ret)
                return ret;
 
+       /* Some tables have weird pointers that need adjustment before
+        * they're dereferenced.  I'm not entirely sure why...
+        */
+       if (nvbios_image(bios, idx++, &image)) {
+               bios->image0_size = image.size;
+               while (nvbios_image(bios, idx++, &image)) {
+                       if (image.type == 0xe0) {
+                               bios->imaged_addr = image.base;
+                               break;
+                       }
+               }
+       }
+
        /* detect type of vbios we're dealing with */
        bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
                                          "\xff\x7f""NV\0", 5);
index a5e92135cd778682cbf3b06889e536aa25d07bb3..9efb1b48cd54d6d93404da7fedab525e4fb75417 100644 (file)
@@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 {
        u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
        if (data) {
-               info->match     = nvbios_rd16(bios, data + 0x00);
+               info->proto     = nvbios_rd08(bios, data + 0x00);
+               info->flags     = nvbios_rd16(bios, data + 0x01);
                info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
                info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
        }
@@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
 }
 
 u16
-nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
+nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
                  u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
 {
        u16 data, idx = 0;
        while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
-               if (info->match == type)
+               if ((info->proto == proto || info->proto == 0xff) &&
+                   (info->flags == flags))
                        break;
        }
        return data;
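
A hedged sketch (hypothetical ocfg_matches helper) of the new match semantics above: a table entry whose proto byte is 0xff acts as a wildcard for any requested protocol, while the flags must still match exactly.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool
	ocfg_matches(uint8_t entry_proto, uint16_t entry_flags,
		     uint8_t proto, uint16_t flags)
	{
		return (entry_proto == proto || entry_proto == 0xff) &&
		       entry_flags == flags;
	}

	int main(void)
	{
		printf("%d\n", ocfg_matches(0xff, 0x0001, 0x02, 0x0001)); /* 1: wildcard */
		printf("%d\n", ocfg_matches(0x01, 0x0001, 0x02, 0x0001)); /* 0: proto differs */
		return 0;
	}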
index 05332476354abd4f49ddf9da6c47b2714a01018b..d89e78c4e6890c43f361d1f1acb3ad252a57b844 100644 (file)
@@ -40,6 +40,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
                                case 0x30:
                                case 0x40:
                                case 0x41:
+                               case 0x42:
                                        *hdr = nvbios_rd08(bios, data + 0x01);
                                        *len = nvbios_rd08(bios, data + 0x02);
                                        *cnt = nvbios_rd08(bios, data + 0x03);
@@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
                        break;
                case 0x40:
                case 0x41:
+               case 0x42:
                        *hdr = nvbios_rd08(bios, data + 0x04);
                        *cnt = 0;
                        *len = 0;
@@ -109,6 +111,7 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
                        break;
                case 0x40:
                case 0x41:
+               case 0x42:
                        info->flags     = nvbios_rd08(bios, data + 0x04);
                        info->script[0] = nvbios_rd16(bios, data + 0x05);
                        info->script[1] = nvbios_rd16(bios, data + 0x07);
@@ -180,6 +183,11 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
                        info->pe    = nvbios_rd08(bios, data + 0x02);
                        info->tx_pu = nvbios_rd08(bios, data + 0x03);
                        break;
+               case 0x42:
+                       info->dc    = nvbios_rd08(bios, data + 0x00);
+                       info->pe    = nvbios_rd08(bios, data + 0x01);
+                       info->tx_pu = nvbios_rd08(bios, data + 0x02);
+                       break;
                default:
                        data = 0x0000;
                        break;
index 74b14cf093082a1208ee707f1df35a04c822cde0..1dbff7aeafec2ecf2006d588e3656c70071fb1c2 100644 (file)
@@ -68,11 +68,16 @@ nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
 bool
 nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image)
 {
+       u32 imaged_addr = bios->imaged_addr;
        memset(image, 0x00, sizeof(*image));
+       bios->imaged_addr = 0;
        do {
                image->base += image->size;
-               if (image->last || !nvbios_imagen(bios, image))
+               if (image->last || !nvbios_imagen(bios, image)) {
+                       bios->imaged_addr = imaged_addr;
                        return false;
+               }
        } while(idx--);
+       bios->imaged_addr = imaged_addr;
        return true;
 }
index 91a7dc56e406980ccf225fb76f232a9ab1a983fd..2ca23a9157ab9345af8d99681b204ab4cc6f52bd 100644 (file)
@@ -77,15 +77,17 @@ g84_pll_mapping[] = {
        {}
 };
 
-static u16
+static u32
 pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
        struct bit_entry bit_C;
-       u16 data = 0x0000;
+       u32 data = 0x0000;
 
        if (!bit_entry(bios, 'C', &bit_C)) {
                if (bit_C.version == 1 && bit_C.length >= 10)
                        data = nvbios_rd16(bios, bit_C.offset + 8);
+               if (bit_C.version == 2 && bit_C.length >= 4)
+                       data = nvbios_rd32(bios, bit_C.offset + 0);
                if (data) {
                        *ver = nvbios_rd08(bios, data + 0);
                        *hdr = nvbios_rd08(bios, data + 1);
@@ -137,12 +139,12 @@ pll_map(struct nvkm_bios *bios)
        }
 }
 
-static u16
+static u32
 pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
 {
        struct pll_mapping *map;
        u8  hdr, cnt;
-       u16 data;
+       u32 data;
 
        data = pll_limits_table(bios, ver, &hdr, &cnt, len);
        if (data && *ver >= 0x30) {
@@ -160,7 +162,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
        map = pll_map(bios);
        while (map && map->reg) {
                if (map->reg == reg && *ver >= 0x20) {
-                       u16 addr = (data += hdr);
+                       u32 addr = (data += hdr);
                        *type = map->type;
                        while (cnt--) {
                                if (nvbios_rd32(bios, data) == map->reg)
@@ -179,12 +181,12 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
        return 0x0000;
 }
 
-static u16
+static u32
 pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
 {
        struct pll_mapping *map;
        u8  hdr, cnt;
-       u16 data;
+       u32 data;
 
        data = pll_limits_table(bios, ver, &hdr, &cnt, len);
        if (data && *ver >= 0x30) {
@@ -202,7 +204,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
        map = pll_map(bios);
        while (map && map->reg) {
                if (map->type == type && *ver >= 0x20) {
-                       u16 addr = (data += hdr);
+                       u32 addr = (data += hdr);
                        *reg = map->reg;
                        while (cnt--) {
                                if (nvbios_rd32(bios, data) == map->reg)
@@ -228,7 +230,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
        struct nvkm_device *device = subdev->device;
        u8  ver, len;
        u32 reg = type;
-       u16 data;
+       u32 data;
 
        if (type > PLL_MAX) {
                reg  = type;
index c268e5afe852f35af49f9e65c80556671802d10a..b4a308f3cf7b0c461515a9904e62280b245d70a9 100644 (file)
 #include <subdev/bios/image.h>
 #include <subdev/bios/pmu.h>
 
-static u32
-weirdo_pointer(struct nvkm_bios *bios, u32 data)
-{
-       struct nvbios_image image;
-       int idx = 0;
-       if (nvbios_image(bios, idx++, &image)) {
-               data -= image.size;
-               while (nvbios_image(bios, idx++, &image)) {
-                       if (image.type == 0xe0)
-                               return image.base + data;
-               }
-       }
-       return 0;
-}
-
 u32
 nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
@@ -50,7 +35,7 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
        if (!bit_entry(bios, 'p', &bit_p)) {
                if (bit_p.version == 2 && bit_p.length >= 4)
                        data = nvbios_rd32(bios, bit_p.offset + 0x00);
-               if ((data = weirdo_pointer(bios, data))) {
+               if (data) {
                        *ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
                        *hdr = nvbios_rd08(bios, data + 0x01);
                        *len = nvbios_rd08(bios, data + 0x02);
@@ -97,8 +82,7 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
        u32 data;
        memset(info, 0x00, sizeof(*info));
        while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
-               if ( pmuE.type == type &&
-                   (data = weirdo_pointer(bios, pmuE.data))) {
+               if (pmuE.type == type && (data = pmuE.data)) {
                        info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
                        info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
                        info->boot_addr     = data + 0x30;
index d0ae7454764ea4097e9be1386b268414ba68a11d..b57c370c725d8c56533b0f0625c91629b0d8a4f5 100644 (file)
@@ -30,11 +30,11 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
                u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
 {
        struct bit_entry bit_P;
-       u16 rammap = 0x0000;
+       u32 rammap = 0x0000;
 
        if (!bit_entry(bios, 'P', &bit_P)) {
                if (bit_P.version == 2)
-                       rammap = nvbios_rd16(bios, bit_P.offset + 4);
+                       rammap = nvbios_rd32(bios, bit_P.offset + 4);
 
                if (rammap) {
                        *ver = nvbios_rd08(bios, rammap + 0);
@@ -61,7 +61,7 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx,
                u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
 {
        u8  snr, ssz;
-       u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
+       u32 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
        if (rammap && idx < *cnt) {
                rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
                *hdr = *len;
index 78c449b417b74c18b6d03c0eacb9b1b4894026a9..89d5543118cff139e7736d2d69dcab0c12984041 100644 (file)
@@ -99,7 +99,7 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
 {
        struct nvkm_device *device = clk->base.subdev.device;
        u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
-       u32 sctl = nvkm_rd32(device, dctl + (doff * 4));
+       u32 sclk, sctl, sdiv = 2;
 
        switch (ssrc & 0x00000003) {
        case 0:
@@ -109,13 +109,21 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
        case 2:
                return 100000;
        case 3:
-               if (sctl & 0x80000000) {
-                       u32 sclk = read_vco(clk, dsrc + (doff * 4));
-                       u32 sdiv = (sctl & 0x0000003f) + 2;
-                       return (sclk * 2) / sdiv;
+               sclk = read_vco(clk, dsrc + (doff * 4));
+
+               /* Memclk has doff of 0 despite its alternate location */
+               if (doff <= 2) {
+                       sctl = nvkm_rd32(device, dctl + (doff * 4));
+
+                       if (sctl & 0x80000000) {
+                               if (ssrc & 0x100)
+                                       sctl >>= 8;
+
+                               sdiv = (sctl & 0x3f) + 2;
+                       }
                }
 
-               return read_vco(clk, dsrc + (doff * 4));
+               return (sclk * 2) / sdiv;
        default:
                return 0;
        }
@@ -366,11 +374,17 @@ gf100_clk_prog_2(struct gf100_clk *clk, int idx)
                if (info->coef) {
                        nvkm_wr32(device, addr + 0x04, info->coef);
                        nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+
+                       /* Test PLL lock */
+                       nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
                        nvkm_msec(device, 2000,
                                if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
                                        break;
                        );
-                       nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
+                       nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
+
+                       /* Enable sync mode */
+                       nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
                }
        }
 }
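
The sequence added above follows a common poll-with-timeout pattern: enable the lock detector (clear bit 4), poll the lock bit (bit 17) for up to 2 ms, turn the detector back off, and only then enable sync mode. A self-contained model, with bit positions assumed from the masks in the hunk and a simulated register standing in for the hardware:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PLL_LOCK_DET_OFF (1u << 4)
	#define PLL_LOCK         (1u << 17)

	static uint32_t pll_cfg = PLL_LOCK_DET_OFF;
	static unsigned int polls;

	static uint32_t rd(void)
	{
		/* simulate the PLL locking after a few polls */
		if (!(pll_cfg & PLL_LOCK_DET_OFF) && ++polls > 3)
			pll_cfg |= PLL_LOCK;
		return pll_cfg;
	}

	static bool wait_pll_lock(unsigned int usec_budget)
	{
		pll_cfg &= ~PLL_LOCK_DET_OFF;	/* enable lock detection */
		while (usec_budget--) {
			if (rd() & PLL_LOCK)
				break;		/* a driver would udelay(1) here */
		}
		pll_cfg |= PLL_LOCK_DET_OFF;	/* detector off again, as above */
		return pll_cfg & PLL_LOCK;
	}

	int main(void)
	{
		printf("locked: %s\n", wait_pll_lock(2000) ? "yes" : "no");
		return 0;
	}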
index 975c401bccab8c4e3ab69e5daec2de65ffd09ea6..06bc0d2d6ae195bb6c9cf5f88141bfef8e572a31 100644 (file)
@@ -393,11 +393,17 @@ gk104_clk_prog_2(struct gk104_clk *clk, int idx)
        if (info->coef) {
                nvkm_wr32(device, addr + 0x04, info->coef);
                nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
+
+               /* Test PLL lock */
+               nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
                nvkm_msec(device, 2000,
                        if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
                                break;
                );
-               nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004);
+               nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
+
+               /* Enable sync mode */
+               nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
        }
 }
 
index 5f0ee24e31b8d5abd4240c054383b92dea73edc6..218893e3e5f92b95efa613e2f4269b4d19d0e8bb 100644 (file)
 #include <core/tegra.h>
 #include <subdev/timer.h>
 
-#define KHZ (1000)
-#define MHZ (KHZ * 1000)
-
-#define MASK(w)        ((1 << w) - 1)
-
-#define GPCPLL_CFG             (SYS_GPCPLL_CFG_BASE + 0)
-#define GPCPLL_CFG_ENABLE      BIT(0)
-#define GPCPLL_CFG_IDDQ                BIT(1)
-#define GPCPLL_CFG_LOCK_DET_OFF        BIT(4)
-#define GPCPLL_CFG_LOCK                BIT(17)
-
-#define GPCPLL_COEFF           (SYS_GPCPLL_CFG_BASE + 4)
-#define GPCPLL_COEFF_M_SHIFT   0
-#define GPCPLL_COEFF_M_WIDTH   8
-#define GPCPLL_COEFF_N_SHIFT   8
-#define GPCPLL_COEFF_N_WIDTH   8
-#define GPCPLL_COEFF_P_SHIFT   16
-#define GPCPLL_COEFF_P_WIDTH   6
-
-#define GPCPLL_CFG2                    (SYS_GPCPLL_CFG_BASE + 0xc)
-#define GPCPLL_CFG2_SETUP2_SHIFT       16
-#define GPCPLL_CFG2_PLL_STEPA_SHIFT    24
-
-#define GPCPLL_CFG3                    (SYS_GPCPLL_CFG_BASE + 0x18)
-#define GPCPLL_CFG3_PLL_STEPB_SHIFT    16
-
-#define GPC_BCASE_GPCPLL_CFG_BASE              0x00132800
-#define GPCPLL_NDIV_SLOWDOWN                   (SYS_GPCPLL_CFG_BASE + 0x1c)
-#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT     0
-#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT    8
-#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT    16
-#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT  22
-#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT  31
-
-#define SEL_VCO                                (SYS_GPCPLL_CFG_BASE + 0x100)
-#define SEL_VCO_GPC2CLK_OUT_SHIFT      0
-
-#define GPC2CLK_OUT                    (SYS_GPCPLL_CFG_BASE + 0x250)
-#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH        1
-#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT        31
-#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
-#define GPC2CLK_OUT_VCODIV_WIDTH       6
-#define GPC2CLK_OUT_VCODIV_SHIFT       8
-#define GPC2CLK_OUT_VCODIV1            0
-#define GPC2CLK_OUT_VCODIV_MASK                (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
-                                       GPC2CLK_OUT_VCODIV_SHIFT)
-#define GPC2CLK_OUT_BYPDIV_WIDTH       6
-#define GPC2CLK_OUT_BYPDIV_SHIFT       0
-#define GPC2CLK_OUT_BYPDIV31           0x3c
-#define GPC2CLK_OUT_INIT_MASK  ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
-               GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
-               | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
-               | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
-#define GPC2CLK_OUT_INIT_VAL   ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
-               GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
-               | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
-               | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
-
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG  (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT    24
-#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
-           (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
-
 static const u8 _pl_to_div[] = {
 /* PL:   0, 1, 2, 3, 4, 5, 6,  7,  8,  9, 10, 11, 12, 13, 14 */
 /* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
@@ -124,7 +61,7 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
        .min_pl = 1, .max_pl = 32,
 };
 
-static void
+void
 gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
 {
        struct nvkm_device *device = clk->base.subdev.device;
@@ -136,20 +73,33 @@ gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
        pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
 }
 
-static u32
-gk20a_pllg_calc_rate(struct gk20a_clk *clk)
+void
+gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
+{
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 val;
+
+       val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
+       val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+       val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
+       nvkm_wr32(device, GPCPLL_COEFF, val);
+}
+
+u32
+gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
 {
        u32 rate;
        u32 divider;
 
-       rate = clk->parent_rate * clk->pll.n;
-       divider = clk->pll.m * clk->pl_to_div(clk->pll.pl);
+       rate = clk->parent_rate * pll->n;
+       divider = pll->m * clk->pl_to_div(pll->pl);
 
        return rate / divider / 2;
 }
 
-static int
-gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
+int
+gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
+                   struct gk20a_pll *pll)
 {
        struct nvkm_subdev *subdev = &clk->base.subdev;
        u32 target_clk_f, ref_clk_f, target_freq;
@@ -163,16 +113,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
        target_clk_f = rate * 2 / KHZ;
        ref_clk_f = clk->parent_rate / KHZ;
 
-       max_vco_f = clk->params->max_vco;
+       target_vco_f = target_clk_f + target_clk_f / 50;
+       max_vco_f = max(clk->params->max_vco, target_vco_f);
        min_vco_f = clk->params->min_vco;
        best_m = clk->params->max_m;
        best_n = clk->params->min_n;
        best_pl = clk->params->min_pl;
 
-       target_vco_f = target_clk_f + target_clk_f / 50;
-       if (max_vco_f < target_vco_f)
-               max_vco_f = target_vco_f;
-
        /* min_pl <= high_pl <= max_pl */
        high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
        high_pl = min(high_pl, clk->params->max_pl);
@@ -195,9 +142,7 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
                target_vco_f = target_clk_f * clk->pl_to_div(pl);
 
                for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
-                       u32 u_f, vco_f;
-
-                       u_f = ref_clk_f / m;
+                       u32 u_f = ref_clk_f / m;
 
                        if (u_f < clk->params->min_u)
                                break;
@@ -211,6 +156,8 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
                                break;
 
                        for (; n <= n2; n++) {
+                               u32 vco_f;
+
                                if (n < clk->params->min_n)
                                        continue;
                                if (n > clk->params->max_n)
@@ -247,16 +194,16 @@ found_match:
                           "no best match for target @ %dMHz on gpc_pll",
                           target_clk_f / KHZ);
 
-       clk->pll.m = best_m;
-       clk->pll.n = best_n;
-       clk->pll.pl = best_pl;
+       pll->m = best_m;
+       pll->n = best_n;
+       pll->pl = best_pl;
 
-       target_freq = gk20a_pllg_calc_rate(clk);
+       target_freq = gk20a_pllg_calc_rate(clk, pll);
 
        nvkm_debug(subdev,
-                  "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
-                  target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl,
-                  clk->pl_to_div(clk->pll.pl));
+                  "actual target freq %d kHz, M %d, N %d, PL %d (div%d)\n",
+                  target_freq / KHZ, pll->m, pll->n, pll->pl,
+                  clk->pl_to_div(pll->pl));
        return 0;
 }
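
For a feel of the numbers, a worked example (made-up M/N/divider selection, 38.4 MHz reference assumed) of the formula used by gk20a_pllg_calc_rate() above: rate = parent_rate * N, divider = M * pl_to_div(PL), and the reported clock is rate / divider / 2.

	#include <stdio.h>

	int main(void)
	{
		unsigned long parent_rate = 38400000;	 /* Hz; assumed reference */
		unsigned long n = 52, m = 1, pl_div = 2; /* made-up MNP selection */
		unsigned long rate = parent_rate * n / (m * pl_div) / 2;

		printf("gpc clock = %lu Hz\n", rate);	 /* 499200000, ~499.2 MHz */
		return 0;
	}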
 
@@ -265,45 +212,36 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
 {
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
-       u32 val;
-       int ramp_timeout;
+       struct gk20a_pll pll;
+       int ret = 0;
 
        /* get old coefficients */
-       val = nvkm_rd32(device, GPCPLL_COEFF);
+       gk20a_pllg_read_mnp(clk, &pll);
        /* do nothing if NDIV is the same */
-       if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
+       if (n == pll.n)
                return 0;
 
-       /* setup */
-       nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
-               0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
-       nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
-               0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
-
        /* pll slowdown mode */
        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
                BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
                BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
 
        /* new ndiv ready for ramp */
-       val = nvkm_rd32(device, GPCPLL_COEFF);
-       val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
-       val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+       pll.n = n;
        udelay(1);
-       nvkm_wr32(device, GPCPLL_COEFF, val);
+       gk20a_pllg_write_mnp(clk, &pll);
 
        /* dynamic ramp to new ndiv */
-       val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
-       val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
        udelay(1);
-       nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val);
+       nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+                 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
+                 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
 
-       for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
-               udelay(1);
-               val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
-               if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
-                       break;
-       }
+       /* wait for ramping to complete */
+       if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
+               GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
+               GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
+               ret = -ETIMEDOUT;
 
        /* exit slowdown mode */
        nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
@@ -311,21 +249,35 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
                BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
        nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
 
-       if (ramp_timeout <= 0) {
-               nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
-               return -ETIMEDOUT;
-       }
-
-       return 0;
+       return ret;
 }
 
-static void
+static int
 gk20a_pllg_enable(struct gk20a_clk *clk)
 {
        struct nvkm_device *device = clk->base.subdev.device;
+       u32 val;
 
        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
        nvkm_rd32(device, GPCPLL_CFG);
+
+       /* enable lock detection */
+       val = nvkm_rd32(device, GPCPLL_CFG);
+       if (val & GPCPLL_CFG_LOCK_DET_OFF) {
+               val &= ~GPCPLL_CFG_LOCK_DET_OFF;
+               nvkm_wr32(device, GPCPLL_CFG, val);
+       }
+
+       /* wait for lock */
+       if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
+                          GPCPLL_CFG_LOCK) < 0)
+               return -ETIMEDOUT;
+
+       /* switch to VCO mode */
+       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+               BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+       return 0;
 }
 
 static void
@@ -333,117 +285,81 @@ gk20a_pllg_disable(struct gk20a_clk *clk)
 {
        struct nvkm_device *device = clk->base.subdev.device;
 
+       /* put PLL in bypass before disabling it */
+       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
        nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
        nvkm_rd32(device, GPCPLL_CFG);
 }
 
 static int
-_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide)
+gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
 {
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
-       u32 val, cfg;
-       struct gk20a_pll old_pll;
-       u32 n_lo;
-
-       /* get old coefficients */
-       gk20a_pllg_read_mnp(clk, &old_pll);
-
-       /* do NDIV slide if there is no change in M and PL */
-       cfg = nvkm_rd32(device, GPCPLL_CFG);
-       if (allow_slide && clk->pll.m == old_pll.m &&
-           clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) {
-               return gk20a_pllg_slide(clk, clk->pll.n);
-       }
-
-       /* slide down to NDIV_LO */
-       if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
-               int ret;
-
-               n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco,
-                                   clk->parent_rate / KHZ);
-               ret = gk20a_pllg_slide(clk, n_lo);
+       struct gk20a_pll cur_pll;
+       int ret;
 
-               if (ret)
-                       return ret;
-       }
+       gk20a_pllg_read_mnp(clk, &cur_pll);
 
-       /* split FO-to-bypass jump in halfs by setting out divider 1:2 */
+       /* split VCO-to-bypass jump in half by setting out divider 1:2 */
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
-               0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
-
-       /* put PLL in bypass before programming it */
-       val = nvkm_rd32(device, SEL_VCO);
-       val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+                 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+       /* Intentional 2nd write to assure linear divider operation */
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+       nvkm_rd32(device, GPC2CLK_OUT);
        udelay(2);
-       nvkm_wr32(device, SEL_VCO, val);
-
-       /* get out from IDDQ */
-       val = nvkm_rd32(device, GPCPLL_CFG);
-       if (val & GPCPLL_CFG_IDDQ) {
-               val &= ~GPCPLL_CFG_IDDQ;
-               nvkm_wr32(device, GPCPLL_CFG, val);
-               nvkm_rd32(device, GPCPLL_CFG);
-               udelay(2);
-       }
 
        gk20a_pllg_disable(clk);
 
-       nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__,
-                  clk->pll.m, clk->pll.n, clk->pll.pl);
-
-       n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco,
-                           clk->parent_rate / KHZ);
-       val = clk->pll.m << GPCPLL_COEFF_M_SHIFT;
-       val |= (allow_slide ? n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT;
-       val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT;
-       nvkm_wr32(device, GPCPLL_COEFF, val);
+       gk20a_pllg_write_mnp(clk, pll);
 
-       gk20a_pllg_enable(clk);
-
-       val = nvkm_rd32(device, GPCPLL_CFG);
-       if (val & GPCPLL_CFG_LOCK_DET_OFF) {
-               val &= ~GPCPLL_CFG_LOCK_DET_OFF;
-               nvkm_wr32(device, GPCPLL_CFG, val);
-       }
-
-       if (nvkm_usec(device, 300,
-               if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
-                       break;
-       ) < 0)
-               return -ETIMEDOUT;
-
-       /* switch to VCO mode */
-       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
-                 BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+       ret = gk20a_pllg_enable(clk);
+       if (ret)
+               return ret;
 
        /* restore out divider 1:1 */
-       val = nvkm_rd32(device, GPC2CLK_OUT);
-       if ((val & GPC2CLK_OUT_VCODIV_MASK) !=
-           (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) {
-               val &= ~GPC2CLK_OUT_VCODIV_MASK;
-               val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT;
-               udelay(2);
-               nvkm_wr32(device, GPC2CLK_OUT, val);
-               /* Intentional 2nd write to assure linear divider operation */
-               nvkm_wr32(device, GPC2CLK_OUT, val);
-               nvkm_rd32(device, GPC2CLK_OUT);
-       }
+       udelay(2);
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+       /* Intentional 2nd write to assure linear divider operation */
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+       nvkm_rd32(device, GPC2CLK_OUT);
 
-       /* slide up to new NDIV */
-       return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0;
+       return 0;
 }
 
 static int
-gk20a_pllg_program_mnp(struct gk20a_clk *clk)
+gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
 {
-       int err;
+       struct gk20a_pll cur_pll;
+       int ret;
 
-       err = _gk20a_pllg_program_mnp(clk, true);
-       if (err)
-               err = _gk20a_pllg_program_mnp(clk, false);
+       if (gk20a_pllg_is_enabled(clk)) {
+               gk20a_pllg_read_mnp(clk, &cur_pll);
+
+               /* just do NDIV slide if there is no change to M and PL */
+               if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
+                       return gk20a_pllg_slide(clk, pll->n);
+
+               /* slide down to current NDIV_LO */
+               cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
+               ret = gk20a_pllg_slide(clk, cur_pll.n);
+               if (ret)
+                       return ret;
+       }
+
+       /* program MNP with the new clock parameters and new NDIV_LO */
+       cur_pll = *pll;
+       cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
+       ret = gk20a_pllg_program_mnp(clk, &cur_pll);
+       if (ret)
+               return ret;
 
-       return err;
+       /* slide up to new NDIV */
+       return gk20a_pllg_slide(clk, pll->n);
 }
 
 static struct nvkm_pstate
@@ -546,13 +462,14 @@ gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
        struct gk20a_clk *clk = gk20a_clk(base);
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
+       struct gk20a_pll pll;
 
        switch (src) {
        case nv_clk_src_crystal:
                return device->crystal;
        case nv_clk_src_gpc:
-               gk20a_pllg_read_mnp(clk, &clk->pll);
-               return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV;
+               gk20a_pllg_read_mnp(clk, &pll);
+               return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
        default:
                nvkm_error(subdev, "invalid clock source %d\n", src);
                return -EINVAL;
@@ -565,15 +482,20 @@ gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
        struct gk20a_clk *clk = gk20a_clk(base);
 
        return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
-                                        GK20A_CLK_GPC_MDIV);
+                                        GK20A_CLK_GPC_MDIV, &clk->pll);
 }
 
 int
 gk20a_clk_prog(struct nvkm_clk *base)
 {
        struct gk20a_clk *clk = gk20a_clk(base);
+       int ret;
+
+       ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
+       if (ret)
+               ret = gk20a_pllg_program_mnp(clk, &clk->pll);
 
-       return gk20a_pllg_program_mnp(clk);
+       return ret;
 }
 
 void
@@ -581,29 +503,62 @@ gk20a_clk_tidy(struct nvkm_clk *base)
 {
 }
 
+int
+gk20a_clk_setup_slide(struct gk20a_clk *clk)
+{
+       struct nvkm_subdev *subdev = &clk->base.subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 step_a, step_b;
+
+       switch (clk->parent_rate) {
+       case 12000000:
+       case 12800000:
+       case 13000000:
+               step_a = 0x2b;
+               step_b = 0x0b;
+               break;
+       case 19200000:
+               step_a = 0x12;
+               step_b = 0x08;
+               break;
+       case 38400000:
+               step_a = 0x04;
+               step_b = 0x05;
+               break;
+       default:
+               nvkm_error(subdev, "invalid parent clock rate %u kHz\n",
+                          clk->parent_rate / KHZ);
+               return -EINVAL;
+       }
+
+       nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+               step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
+       nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+               step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);
+
+       return 0;
+}
+
 void
 gk20a_clk_fini(struct nvkm_clk *base)
 {
        struct nvkm_device *device = base->subdev.device;
        struct gk20a_clk *clk = gk20a_clk(base);
-       u32 val;
 
        /* slide to VCO min */
-       val = nvkm_rd32(device, GPCPLL_CFG);
-       if (val & GPCPLL_CFG_ENABLE) {
+       if (gk20a_pllg_is_enabled(clk)) {
                struct gk20a_pll pll;
                u32 n_lo;
 
                gk20a_pllg_read_mnp(clk, &pll);
-               n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco,
-                                   clk->parent_rate / KHZ);
+               n_lo = gk20a_pllg_n_lo(clk, &pll);
                gk20a_pllg_slide(clk, n_lo);
        }
 
-       /* put PLL in bypass before disabling it */
-       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
-
        gk20a_pllg_disable(clk);
+
+       /* set IDDQ */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
 }
 
 static int
@@ -614,9 +569,18 @@ gk20a_clk_init(struct nvkm_clk *base)
        struct nvkm_device *device = subdev->device;
        int ret;
 
+       /* get out from IDDQ */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
+       nvkm_rd32(device, GPCPLL_CFG);
+       udelay(5);
+
        nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
                  GPC2CLK_OUT_INIT_VAL);
 
+       ret = gk20a_clk_setup_slide(clk);
+       if (ret)
+               return ret;
+
        /* Start with lowest frequency */
        base->func->calc(base, &base->func->pstates[0].base);
        ret = base->func->prog(&clk->base);
@@ -646,7 +610,7 @@ gk20a_clk = {
 };
 
 int
-_gk20a_clk_ctor(struct nvkm_device *device, int index,
+gk20a_clk_ctor(struct nvkm_device *device, int index,
                const struct nvkm_clk_func *func,
                const struct gk20a_clk_pllg_params *params,
                struct gk20a_clk *clk)
@@ -685,7 +649,7 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
                return -ENOMEM;
        *pclk = &clk->base;
 
-       ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
+       ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
                              clk);
 
        clk->pl_to_div = pl_to_div;
index 13c46740197db0567d5f2894e74ae864cad17308..0d1450972162cb158dc727bb964aed67bdd56446 100644 (file)
 #ifndef __NVKM_CLK_GK20A_H__
 #define __NVKM_CLK_GK20A_H__
 
+#define KHZ (1000)
+#define MHZ (KHZ * 1000)
+
+#define MASK(w)        ((1 << (w)) - 1)
+
 #define GK20A_CLK_GPC_MDIV 1000
 
 #define SYS_GPCPLL_CFG_BASE    0x00137000
+#define GPCPLL_CFG             (SYS_GPCPLL_CFG_BASE + 0)
+#define GPCPLL_CFG_ENABLE      BIT(0)
+#define GPCPLL_CFG_IDDQ                BIT(1)
+#define GPCPLL_CFG_LOCK_DET_OFF        BIT(4)
+#define GPCPLL_CFG_LOCK                BIT(17)
+
+#define GPCPLL_CFG2            (SYS_GPCPLL_CFG_BASE + 0xc)
+#define GPCPLL_CFG2_SETUP2_SHIFT       16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT    24
+
+#define GPCPLL_CFG3                    (SYS_GPCPLL_CFG_BASE + 0x18)
+#define GPCPLL_CFG3_VCO_CTRL_SHIFT             0
+#define GPCPLL_CFG3_VCO_CTRL_WIDTH             9
+#define GPCPLL_CFG3_VCO_CTRL_MASK              \
+       (MASK(GPCPLL_CFG3_VCO_CTRL_WIDTH) << GPCPLL_CFG3_VCO_CTRL_SHIFT)
+#define GPCPLL_CFG3_PLL_STEPB_SHIFT            16
+#define GPCPLL_CFG3_PLL_STEPB_WIDTH            8
+
+#define GPCPLL_COEFF           (SYS_GPCPLL_CFG_BASE + 4)
+#define GPCPLL_COEFF_M_SHIFT   0
+#define GPCPLL_COEFF_M_WIDTH   8
+#define GPCPLL_COEFF_N_SHIFT   8
+#define GPCPLL_COEFF_N_WIDTH   8
+#define GPCPLL_COEFF_N_MASK    \
+       (MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT)
+#define GPCPLL_COEFF_P_SHIFT   16
+#define GPCPLL_COEFF_P_WIDTH   6
+
+#define GPCPLL_NDIV_SLOWDOWN                   (SYS_GPCPLL_CFG_BASE + 0x1c)
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT     0
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT    8
+#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT    16
+#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT  22
+#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT  31
+
+#define GPC_BCAST_GPCPLL_CFG_BASE              0x00132800
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG  (GPC_BCAST_GPCPLL_CFG_BASE + 0xa0)
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT    24
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
+       (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
+
+#define SEL_VCO                                (SYS_GPCPLL_CFG_BASE + 0x100)
+#define SEL_VCO_GPC2CLK_OUT_SHIFT      0
+
+#define GPC2CLK_OUT                    (SYS_GPCPLL_CFG_BASE + 0x250)
+#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH        1
+#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT        31
+#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
+#define GPC2CLK_OUT_VCODIV_WIDTH       6
+#define GPC2CLK_OUT_VCODIV_SHIFT       8
+#define GPC2CLK_OUT_VCODIV1            0
+#define GPC2CLK_OUT_VCODIV2            2
+#define GPC2CLK_OUT_VCODIV_MASK                (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
+                                       GPC2CLK_OUT_VCODIV_SHIFT)
+#define GPC2CLK_OUT_BYPDIV_WIDTH       6
+#define GPC2CLK_OUT_BYPDIV_SHIFT       0
+#define GPC2CLK_OUT_BYPDIV31           0x3c
+#define GPC2CLK_OUT_INIT_MASK  ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
+               GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
+               | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
+               | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
+#define GPC2CLK_OUT_INIT_VAL   ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
+               GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
+               | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
+               | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
 
 /* All frequencies in kHz */
 struct gk20a_clk_pllg_params {
@@ -54,7 +124,29 @@ struct gk20a_clk {
 };
 #define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
 
-int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
+u32 gk20a_pllg_calc_rate(struct gk20a_clk *, struct gk20a_pll *);
+int gk20a_pllg_calc_mnp(struct gk20a_clk *, unsigned long, struct gk20a_pll *);
+void gk20a_pllg_read_mnp(struct gk20a_clk *, struct gk20a_pll *);
+void gk20a_pllg_write_mnp(struct gk20a_clk *, const struct gk20a_pll *);
+
+static inline bool
+gk20a_pllg_is_enabled(struct gk20a_clk *clk)
+{
+       struct nvkm_device *device = clk->base.subdev.device;
+       u32 val;
+
+       val = nvkm_rd32(device, GPCPLL_CFG);
+       return val & GPCPLL_CFG_ENABLE;
+}
+
+static inline u32
+gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll)
+{
+       return DIV_ROUND_UP(pll->m * clk->params->min_vco,
+                           clk->parent_rate / KHZ);
+}
+
+int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
                    const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
 void gk20a_clk_fini(struct nvkm_clk *);
 int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
@@ -62,4 +154,6 @@ int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
 int gk20a_clk_prog(struct nvkm_clk *);
 void gk20a_clk_tidy(struct nvkm_clk *);
 
+int gk20a_clk_setup_slide(struct gk20a_clk *);
+
 #endif
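
A worked example (made-up parameters) of the new gk20a_pllg_n_lo() helper: NDIV_LO is the smallest N that still keeps the VCO at or above its minimum frequency for the given M and parent rate.

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int m = 1;
		unsigned int min_vco = 1300000;		/* kHz, assumed */
		unsigned int parent_rate_khz = 38400;	/* 38.4 MHz reference, assumed */

		/* 1300000 / 38400 = 33.85..., rounded up -> 34 */
		printf("n_lo = %u\n", DIV_ROUND_UP(m * min_vco, parent_rate_khz));
		return 0;
	}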
index 71b2bbb6197304ca3dea55821dd7ed83e809ce44..b284e949f73240c2702e87113d696523b7baea73 100644 (file)
  */
 
 #include <subdev/clk.h>
+#include <subdev/volt.h>
+#include <subdev/timer.h>
 #include <core/device.h>
+#include <core/tegra.h>
 
 #include "priv.h"
 #include "gk20a.h"
 
-#define KHZ (1000)
-#define MHZ (KHZ * 1000)
-
-#define MASK(w)        ((1 << w) - 1)
+#define GPCPLL_CFG_SYNC_MODE   BIT(2)
 
 #define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340)
 #define BYPASSCTRL_SYS_GPCPLL_SHIFT    0
 #define BYPASSCTRL_SYS_GPCPLL_WIDTH    1
 
+#define GPCPLL_CFG2_SDM_DIN_SHIFT      0
+#define GPCPLL_CFG2_SDM_DIN_WIDTH      8
+#define GPCPLL_CFG2_SDM_DIN_MASK       \
+       (MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
+#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT  8
+#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH  15
+#define GPCPLL_CFG2_SDM_DIN_NEW_MASK   \
+       (MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
+#define GPCPLL_CFG2_SETUP2_SHIFT       16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT    24
+
+#define GPCPLL_DVFS0   (SYS_GPCPLL_CFG_BASE + 0x10)
+#define GPCPLL_DVFS0_DFS_COEFF_SHIFT   0
+#define GPCPLL_DVFS0_DFS_COEFF_WIDTH   7
+#define GPCPLL_DVFS0_DFS_COEFF_MASK    \
+       (MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
+#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT 8
+#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH 7
+#define GPCPLL_DVFS0_DFS_DET_MAX_MASK  \
+       (MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)
+
+#define GPCPLL_DVFS1           (SYS_GPCPLL_CFG_BASE + 0x14)
+#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT         0
+#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH         7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT                7
+#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH                1
+#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT         8
+#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH         7
+#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT         15
+#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH         1
+#define GPCPLL_DVFS1_DFS_CTRL_SHIFT            16
+#define GPCPLL_DVFS1_DFS_CTRL_WIDTH            12
+#define GPCPLL_DVFS1_EN_SDM_SHIFT              28
+#define GPCPLL_DVFS1_EN_SDM_WIDTH              1
+#define GPCPLL_DVFS1_EN_SDM_BIT                        BIT(28)
+#define GPCPLL_DVFS1_EN_DFS_SHIFT              29
+#define GPCPLL_DVFS1_EN_DFS_WIDTH              1
+#define GPCPLL_DVFS1_EN_DFS_BIT                        BIT(29)
+#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT          30
+#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH          1
+#define GPCPLL_DVFS1_EN_DFS_CAL_BIT            BIT(30)
+#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT                31
+#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH                1
+#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT          BIT(31)
+
+#define GPC_BCAST_GPCPLL_DVFS2 (GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
+#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT      BIT(16)
+
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT      24
+#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH      7
+
+#define DFS_DET_RANGE  6       /* -2^6 ... 2^6-1 */
+#define SDM_DIN_RANGE  12      /* -2^12 ... 2^12-1 */
+
+struct gm20b_clk_dvfs_params {
+       s32 coeff_slope;
+       s32 coeff_offs;
+       u32 vco_ctrl;
+};
+
+static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
+       .coeff_slope = -165230,
+       .coeff_offs = 214007,
+       .vco_ctrl = 0x7 << 3,
+};
+
+/*
+ * base.n is now the *integer* part of the N factor.
+ * sdm_din contains N's fractional part.
+ */
+struct gm20b_pll {
+       struct gk20a_pll base;
+       u32 sdm_din;
+};
+
+struct gm20b_clk_dvfs {
+       u32 dfs_coeff;
+       s32 dfs_det_max;
+       s32 dfs_ext_cal;
+};
+
+struct gm20b_clk {
+       /* currently applied parameters */
+       struct gk20a_clk base;
+       struct gm20b_clk_dvfs dvfs;
+       u32 uv;
+
+       /* new parameters to apply */
+       struct gk20a_pll new_pll;
+       struct gm20b_clk_dvfs new_dvfs;
+       u32 new_uv;
+
+       const struct gm20b_clk_dvfs_params *dvfs_params;
+
+       /* fused parameters */
+       s32 uvdet_slope;
+       s32 uvdet_offs;
+
+       /* safe frequency we can use at minimum voltage */
+       u32 safe_fmax_vmin;
+};
+#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
+
 static u32 pl_to_div(u32 pl)
 {
        return pl;
@@ -53,6 +156,484 @@ static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
        .min_pl = 1, .max_pl = 31,
 };
 
+static void
+gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 val;
+
+       gk20a_pllg_read_mnp(&clk->base, &pll->base);
+       val = nvkm_rd32(device, GPCPLL_CFG2);
+       pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
+                      MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
+}
+
+static void
+gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+
+       nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
+                 pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
+       gk20a_pllg_write_mnp(&clk->base, &pll->base);
+}
+
+/*
+ * Determine DFS_COEFF for the requested voltage. Always select external
+ * calibration override equal to the voltage, and set the maximum detection
+ * limit to 0 (to make sure the PLL output remains under the F/V curve when
+ * the voltage increases).
+ */
+static void
+gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
+                         struct gm20b_clk_dvfs *dvfs)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
+       u32 coeff;
+       /* Work in mV, as uV would likely overflow the s32 arithmetic */
+       s32 mv = DIV_ROUND_CLOSEST(uv, 1000);
+
+       /* coeff = slope * voltage + offset */
+       coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
+       coeff = DIV_ROUND_CLOSEST(coeff, 1000);
+       dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));
+
+       dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
+                                            clk->uvdet_slope);
+       /* should never happen */
+       if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
+               nvkm_error(subdev, "dfs_ext_cal overflow!\n");
+
+       dvfs->dfs_det_max = 0;
+
+       nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
+                  __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
+                  dvfs->dfs_det_max);
+}
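
A worked example plugging in the gm20b_dvfs_params slope/offset above (0.9 V operating point assumed): coeff = slope * mV / 1000 + offset, scaled down by another 1000 and finally clamped to the 7-bit DFS_COEFF field.

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

	int main(void)
	{
		int mv = 900;				/* 0.9 V, assumed */
		int slope_mag = 165230, offs = 214007;	/* slope is negative */
		int coeff = offs - DIV_ROUND_CLOSEST(mv * slope_mag, 1000);

		coeff = DIV_ROUND_CLOSEST(coeff, 1000);
		printf("dfs_coeff = %d\n", coeff);	/* 65, below the 127 clamp */
		return 0;
	}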
+
+/*
+ * Solve equation for integer and fractional part of the effective NDIV:
+ *
+ * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
+ *         (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
+ *
+ * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
+ */
+static void
+gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       const struct gk20a_clk_pllg_params *p = clk->base.params;
+       u32 n;
+       s32 det_delta;
+       u32 rem, rem_range;
+
+       /* calculate current ext_cal and subtract previous one */
+       det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
+                                     clk->uvdet_slope);
+       det_delta -= clk->dvfs.dfs_ext_cal;
+       det_delta = min(det_delta, clk->dvfs.dfs_det_max);
+       det_delta *= clk->dvfs.dfs_coeff;
+
+       /* integer part of n */
+       n = (n_eff << DFS_DET_RANGE) - det_delta;
+       /* should never happen! */
+       if ((s32)n <= 0) {
+               nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
+               n = 1 << DFS_DET_RANGE;
+       }
+       if (n >> DFS_DET_RANGE > p->max_n) {
+               nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
+               n = p->max_n << DFS_DET_RANGE;
+       }
+       *n_int = n >> DFS_DET_RANGE;
+
+       /* fractional part of n */
+       rem = ((u32)n) & MASK(DFS_DET_RANGE);
+       rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
+       /* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
+       rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
+       /* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
+       *sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
+
+       nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
+                  n_eff, *n_int, *sdm_din);
+}
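
A worked example (made-up n_eff and detection delta) of the split performed above: the corrected N is kept at 2^DFS_DET_RANGE resolution, its top bits become n_int, and the remainder is rescaled into the SDM fraction with the 1/2 offset subtracted.

	#include <stdint.h>
	#include <stdio.h>

	#define DFS_DET_RANGE 6
	#define SDM_DIN_RANGE 12

	int main(void)
	{
		uint32_t n_eff = 34;
		int32_t det_delta = 16;	/* dfs_coeff * detection delta, made up */
		uint32_t n = (n_eff << DFS_DET_RANGE) - det_delta;
		uint32_t n_int = n >> DFS_DET_RANGE;
		uint32_t rem = n & ((1u << DFS_DET_RANGE) - 1);
		uint32_t rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;

		rem = (rem << rem_range) - (1u << SDM_DIN_RANGE);
		/* prints n_int = 33, sdm_din = 8 */
		printf("n_int = %u, sdm_din = %u\n", (unsigned)n_int,
		       (unsigned)((rem >> 8) & 0xff));
		return 0;
	}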
+
+static int
+gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       struct nvkm_device *device = subdev->device;
+       struct gm20b_pll pll;
+       u32 n_int, sdm_din;
+       int ret = 0;
+
+       /* calculate the new n_int/sdm_din for this n/uv */
+       gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);
+
+       /* get old coefficients */
+       gm20b_pllg_read_mnp(clk, &pll);
+       /* do nothing if NDIV is the same */
+       if (n_int == pll.base.n && sdm_din == pll.sdm_din)
+               return 0;
+
+       /* pll slowdown mode */
+       nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+               BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
+               BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
+
+       /* new ndiv ready for ramp */
+       /* in DVFS mode SDM is updated via "new" field */
+       nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
+                 sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
+       pll.base.n = n_int;
+       udelay(1);
+       gk20a_pllg_write_mnp(&clk->base, &pll.base);
+
+       /* dynamic ramp to new ndiv */
+       udelay(1);
+       nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+                 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
+                 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
+
+       /* wait for ramping to complete */
+       if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
+               GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
+               GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
+               ret = -ETIMEDOUT;
+
+       /* in DVFS mode complete SDM update */
+       nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
+                 sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
+
+       /* exit slowdown mode */
+       nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
+               BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
+               BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
+       nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
+
+       return ret;
+}
+
+static int
+gm20b_pllg_enable(struct gm20b_clk *clk)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+       nvkm_rd32(device, GPCPLL_CFG);
+
+       /* In DVFS mode lock cannot be used - so just delay */
+       udelay(40);
+
+       /* set SYNC_MODE for glitchless switch out of bypass */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
+                      GPCPLL_CFG_SYNC_MODE);
+       nvkm_rd32(device, GPCPLL_CFG);
+
+       /* switch to VCO mode */
+       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
+                 BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+       return 0;
+}
+
+static void
+gm20b_pllg_disable(struct gm20b_clk *clk)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+
+       /* put PLL in bypass before disabling it */
+       nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
+       /* clear SYNC_MODE before disabling PLL */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);
+
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+       nvkm_rd32(device, GPCPLL_CFG);
+}
+
+static int
+gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       struct nvkm_device *device = subdev->device;
+       struct gm20b_pll cur_pll;
+       u32 n_int, sdm_din;
+       /* if we only change pdiv, we can do a glitchless transition */
+       bool pdiv_only;
+       int ret;
+
+       gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
+       gm20b_pllg_read_mnp(clk, &cur_pll);
+       pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
+                   cur_pll.base.m == pll->m;
+
+       /* need full sequence if clock not enabled yet */
+       if (!gk20a_pllg_is_enabled(&clk->base))
+               pdiv_only = false;
+
+       /* split VCO-to-bypass jump in half by setting out divider 1:2 */
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+       /* Intentional 2nd write to assure linear divider operation */
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
+       nvkm_rd32(device, GPC2CLK_OUT);
+       udelay(2);
+
+       if (pdiv_only) {
+               u32 old = cur_pll.base.pl;
+               u32 new = pll->pl;
+
+               /*
+                * we can do a glitchless transition only if the old and new PL
+                * parameters share at least one bit set to 1. If this is not
+                * the case, calculate and program an interim PL that will allow
+                * us to respect that rule.
+                */
+               if ((old & new) == 0) {
+                       cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
+                                             new | BIT(ffs(old) - 1));
+                       gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
+               }
+
+               cur_pll.base.pl = new;
+               gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
+       } else {
+               /* disable before programming if more than pdiv changes */
+               gm20b_pllg_disable(clk);
+
+               cur_pll.base = *pll;
+               cur_pll.base.n = n_int;
+               cur_pll.sdm_din = sdm_din;
+               gm20b_pllg_write_mnp(clk, &cur_pll);
+
+               ret = gm20b_pllg_enable(clk);
+               if (ret)
+                       return ret;
+       }
+
+       /* restore out divider 1:1 */
+       udelay(2);
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+       /* Intentional 2nd write to assure linear divider operation */
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+                 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
+       nvkm_rd32(device, GPC2CLK_OUT);
+
+       return 0;
+}
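
A worked example (made-up PL values) of the interim-PL rule above: when the old and new PL share no set bit, an intermediate value overlapping both is programmed first so the divider never glitches.

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	#define BIT(n) (1u << (n))
	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned int old = 0x4, new = 0x3;	/* no common bit set */

		if ((old & new) == 0) {
			unsigned int interim = MIN(old | BIT(ffs(new) - 1),
						   new | BIT(ffs(old) - 1));
			/* prints: program interim PL 0x5, then 0x3 */
			printf("program interim PL 0x%x, then 0x%x\n", interim, new);
		}
		return 0;
	}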
+
+static int
+gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
+{
+       struct gk20a_pll cur_pll;
+       int ret;
+
+       if (gk20a_pllg_is_enabled(&clk->base)) {
+               gk20a_pllg_read_mnp(&clk->base, &cur_pll);
+
+               /* just do NDIV slide if there is no change to M and PL */
+               if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
+                       return gm20b_pllg_slide(clk, pll->n);
+
+               /* slide down to current NDIV_LO */
+               cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
+               ret = gm20b_pllg_slide(clk, cur_pll.n);
+               if (ret)
+                       return ret;
+       }
+
+       /* program MNP with the new clock parameters and new NDIV_LO */
+       cur_pll = *pll;
+       cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
+       ret = gm20b_pllg_program_mnp(clk, &cur_pll);
+       if (ret)
+               return ret;
+
+       /* slide up to new NDIV */
+       return gm20b_pllg_slide(clk, pll->n);
+}
+
+static int
+gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
+{
+       struct gm20b_clk *clk = gm20b_clk(base);
+       struct nvkm_subdev *subdev = &base->subdev;
+       struct nvkm_volt *volt = base->subdev.device->volt;
+       int ret;
+
+       ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
+                                            GK20A_CLK_GPC_MDIV, &clk->new_pll);
+       if (ret)
+               return ret;
+
+       clk->new_uv = volt->vid[cstate->voltage].uv;
+       gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);
+
+       nvkm_debug(subdev, "%s new voltage: %d uv\n", __func__, clk->new_uv);
+
+       return 0;
+}
+
+/*
+ * Compute PLL parameters that are always safe for the current voltage
+ */
+static void
+gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
+{
+       u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
+       u32 parent_rate = clk->base.parent_rate / KHZ;
+       u32 nmin, nsafe;
+
+       /* leave a 10% safety margin */
+       if (rate > clk->safe_fmax_vmin)
+               rate = rate * (100 - 10) / 100;
+
+       /* gpc2clk */
+       rate *= 2;
+
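+       /*
+        * Example with assumed numbers: parent_rate = 38400 KHz, m = 1 and
+        * min_vco = 1300000 KHz give nmin = DIV_ROUND_UP(1300000, 38400) =
+        * 34; nsafe is then raised to nmin whenever the safe rate is lower.
+        */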
+       nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
+       nsafe = pll->m * rate / parent_rate; /* keep units in KHz, as nmin */
+
+       if (nsafe < nmin) {
+               pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
+               nsafe = nmin;
+       }
+
+       pll->n = nsafe;
+}
+
+static void
+gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+
+       /* strobe to read external DFS coefficient */
+       nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
+
+       nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
+                 coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);
+
+       udelay(1);
+       nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
+}
+
+static void
+gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+       u32 val;
+
+       nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
+                 dfs_det_cal);
+       udelay(1);
+
+       val = nvkm_rd32(device, GPCPLL_DVFS1);
+       if (!(val & BIT(25))) {
+               /* Use external value to overwrite calibration value */
+               val |= BIT(25) | BIT(16);
+               nvkm_wr32(device, GPCPLL_DVFS1, val);
+       }
+}
+
+static void
+gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
+                                struct gm20b_clk_dvfs *dvfs)
+{
+       struct nvkm_device *device = clk->base.base.subdev.device;
+
+       /* strobe to read external DFS coefficient */
+       nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
+
+       nvkm_mask(device, GPCPLL_DVFS0,
+                 GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
+                 dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
+                 dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);
+
+       udelay(1);
+       nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
+                 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
+
+       gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
+}
+
+static int
+gm20b_clk_prog(struct nvkm_clk *base)
+{
+       struct gm20b_clk *clk = gm20b_clk(base);
+       u32 cur_freq;
+       int ret;
+
+       /* No change in DVFS settings? */
+       if (clk->uv == clk->new_uv)
+               goto prog;
+
+       /*
+        * Interim step for changing DVFS detection settings: low enough
+        * frequency to be safe at DVFS coeff = 0.
+        *
+        * 1. If voltage is increasing:
+        * - safe frequency target matches the lower (old) frequency
+        * - DVFS settings are still the old ones
+        * - voltage has already been increased to the new level by the
+        *   volt subdev, but the maximum detection limit keeps the PLL
+        *   output under the F/V curve
+        *
+        * 2. If voltage is decreasing:
+        * - safe frequency target matches the lower (new) frequency
+        * - DVFS settings are still the old ones
+        * - voltage is still the old one; the volt subdev will lower it
+        *   afterwards
+        *
+        * The interim step can be skipped if the old frequency is below
+        * the safe minimum, i.e., low enough to be safe at any voltage in
+        * the operating range with a zero DVFS coefficient.
+        */
+       cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
+       if (cur_freq > clk->safe_fmax_vmin) {
+               struct gk20a_pll pll_safe;
+
+               if (clk->uv < clk->new_uv)
+                       /* voltage will rise: safe frequency is the current one */
+                       pll_safe = clk->base.pll;
+               else
+                       /* voltage will drop: safe frequency is the new one */
+                       pll_safe = clk->new_pll;
+
+               gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
+               ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * DVFS detection settings transition:
+        * - Set DVFS coefficient zero
+        * - Set calibration level to new voltage
+        * - Set DVFS coefficient to match new voltage
+        */
+       gm20b_dvfs_program_coeff(clk, 0);
+       gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
+       gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
+       gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
+
+prog:
+       clk->uv = clk->new_uv;
+       clk->dvfs = clk->new_dvfs;
+       clk->base.pll = clk->new_pll;
+
+       return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
+}
+
 static struct nvkm_pstate
 gm20b_pstates[] = {
        {
@@ -133,9 +714,99 @@ gm20b_pstates[] = {
                        .voltage = 12,
                },
        },
-
 };
 
+static void
+gm20b_clk_fini(struct nvkm_clk *base)
+{
+       struct nvkm_device *device = base->subdev.device;
+       struct gm20b_clk *clk = gm20b_clk(base);
+
+       /* slide to VCO min */
+       if (gk20a_pllg_is_enabled(&clk->base)) {
+               struct gk20a_pll pll;
+               u32 n_lo;
+
+               gk20a_pllg_read_mnp(&clk->base, &pll);
+               n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
+               gm20b_pllg_slide(clk, n_lo);
+       }
+
+       gm20b_pllg_disable(clk);
+
+       /* set IDDQ */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
+}
+
+static int
+gm20b_clk_init_dvfs(struct gm20b_clk *clk)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       struct nvkm_device *device = subdev->device;
+       bool fused = clk->uvdet_offs && clk->uvdet_slope;
+       static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
+       u32 data;
+       int ret;
+
+       /* Enable NA DVFS */
+       nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
+                 GPCPLL_DVFS1_EN_DFS_BIT);
+
+       /* Set VCO_CTRL */
+       if (clk->dvfs_params->vco_ctrl)
+               nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
+                     clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);
+
+       if (fused) {
+               /* Start internal calibration, but ignore results */
+               nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
+                         GPCPLL_DVFS1_EN_DFS_CAL_BIT);
+
+               /* got uvdet parameters from fuse, skip calibration */
+               goto calibrated;
+       }
+
+       /*
+        * If calibration parameters are not fused, start internal calibration,
+        * wait for completion, and use results along with default slope to
+        * calculate ADC offset during boot.
+        */
+       nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
+                 GPCPLL_DVFS1_EN_DFS_CAL_BIT);
+
+       /* Wait for internal calibration to finish (spec < 2us) */
+       ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
+                            GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
+                            GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
+       if (ret < 0) {
+               nvkm_error(subdev, "GPCPLL calibration timeout\n");
+               return -ETIMEDOUT;
+       }
+
+       data = nvkm_rd32(device, GPCPLL_CFG3) >>
+                        GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
+       data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);
+
+       clk->uvdet_slope = ADC_SLOPE_UV;
+       clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;
+
+       nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
+                  clk->uvdet_offs, clk->uvdet_slope);
+
+calibrated:
+       /* Compute and apply initial DVFS parameters */
+       gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
+       gm20b_dvfs_program_coeff(clk, 0);
+       gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
+       gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
+       gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
+
+       return 0;
+}
+
+/* Forward declaration to detect speedo >= 1 in gm20b_clk_init() */
+static const struct nvkm_clk_func gm20b_clk;
+
 static int
 gm20b_clk_init(struct nvkm_clk *base)
 {
@@ -143,15 +814,56 @@ gm20b_clk_init(struct nvkm_clk *base)
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        int ret;
+       u32 data;
+
+       /* get out of IDDQ */
+       nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
+       nvkm_rd32(device, GPCPLL_CFG);
+       udelay(5);
+
+       nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
+                 GPC2CLK_OUT_INIT_VAL);
 
        /* Set the global bypass control to VCO */
        nvkm_mask(device, BYPASSCTRL_SYS,
               MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
               0);
 
+       ret = gk20a_clk_setup_slide(clk);
+       if (ret)
+               return ret;
+
+       /* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
+       data = nvkm_rd32(device, 0x021944);
+       if (!(data & 0x3)) {
+               data |= 0x2;
+               nvkm_wr32(device, 0x021944, data);
+
+               data = nvkm_rd32(device, 0x021948);
+               data |= 0x1;
+               nvkm_wr32(device, 0x021948, data);
+       }
+
+       /* Disable idle slowdown */
+       nvkm_mask(device, 0x20160, 0x003f0000, 0x0);
+
+       /* speedo >= 1? */
+       if (clk->base.func == &gm20b_clk) {
+               struct gm20b_clk *_clk = gm20b_clk(base);
+               struct nvkm_volt *volt = device->volt;
+
+               /* Get current voltage */
+               _clk->uv = nvkm_volt_get(volt);
+
+               /* Initialize DVFS */
+               ret = gm20b_clk_init_dvfs(_clk);
+               if (ret)
+                       return ret;
+       }
+
        /* Start with lowest frequency */
        base->func->calc(base, &base->func->pstates[0].base);
-       ret = base->func->prog(&clk->base);
+       ret = base->func->prog(base);
        if (ret) {
                nvkm_error(subdev, "cannot initialize clock\n");
                return ret;
@@ -169,6 +881,7 @@ gm20b_clk_speedo0 = {
        .prog = gk20a_clk_prog,
        .tidy = gk20a_clk_tidy,
        .pstates = gm20b_pstates,
+       /* Speedo 0 only supports 12 voltages */
        .nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
        .domains = {
                { nv_clk_src_crystal, 0xff },
@@ -177,8 +890,26 @@ gm20b_clk_speedo0 = {
        },
 };
 
-int
-gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+static const struct nvkm_clk_func
+gm20b_clk = {
+       .init = gm20b_clk_init,
+       .fini = gm20b_clk_fini,
+       .read = gk20a_clk_read,
+       .calc = gm20b_clk_calc,
+       .prog = gm20b_clk_prog,
+       .tidy = gk20a_clk_tidy,
+       .pstates = gm20b_pstates,
+       .nr_pstates = ARRAY_SIZE(gm20b_pstates),
+       .domains = {
+               { nv_clk_src_crystal, 0xff },
+               { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+               { nv_clk_src_max },
+       },
+};
+
+static int
+gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
+                     struct nvkm_clk **pclk)
 {
        struct gk20a_clk *clk;
        int ret;
@@ -188,11 +919,156 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
                return -ENOMEM;
        *pclk = &clk->base;
 
-       ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
-                             &gm20b_pllg_params, clk);
+       ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
+                            &gm20b_pllg_params, clk);
 
        clk->pl_to_div = pl_to_div;
        clk->div_to_pl = div_to_pl;
 
        return ret;
 }
+
+/* FUSE register */
+#define FUSE_RESERVED_CALIB0   0x204
+#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT      0
+#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH      4
+#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT       4
+#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH       10
+#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT          14
+#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH          10
+#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT           24
+#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH           6
+#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT            30
+#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH            2
+
+static int
+gm20b_clk_init_fused_params(struct gm20b_clk *clk)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       u32 val = 0;
+       u32 rev = 0;
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA)
+       tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
+       rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
+             MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
+#endif
+
+       /* No fused parameters, we will calibrate later */
+       if (rev == 0)
+               return -EINVAL;
+
+       /* Integer part in mV + fractional part in uV */
+       clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
+                       MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
+                       ((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
+                       MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));
+
+       /* Integer part in mV + fractional part in 100uV */
+       clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
+                       MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
+                       ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
+                        MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;
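+       /*
+        * Hypothetical decode: SLOPE_INT = 10 and SLOPE_FRAC = 500 give
+        * uvdet_slope = 10 * 1000 + 500 = 10500; INTERCEPT_INT = 600 and
+        * INTERCEPT_FRAC = 5 give uvdet_offs = 600 * 1000 + 5 * 100 =
+        * 600500 (both in uV).
+        */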
+
+       nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
+                  clk->uvdet_slope, clk->uvdet_offs);
+       return 0;
+}
+
+static int
+gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
+{
+       struct nvkm_subdev *subdev = &clk->base.base.subdev;
+       struct nvkm_volt *volt = subdev->device->volt;
+       struct nvkm_pstate *pstates = clk->base.base.func->pstates;
+       int nr_pstates = clk->base.base.func->nr_pstates;
+       int vmin, id = 0;
+       u32 fmax = 0;
+       int i;
+
+       /* find lowest voltage we can use */
+       vmin = volt->vid[0].uv;
+       for (i = 1; i < volt->vid_nr; i++) {
+               if (volt->vid[i].uv <= vmin) {
+                       vmin = volt->vid[i].uv;
+                       id = volt->vid[i].vid;
+               }
+       }
+
+       /* find max frequency at this voltage */
+       for (i = 0; i < nr_pstates; i++)
+               if (pstates[i].base.voltage == id)
+                       fmax = max(fmax,
+                                  pstates[i].base.domain[nv_clk_src_gpc]);
+
+       if (!fmax) {
+               nvkm_error(subdev, "failed to evaluate safe fmax\n");
+               return -EINVAL;
+       }
+
+       /* we are safe at 90% of the max frequency */
+       clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
+       nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin);
+
+       return 0;
+}
+
+int
+gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
+{
+       struct nvkm_device_tegra *tdev = device->func->tegra(device);
+       struct gm20b_clk *clk;
+       struct nvkm_subdev *subdev;
+       struct gk20a_clk_pllg_params *clk_params;
+       int ret;
+
+       /* Speedo 0 GPUs cannot use noise-aware PLL */
+       if (tdev->gpu_speedo_id == 0)
+               return gm20b_clk_new_speedo0(device, index, pclk);
+
+       /* Speedo >= 1, use NAPLL */
+       clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
+       if (!clk)
+               return -ENOMEM;
+       *pclk = &clk->base.base;
+       subdev = &clk->base.base.subdev;
+
+       /* duplicate the clock parameters since we will patch them below */
+       clk_params = (void *) (clk + 1);
+       *clk_params = gm20b_pllg_params;
+       ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
+                            &clk->base);
+       if (ret)
+               return ret;
+
+       /*
+        * NAPLL can only work with max_u, clamp the m range so
+        * gk20a_pllg_calc_mnp always uses it
+        */
+       clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
+                                               (clk->base.parent_rate / KHZ));
+       if (clk_params->max_m == 0) {
+               nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
+               kfree(clk);
+               return gm20b_clk_new_speedo0(device, index, pclk);
+       }
+
+       clk->base.pl_to_div = pl_to_div;
+       clk->base.div_to_pl = div_to_pl;
+
+       clk->dvfs_params = &gm20b_dvfs_params;
+
+       ret = gm20b_clk_init_fused_params(clk);
+       /*
+        * if this fails, we will calibrate during init; it should never
+        * happen on production parts
+        */
+       if (ret)
+               nvkm_warn(subdev, "no fused calibration parameters\n");
+
+       ret = gm20b_clk_init_safe_fmax(clk);
+       if (ret)
+               return ret;
+
+       return 0;
+}
index 842d5de96d73169be05fc86d2462fb1e27ebdcf9..edcc157e6ac843d13f5ad9339c05b662c3f784ce 100644 (file)
@@ -24,6 +24,8 @@ nvkm-y += nvkm/subdev/fb/gk104.o
 nvkm-y += nvkm/subdev/fb/gk20a.o
 nvkm-y += nvkm/subdev/fb/gm107.o
 nvkm-y += nvkm/subdev/fb/gm200.o
+nvkm-y += nvkm/subdev/fb/gp100.o
+nvkm-y += nvkm/subdev/fb/gp104.o
 
 nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -41,6 +43,7 @@ nvkm-y += nvkm/subdev/fb/rammcp77.o
 nvkm-y += nvkm/subdev/fb/ramgf100.o
 nvkm-y += nvkm/subdev/fb/ramgk104.o
 nvkm-y += nvkm/subdev/fb/ramgm107.o
+nvkm-y += nvkm/subdev/fb/ramgp100.o
 nvkm-y += nvkm/subdev/fb/sddr2.o
 nvkm-y += nvkm/subdev/fb/sddr3.o
 nvkm-y += nvkm/subdev/fb/gddr3.o
index ce90242b8cceeca8718b2daa7c1c6e8553d77db5..a7049c041594b6511f52b33331a73e2bc8e3582d 100644 (file)
@@ -25,6 +25,7 @@
 #include "ram.h"
 
 #include <core/memory.h>
+#include <core/option.h>
 #include <subdev/bios.h>
 #include <subdev/bios/M0203.h>
 #include <engine/gr.h>
@@ -134,6 +135,10 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
 
        if (fb->func->init)
                fb->func->init(fb);
+       if (fb->func->init_page)
+               fb->func->init_page(fb);
+       if (fb->func->init_unkn)
+               fb->func->init_unkn(fb);
        return 0;
 }
 
@@ -171,6 +176,7 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
        nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
        fb->func = func;
        fb->tile.regions = fb->func->tile.regions;
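+       /*
+        * Optional user override, typically passed via the nouveau.config
+        * module option, e.g. NvFbBigPage=16 (illustrative value).
+        */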
+       fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0);
 }
 
 int
index e649ead5ccfc74c7416e78f4294d7d1bcdbbf9b3..76433cc66fff2f1d96d1b4f81c5ab4a09b34e6e8 100644 (file)
@@ -71,6 +71,22 @@ gf100_fb_oneinit(struct nvkm_fb *fb)
        return 0;
 }
 
+void
+gf100_fb_init_page(struct nvkm_fb *fb)
+{
+       struct nvkm_device *device = fb->subdev.device;
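+       /*
+        * fb->page holds log2 of the big-page size: 16 selects 64 KiB,
+        * 17 the 128 KiB default (the old hardcoded "128KiB lpg" value).
+        */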
+       switch (fb->page) {
+       case 16:
+               nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
+               break;
+       case 17:
+       default:
+               nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
+               fb->page = 17;
+               break;
+       }
+}
+
 void
 gf100_fb_init(struct nvkm_fb *base)
 {
@@ -79,8 +95,6 @@ gf100_fb_init(struct nvkm_fb *base)
 
        if (fb->r100c10_page)
                nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
-
-       nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
 }
 
 void *
@@ -125,6 +139,7 @@ gf100_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gf100_fb_init,
+       .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gf100_ram_new,
        .memtype_valid = gf100_fb_memtype_valid,
index 2160e5a39c9a928a316049b5657bad2b0b4ebe60..449f431644b31ab90a0245feebf03d7023be88b3 100644 (file)
@@ -14,4 +14,6 @@ int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
 void *gf100_fb_dtor(struct nvkm_fb *);
 void gf100_fb_init(struct nvkm_fb *);
 void gf100_fb_intr(struct nvkm_fb *);
+
+void gp100_fb_init(struct nvkm_fb *);
 #endif
index b41f0f70038cb4c8a9d2d0ee464ed7a6fc768392..4245e2e6e604501fa9ead8ad0ed3582f9afee6cc 100644 (file)
@@ -29,6 +29,7 @@ gk104_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gf100_fb_init,
+       .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gk104_ram_new,
        .memtype_valid = gf100_fb_memtype_valid,
index 7306f7dfc3b93f4455942e9c2cb8387c51320669..f815fe2bbf08892dd52654552600c33843392f0b 100644 (file)
@@ -27,7 +27,6 @@ static void
 gk20a_fb_init(struct nvkm_fb *fb)
 {
        struct nvkm_device *device = fb->subdev.device;
-       nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
        nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
        nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
 }
@@ -36,6 +35,7 @@ static const struct nvkm_fb_func
 gk20a_fb = {
        .oneinit = gf100_fb_oneinit,
        .init = gk20a_fb_init,
+       .init_page = gf100_fb_init_page,
        .memtype_valid = gf100_fb_memtype_valid,
 };
 
index 4869fdb753c971f7cc35a880cd3bad06c3efa031..db699025f5464711c8b02972c32ea32ee5076a59 100644 (file)
@@ -29,6 +29,7 @@ gm107_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gf100_fb_init,
+       .init_page = gf100_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gm107_ram_new,
        .memtype_valid = gf100_fb_memtype_valid,
index 44f5716f64d88dfff577250b6a8be0b46350019c..62f653240be3103a76d58e506b8ab44acf2d8739 100644 (file)
 
 #include <core/memory.h>
 
+void
+gm200_fb_init_page(struct nvkm_fb *fb)
+{
+       struct nvkm_device *device = fb->subdev.device;
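+       /*
+        * As on gf100: 16 selects 64 KiB and 17 selects 128 KiB big pages;
+        * any other value appears to disable big pages via bit 0x800.
+        */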
+       switch (fb->page) {
+       case 16:
+               nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001);
+               break;
+       case 17:
+               nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000);
+               break;
+       default:
+               nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800);
+               fb->page = 0;
+               break;
+       }
+}
+
 static void
 gm200_fb_init(struct nvkm_fb *base)
 {
@@ -48,6 +66,7 @@ gm200_fb = {
        .dtor = gf100_fb_dtor,
        .oneinit = gf100_fb_oneinit,
        .init = gm200_fb_init,
+       .init_page = gm200_fb_init_page,
        .intr = gf100_fb_intr,
        .ram_new = gm107_ram_new,
        .memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
new file mode 100644 (file)
index 0000000..98474ae
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static void
+gp100_fb_init_unkn(struct nvkm_fb *base)
+{
+       struct nvkm_device *device = gf100_fb(base)->base.subdev.device;
+       nvkm_wr32(device, 0x1fac80, nvkm_rd32(device, 0x100c80));
+       nvkm_wr32(device, 0x1facc4, nvkm_rd32(device, 0x100cc4));
+       nvkm_wr32(device, 0x1facc8, nvkm_rd32(device, 0x100cc8));
+       nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc));
+}
+
+void
+gp100_fb_init(struct nvkm_fb *base)
+{
+       struct gf100_fb *fb = gf100_fb(base);
+       struct nvkm_device *device = fb->base.subdev.device;
+
+       if (fb->r100c10_page)
+               nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+       nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
+       nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
+       nvkm_mask(device, 0x100cc4, 0x00060000,
+                 max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
+}
+
+static const struct nvkm_fb_func
+gp100_fb = {
+       .dtor = gf100_fb_dtor,
+       .oneinit = gf100_fb_oneinit,
+       .init = gp100_fb_init,
+       .init_page = gm200_fb_init_page,
+       .init_unkn = gp100_fb_init_unkn,
+       .ram_new = gp100_ram_new,
+       .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+       return gf100_fb_new_(&gp100_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
new file mode 100644 (file)
index 0000000..92cb718
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static const struct nvkm_fb_func
+gp104_fb = {
+       .dtor = gf100_fb_dtor,
+       .oneinit = gf100_fb_oneinit,
+       .init = gp100_fb_init,
+       .init_page = gm200_fb_init_page,
+       .ram_new = gp100_ram_new,
+       .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+       return gf100_fb_new_(&gp104_fb, device, index, pfb);
+}
index d97d640e60a0a32e5e92af83c116128ca8559dce..e905d44fa1d59a88bf349e8bbcd467f49d7a7a00 100644 (file)
@@ -8,6 +8,8 @@ struct nvkm_fb_func {
        void *(*dtor)(struct nvkm_fb *);
        int (*oneinit)(struct nvkm_fb *);
        void (*init)(struct nvkm_fb *);
+       void (*init_page)(struct nvkm_fb *);
+       void (*init_unkn)(struct nvkm_fb *);
        void (*intr)(struct nvkm_fb *);
 
        struct {
@@ -60,5 +62,8 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
                       u32 pitch, u32 flags, struct nvkm_fb_tile *);
 
 int gf100_fb_oneinit(struct nvkm_fb *);
+void gf100_fb_init_page(struct nvkm_fb *);
 bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
+
+void gm200_fb_init_page(struct nvkm_fb *);
 #endif
index f816cbf2ced32f02a310b112cee757024acb1ab3..b9ec0ae6723ad1c71b93802c50236d27d0bc12bc 100644 (file)
@@ -47,4 +47,5 @@ int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
new file mode 100644 (file)
index 0000000..f3be408
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static int
+gp100_ram_init(struct nvkm_ram *ram)
+{
+       struct nvkm_subdev *subdev = &ram->fb->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_bios *bios = device->bios;
+       u8  ver, hdr, cnt, len, snr, ssz;
+       u32 data;
+       int i;
+
+       /* run a bunch of tables from the rammap table.  there are actually
+        * individual pointers for each rammap entry too, but nvidia seems
+        * to just run the last two entries' scripts early on in its init,
+        * and never again; we'll just run 'em all once for now.
+        *
+        * i strongly suspect that each script is for a separate mode
+        * (likely selected by 0x9a065c's lower bits?), and that the
+        * binary driver skips the one that's already been set up by
+        * the init tables.
+        */
+       data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
+       if (!data || hdr < 0x15)
+               return -EINVAL;
+
+       cnt  = nvbios_rd08(bios, data + 0x14); /* guess at count */
+       data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
+       if (cnt) {
+               u32 save = nvkm_rd32(device, 0x9a065c) & 0x000000f0;
+               for (i = 0; i < cnt; i++, data += 4) {
+                       if (i != save >> 4) {
+                               nvkm_mask(device, 0x9a065c, 0x000000f0, i << 4);
+                               nvbios_exec(&(struct nvbios_init) {
+                                               .subdev = subdev,
+                                               .bios = bios,
+                                               .offset = nvbios_rd32(bios, data),
+                                               .execute = 1,
+                                           });
+                       }
+               }
+               nvkm_mask(device, 0x9a065c, 0x000000f0, save);
+       }
+
+       nvkm_mask(device, 0x9a0584, 0x11000000, 0x00000000);
+       nvkm_wr32(device, 0x10ecc0, 0xffffffff);
+       nvkm_mask(device, 0x9a0160, 0x00000010, 0x00000010);
+       return 0;
+}
+
+static const struct nvkm_ram_func
+gp100_ram_func = {
+       .init = gp100_ram_init,
+       .get = gf100_ram_get,
+       .put = gf100_ram_put,
+};
+
+int
+gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+       struct nvkm_ram *ram;
+       struct nvkm_subdev *subdev = &fb->subdev;
+       struct nvkm_device *device = subdev->device;
+       enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
+       const u32 rsvd_head = ( 256 * 1024); /* vga memory */
+       const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
+       u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa;
+       u32 fbio_opt = nvkm_rd32(device, 0x021c14);
+       u64 part, size = 0, comm = ~0ULL;
+       bool mixed = false;
+       int ret;
+
+       nvkm_debug(subdev, "022438: %08x\n", fbpa_num);
+       nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
+       for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
+               if (!(fbio_opt & (1 << fbpa))) {
+                       part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
+                       nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part);
+                       part = part << 20;
+                       if (part != comm) {
+                               if (comm != ~0ULL)
+                                       mixed = true;
+                               comm = min(comm, part);
+                       }
+                       size = size + part;
+               }
+       }
+
+       ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram);
+       *pram = ram;
+       if (ret)
+               return ret;
+
+       nvkm_mm_fini(&ram->vram);
+
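+       /*
+        * With mixed FBPA sizes, expose the amount common to all units as
+        * one region, and the remainder as a second region at a high
+        * offset; the 0x1000000000 base below presumably matches how the
+        * units are interleaved. rsvd_head/rsvd_tail carve out the VGA
+        * memory and VBIOS areas.
+        */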
+       if (mixed) {
+               ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+                                  ((comm * fbpa_num) - rsvd_head) >>
+                                  NVKM_RAM_MM_SHIFT, 1);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >>
+                                  NVKM_RAM_MM_SHIFT,
+                                  (size - (comm * fbpa_num) - rsvd_tail) >>
+                                  NVKM_RAM_MM_SHIFT, 1);
+               if (ret)
+                       return ret;
+       } else {
+               ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+                                  (size - rsvd_head - rsvd_tail) >>
+                                  NVKM_RAM_MM_SHIFT, 1);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
index 323c79abe46855f45ec5e91a722324f93d183a8a..41bd5d0f76927e1e2f38ffe7e1d3c228a4d05139 100644 (file)
@@ -276,6 +276,8 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
                struct pwr_rail_t *r = &stbl.rail[i];
                struct nvkm_iccsense_rail *rail;
                struct nvkm_iccsense_sensor *sensor;
+               int (*read)(struct nvkm_iccsense *,
+                           struct nvkm_iccsense_rail *);
 
                if (!r->mode || r->resistor_mohm == 0)
                        continue;
@@ -284,31 +286,31 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
                if (!sensor)
                        continue;
 
-               rail = kmalloc(sizeof(*rail), GFP_KERNEL);
-               if (!rail)
-                       return -ENOMEM;
-
                switch (sensor->type) {
                case NVBIOS_EXTDEV_INA209:
                        if (r->rail != 0)
                                continue;
-                       rail->read = nvkm_iccsense_ina209_read;
+                       read = nvkm_iccsense_ina209_read;
                        break;
                case NVBIOS_EXTDEV_INA219:
                        if (r->rail != 0)
                                continue;
-                       rail->read = nvkm_iccsense_ina219_read;
+                       read = nvkm_iccsense_ina219_read;
                        break;
                case NVBIOS_EXTDEV_INA3221:
                        if (r->rail >= 3)
                                continue;
-                       rail->read = nvkm_iccsense_ina3221_read;
+                       read = nvkm_iccsense_ina3221_read;
                        break;
                default:
                        continue;
                }
 
+               rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+               if (!rail)
+                       return -ENOMEM;
                sensor->rail_mask |= 1 << r->rail;
+               rail->read = read;
                rail->sensor = sensor;
                rail->idx = r->rail;
                rail->mohm = r->resistor_mohm;
index 932b366598aa1a215d6408e2b1c198ab82c91236..12d6f4f102cb6d3c65244dcd1729bae36f89d9de 100644 (file)
@@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ltc/gf100.o
 nvkm-y += nvkm/subdev/ltc/gk104.o
 nvkm-y += nvkm/subdev/ltc/gm107.o
 nvkm-y += nvkm/subdev/ltc/gm200.o
+nvkm-y += nvkm/subdev/ltc/gp100.o
index c9eb677967a84455d325f7eef9fc9f0128ca41b2..4a0fa0a9b80287b5f68b292764f985502a27277d 100644 (file)
@@ -23,7 +23,6 @@
  */
 #include "priv.h"
 
-#include <core/enum.h>
 #include <subdev/fb.h>
 #include <subdev/timer.h>
 
@@ -71,7 +70,7 @@ gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
        nvkm_wr32(device, 0x17ea58, depth);
 }
 
-static const struct nvkm_bitfield
+const struct nvkm_bitfield
 gf100_ltc_lts_intr_name[] = {
        { 0x00000001, "IDLE_ERROR_IQ" },
        { 0x00000002, "IDLE_ERROR_CBC" },
index e292f5679418ccfdf23e53e9dbbec7b92b231539..ec0a3844b2d14995e342e0fdaa9fa638bdc17de8 100644 (file)
@@ -68,18 +68,22 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
        nvkm_wr32(device, 0x17e34c, depth);
 }
 
-static void
-gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
+void
+gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
 {
        struct nvkm_subdev *subdev = &ltc->subdev;
        struct nvkm_device *device = subdev->device;
-       u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
-       u32 stat = nvkm_rd32(device, base + 0x00c);
+       u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
+       u32 intr = nvkm_rd32(device, base + 0x00c);
+       u16 stat = intr & 0x0000ffff;
+       char msg[128];
 
        if (stat) {
-               nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
-               nvkm_wr32(device, base + 0x00c, stat);
+               nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
+               nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg);
        }
+
+       nvkm_wr32(device, base + 0x00c, intr);
 }
 
 void
@@ -92,7 +96,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc)
        while (mask) {
                u32 s, c = __ffs(mask);
                for (s = 0; s < ltc->lts_nr; s++)
-                       gm107_ltc_lts_isr(ltc, c, s);
+                       gm107_ltc_intr_lts(ltc, c, s);
                mask &= ~(1 << c);
        }
 }
index 2a29bfd5125aef5c5f8aa97c7ce0bd91d61dfd94..e18e0dc19ec8b1f988c2ea30ec4e5d274af0b59f 100644 (file)
@@ -46,7 +46,7 @@ static const struct nvkm_ltc_func
 gm200_ltc = {
        .oneinit = gm200_ltc_oneinit,
        .init = gm200_ltc_init,
-       .intr = gm107_ltc_intr, /*XXX: not validated */
+       .intr = gm107_ltc_intr,
        .cbc_clear = gm107_ltc_cbc_clear,
        .cbc_wait = gm107_ltc_cbc_wait,
        .zbc = 16,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
new file mode 100644 (file)
index 0000000..0bdfb2f
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static void
+gp100_ltc_intr(struct nvkm_ltc *ltc)
+{
+       struct nvkm_device *device = ltc->subdev.device;
+       u32 mask;
+
+       mask = nvkm_rd32(device, 0x0001c0);
+       while (mask) {
+               u32 s, c = __ffs(mask);
+               for (s = 0; s < ltc->lts_nr; s++)
+                       gm107_ltc_intr_lts(ltc, c, s);
+               mask &= ~(1 << c);
+       }
+}
+
+static int
+gp100_ltc_oneinit(struct nvkm_ltc *ltc)
+{
+       struct nvkm_device *device = ltc->subdev.device;
+       ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
+       ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
+       /*XXX: tagram allocation - TBD */
+       return nvkm_mm_init(&ltc->tags, 0, 0, 1);
+}
+
+static void
+gp100_ltc_init(struct nvkm_ltc *ltc)
+{
+       /*XXX: PMU LS call to setup tagram address */
+}
+
+static const struct nvkm_ltc_func
+gp100_ltc = {
+       .oneinit = gp100_ltc_oneinit,
+       .init = gp100_ltc_init,
+       .intr = gp100_ltc_intr,
+       .cbc_clear = gm107_ltc_cbc_clear,
+       .cbc_wait = gm107_ltc_cbc_wait,
+       .zbc = 16,
+       .zbc_clear_color = gm107_ltc_zbc_clear_color,
+       .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+       .invalidate = gf100_ltc_invalidate,
+       .flush = gf100_ltc_flush,
+};
+
+int
+gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
+{
+       return nvkm_ltc_new_(&gp100_ltc, device, index, pltc);
+}
index 6d81c695ed0d9f5312f85be51b6f1829ccee093b..8b95f96e3ffa1fad8cd26cf87a8b9f58ef872192 100644 (file)
@@ -2,6 +2,7 @@
 #define __NVKM_LTC_PRIV_H__
 #define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
 #include <subdev/ltc.h>
+#include <core/enum.h>
 
 int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
                  int index, struct nvkm_ltc **);
@@ -31,8 +32,10 @@ void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
 void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
 void gf100_ltc_invalidate(struct nvkm_ltc *);
 void gf100_ltc_flush(struct nvkm_ltc *);
+extern const struct nvkm_bitfield gf100_ltc_lts_intr_name[];
 
 void gm107_ltc_intr(struct nvkm_ltc *);
+void gm107_ltc_intr_lts(struct nvkm_ltc *, int ltc, int lts);
 void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
 void gm107_ltc_cbc_wait(struct nvkm_ltc *);
 void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
index 49695ac7be2e66331a8b96fae726fff2e17c613f..12943f92c20608e64f8f4b5a90921c35506bcce2 100644 (file)
@@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/mc/gt215.o
 nvkm-y += nvkm/subdev/mc/gf100.o
 nvkm-y += nvkm/subdev/mc/gk104.o
 nvkm-y += nvkm/subdev/mc/gk20a.o
+nvkm-y += nvkm/subdev/mc/gp100.o
index 350a8caa84c8dbd9b565cb2ea4b71a2b428bb116..6b25e25f9ebaeef8f5609ff635fb41cb77f9b2df 100644 (file)
 #include <subdev/top.h>
 
 void
-nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
+nvkm_mc_unk260(struct nvkm_device *device, u32 data)
 {
-       if (mc->func->unk260)
+       struct nvkm_mc *mc = device->mc;
+       if (likely(mc) && mc->func->unk260)
                mc->func->unk260(mc, data);
 }
 
 void
-nvkm_mc_intr_unarm(struct nvkm_mc *mc)
+nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
 {
-       return mc->func->intr_unarm(mc);
+       struct nvkm_mc *mc = device->mc;
+       const struct nvkm_mc_map *map;
+       if (likely(mc) && mc->func->intr_mask) {
+               u32 mask = nvkm_top_intr_mask(device, devidx);
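+               /* fall back to the static intr map when TOP has no entry */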
+               for (map = mc->func->intr; !mask && map->stat; map++) {
+                       if (map->unit == devidx)
+                               mask = map->stat;
+               }
+               mc->func->intr_mask(mc, mask, en ? mask : 0);
+       }
+}
+
+void
+nvkm_mc_intr_unarm(struct nvkm_device *device)
+{
+       struct nvkm_mc *mc = device->mc;
+       if (likely(mc))
+               mc->func->intr_unarm(mc);
 }
 
 void
-nvkm_mc_intr_rearm(struct nvkm_mc *mc)
+nvkm_mc_intr_rearm(struct nvkm_device *device)
 {
-       return mc->func->intr_rearm(mc);
+       struct nvkm_mc *mc = device->mc;
+       if (likely(mc))
+               mc->func->intr_rearm(mc);
 }
 
 static u32
-nvkm_mc_intr_mask(struct nvkm_mc *mc)
+nvkm_mc_intr_stat(struct nvkm_mc *mc)
 {
-       u32 intr = mc->func->intr_mask(mc);
+       u32 intr = mc->func->intr_stat(mc);
        if (WARN_ON_ONCE(intr == 0xffffffff))
                intr = 0; /* likely fallen off the bus */
        return intr;
 }
 
 void
-nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
+nvkm_mc_intr(struct nvkm_device *device, bool *handled)
 {
-       struct nvkm_device *device = mc->subdev.device;
+       struct nvkm_mc *mc = device->mc;
        struct nvkm_subdev *subdev;
-       const struct nvkm_mc_map *map = mc->func->intr;
-       u32 stat, intr = nvkm_mc_intr_mask(mc);
+       const struct nvkm_mc_map *map;
+       u32 stat, intr;
        u64 subdevs;
 
-       stat = nvkm_top_intr(device->top, intr, &subdevs);
+       if (unlikely(!mc))
+               return;
+
+       intr = nvkm_mc_intr_stat(mc);
+       stat = nvkm_top_intr(device, intr, &subdevs);
        while (subdevs) {
                enum nvkm_devidx subidx = __ffs64(subdevs);
                subdev = nvkm_device_subdev(device, subidx);
@@ -72,14 +96,13 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
                subdevs &= ~BIT_ULL(subidx);
        }
 
-       while (map->stat) {
+       for (map = mc->func->intr; map->stat; map++) {
                if (intr & map->stat) {
                        subdev = nvkm_device_subdev(device, map->unit);
                        if (subdev)
                                nvkm_subdev_intr(subdev);
                        stat &= ~map->stat;
                }
-               map++;
        }
 
        if (stat)
@@ -87,22 +110,32 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
        *handled = intr != 0;
 }
 
-static void
-nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+static u32
+nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
+                  enum nvkm_devidx devidx)
 {
-       struct nvkm_device *device = mc->subdev.device;
+       struct nvkm_mc *mc = device->mc;
        const struct nvkm_mc_map *map;
-       u64 pmc_enable;
-
-       if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) {
-               for (map = mc->func->reset; map && map->stat; map++) {
-                       if (map->unit == devidx) {
-                               pmc_enable = map->stat;
-                               break;
+       u64 pmc_enable = 0;
+       if (likely(mc)) {
+               if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
+                       for (map = mc->func->reset; map && map->stat; map++) {
+                               if (!isauto || !map->noauto) {
+                                       if (map->unit == devidx) {
+                                               pmc_enable = map->stat;
+                                               break;
+                                       }
+                               }
                        }
                }
        }
+       return pmc_enable;
+}
 
+void
+nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+       u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
        if (pmc_enable) {
                nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
                nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
@@ -111,17 +144,27 @@ nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
 }
 
 void
-nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
 {
-       if (likely(mc))
-               nvkm_mc_reset_(mc, devidx);
+       u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+       if (pmc_enable)
+               nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+}
+
+void
+nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+       u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+       if (pmc_enable) {
+               nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+               nvkm_rd32(device, 0x000200);
+       }
 }
 
 static int
 nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-       struct nvkm_mc *mc = nvkm_mc(subdev);
-       nvkm_mc_intr_unarm(mc);
+       nvkm_mc_intr_unarm(subdev->device);
        return 0;
 }
 
@@ -131,7 +174,7 @@ nvkm_mc_init(struct nvkm_subdev *subdev)
        struct nvkm_mc *mc = nvkm_mc(subdev);
        if (mc->func->init)
                mc->func->init(mc);
-       nvkm_mc_intr_rearm(mc);
+       nvkm_mc_intr_rearm(subdev->device);
        return 0;
 }
 
@@ -148,16 +191,21 @@ nvkm_mc = {
        .fini = nvkm_mc_fini,
 };
 
+void
+nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
+            int index, struct nvkm_mc *mc)
+{
+       nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
+       mc->func = func;
+}
+
 int
 nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
             int index, struct nvkm_mc **pmc)
 {
        struct nvkm_mc *mc;
-
        if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
                return -ENOMEM;
-
-       nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
-       mc->func = func;
+       nvkm_mc_ctor(func, device, index, *pmc);
        return 0;
 }
index 5c85b47f071d3a81645a5e3a24cfed5dd667947f..c3d66ef5dc129877607cbbbce62f02b39054cbf7 100644 (file)
@@ -57,7 +57,7 @@ g84_mc = {
        .intr = g84_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = g84_mc_reset,
 };
 
index 0280b43cc10c9eb2dfacbe31aee636488443fca1..93ad4982ce5f656544ffdf3ca16f886a32a8f8c2 100644 (file)
@@ -57,7 +57,7 @@ g98_mc = {
        .intr = g98_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = g98_mc_reset,
 };
 
index 8397e223bd4305295cf714463b099f5c63a8b5ac..d2c4d6033abb0a96ec3851e1fa13cd2a1a648aea 100644 (file)
@@ -76,7 +76,7 @@ gf100_mc_intr_rearm(struct nvkm_mc *mc)
 }
 
 u32
-gf100_mc_intr_mask(struct nvkm_mc *mc)
+gf100_mc_intr_stat(struct nvkm_mc *mc)
 {
        struct nvkm_device *device = mc->subdev.device;
        u32 intr0 = nvkm_rd32(device, 0x000100);
@@ -84,6 +84,14 @@ gf100_mc_intr_mask(struct nvkm_mc *mc)
        return intr0 | intr1;
 }
 
+void
+gf100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
+{
+       struct nvkm_device *device = mc->subdev.device;
+       nvkm_mask(device, 0x000640, mask, stat);
+       nvkm_mask(device, 0x000644, mask, stat);
+}
+
 void
 gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
 {
@@ -97,6 +105,7 @@ gf100_mc = {
        .intr_unarm = gf100_mc_intr_unarm,
        .intr_rearm = gf100_mc_intr_rearm,
        .intr_mask = gf100_mc_intr_mask,
+       .intr_stat = gf100_mc_intr_stat,
        .reset = gf100_mc_reset,
        .unk260 = gf100_mc_unk260,
 };
index 317464212c7d1eb75b28cf54dbb210912d2c9566..7b8c6ecad1a5d544d7012d1577cc7f8b03f8b06c 100644 (file)
@@ -26,6 +26,7 @@
 const struct nvkm_mc_map
 gk104_mc_reset[] = {
        { 0x00000100, NVKM_ENGINE_FIFO },
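+       /* noauto: skipped by nvkm_mc_reset(), still used by enable/disable */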
+       { 0x00002000, NVKM_SUBDEV_PMU, true },
        {}
 };
 
@@ -53,6 +54,7 @@ gk104_mc = {
        .intr_unarm = gf100_mc_intr_unarm,
        .intr_rearm = gf100_mc_intr_rearm,
        .intr_mask = gf100_mc_intr_mask,
+       .intr_stat = gf100_mc_intr_stat,
        .reset = gk104_mc_reset,
        .unk260 = gf100_mc_unk260,
 };
index 60b044f517ed5d70aec8d8f424322edf4afd821a..ca1bf3279dbe9a046c958cfe744e7f60fd07cfa8 100644 (file)
@@ -30,6 +30,7 @@ gk20a_mc = {
        .intr_unarm = gf100_mc_intr_unarm,
        .intr_rearm = gf100_mc_intr_rearm,
        .intr_mask = gf100_mc_intr_mask,
+       .intr_stat = gf100_mc_intr_stat,
        .reset = gk104_mc_reset,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
new file mode 100644 (file)
index 0000000..4d22f4a
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#define gp100_mc(p) container_of((p), struct gp100_mc, base)
+#include "priv.h"
+
+struct gp100_mc {
+       struct nvkm_mc base;
+       spinlock_t lock;
+       bool intr;
+       u32 mask;
+};
+
+static void
+gp100_mc_intr_update(struct gp100_mc *mc)
+{
+       struct nvkm_device *device = mc->base.subdev.device;
+       u32 mask = mc->intr ? mc->mask : 0, i;
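+       /*
+        * The 0x000160/0x000164 pair appears to enable interrupt lines and
+        * 0x000180/0x000184 to disable them; writing both keeps the
+        * hardware in sync with the software mask.
+        */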
+       for (i = 0; i < 2; i++) {
+               nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
+               nvkm_wr32(device, 0x000160 + (i * 0x04),  mask);
+       }
+}
+
+static void
+gp100_mc_intr_unarm(struct nvkm_mc *base)
+{
+       struct gp100_mc *mc = gp100_mc(base);
+       unsigned long flags;
+       spin_lock_irqsave(&mc->lock, flags);
+       mc->intr = false;
+       gp100_mc_intr_update(mc);
+       spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+gp100_mc_intr_rearm(struct nvkm_mc *base)
+{
+       struct gp100_mc *mc = gp100_mc(base);
+       unsigned long flags;
+       spin_lock_irqsave(&mc->lock, flags);
+       mc->intr = true;
+       gp100_mc_intr_update(mc);
+       spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static void
+gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
+{
+       struct gp100_mc *mc = gp100_mc(base);
+       unsigned long flags;
+       spin_lock_irqsave(&mc->lock, flags);
+       mc->mask = (mc->mask & ~mask) | intr;
+       gp100_mc_intr_update(mc);
+       spin_unlock_irqrestore(&mc->lock, flags);
+}
+
+static const struct nvkm_mc_func
+gp100_mc = {
+       .init = nv50_mc_init,
+       .intr = gk104_mc_intr,
+       .intr_unarm = gp100_mc_intr_unarm,
+       .intr_rearm = gp100_mc_intr_rearm,
+       .intr_mask = gp100_mc_intr_mask,
+       .intr_stat = gf100_mc_intr_stat,
+       .reset = gk104_mc_reset,
+};
+
+int
+gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+       struct gp100_mc *mc;
+
+       if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
+               return -ENOMEM;
+       nvkm_mc_ctor(&gp100_mc, device, index, &mc->base);
+       *pmc = &mc->base;
+
+       spin_lock_init(&mc->lock);
+       mc->intr = false;
+       mc->mask = 0x7fffffff;
+       return 0;
+}
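
The gp100 implementation keeps the interrupt-enable state in software (mc->intr and mc->mask, guarded by a spinlock) and replays it to two register pairs on every change: 0x000180/0x000184 receive the complement of the effective mask and 0x000160/0x000164 the mask itself, which reads like a disable/enable (CLEAR/SET) pair. A standalone model of the pattern (MMIO and locking stubbed out; the CLEAR/SET interpretation is an inference, only the offsets come from the code above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for nvkm_wr32(); just logs the write. */
static void wr32(uint32_t addr, uint32_t data)
{
        printf("wr32 %06x <- %08x\n", (unsigned)addr, (unsigned)data);
}

struct mc_state {
        bool armed;     /* mirrors mc->intr, toggled by (un)arm   */
        uint32_t mask;  /* mirrors mc->mask, 0x7fffffff from ctor */
};

/* Replay the effective mask, as gp100_mc_intr_update() does. */
static void mc_update(struct mc_state *mc)
{
        uint32_t mask = mc->armed ? mc->mask : 0;
        for (int i = 0; i < 2; i++) {
                wr32(0x000180 + i * 4, ~mask);  /* disable these... */
                wr32(0x000160 + i * 4,  mask);  /* ...enable those  */
        }
}

int main(void)
{
        struct mc_state mc = { .armed = false, .mask = 0x7fffffff };

        mc.armed = true;                     /* intr_rearm            */
        mc_update(&mc);
        mc.mask = (mc.mask & ~0x100u) | 0;   /* intr_mask: drop bit 8 */
        mc_update(&mc);
        mc.armed = false;                    /* intr_unarm in the ISR */
        mc_update(&mc);
        return 0;
}
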
index aad0ba95bf180118ea9c5430cd9be6abb9dde93d..99d50a3d956fc312b3d91ade85f3f36f2b944d22 100644 (file)
@@ -53,13 +53,20 @@ gt215_mc_intr[] = {
        {},
 };
 
+static void
+gt215_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
+{
+       nvkm_mask(mc->subdev.device, 0x000640, mask, stat);
+}
+
 static const struct nvkm_mc_func
 gt215_mc = {
        .init = nv50_mc_init,
        .intr = gt215_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_mask = gt215_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = gt215_mc_reset,
 };
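
gt215 gains a real intr_mask hook by poking 0x000640 through nvkm_mask(), whose (mask, data) convention clears the bits in mask and then ORs data back in. A runnable model of that read-modify-write helper:

#include <assert.h>
#include <stdint.h>

/* Same semantics as nvkm_mask(): clear `mask`, set `data`, return old. */
static uint32_t mask32(uint32_t *reg, uint32_t mask, uint32_t data)
{
        uint32_t old = *reg;
        *reg = (old & ~mask) | data;
        return old;
}

int main(void)
{
        uint32_t reg_0640 = 0x00000fff;            /* pretend enables */
        mask32(&reg_0640, 0x00000100, 0x00000000); /* mask bit 8      */
        assert(reg_0640 == 0x00000eff);
        mask32(&reg_0640, 0x00000100, 0x00000100); /* unmask bit 8    */
        assert(reg_0640 == 0x00000fff);
        return 0;
}

Calling the hook as (m, 0) therefore masks sources and (m, m) unmasks them; the device-level nvkm_mc_intr_mask(device, devidx, en) used by the secboot hunks below presumably resolves devidx to such a bit and calls the hook in exactly this way.
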
 
index a062624e906b3396f4b5b74c3da704be4b1a9765..6509defd14609c1a9675424da7f45f3c6cf5ee8b 100644 (file)
@@ -56,7 +56,7 @@ nv04_mc_intr_rearm(struct nvkm_mc *mc)
 }
 
 u32
-nv04_mc_intr_mask(struct nvkm_mc *mc)
+nv04_mc_intr_stat(struct nvkm_mc *mc)
 {
        return nvkm_rd32(mc->subdev.device, 0x000100);
 }
@@ -75,7 +75,7 @@ nv04_mc = {
        .intr = nv04_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = nv04_mc_reset,
 };
 
index 55f0b9166b52cb207dc5973076b584630d8674b6..9213107901e6de32f51973f2d6d086e2eb6f2edc 100644 (file)
@@ -39,7 +39,7 @@ nv11_mc = {
        .intr = nv11_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = nv04_mc_reset,
 };
 
index c40fa67f79a51d3d122a77011824ad34e591e3f0..64bf5bbf8146cb80e6ace6f69bdca9da0e9c0d2a 100644 (file)
@@ -48,7 +48,7 @@ nv17_mc = {
        .intr = nv17_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = nv17_mc_reset,
 };
 
index cc56271db5643695add777bad34059c27d7f9b49..65fa44a64b985f12437fa3fa7473eaa704b98927 100644 (file)
@@ -43,7 +43,7 @@ nv44_mc = {
        .intr = nv17_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = nv17_mc_reset,
 };
 
index 343b6078580de8bee982c56c267db9d48a6858a0..fe93b4fd71002ccbfcab3505c0a6b13f4bf039c1 100644 (file)
@@ -50,7 +50,7 @@ nv50_mc = {
        .intr = nv50_mc_intr,
        .intr_unarm = nv04_mc_intr_unarm,
        .intr_rearm = nv04_mc_intr_rearm,
-       .intr_mask = nv04_mc_intr_mask,
+       .intr_stat = nv04_mc_intr_stat,
        .reset = nv17_mc_reset,
 };
 
index a1203811851238b462981a3d5e1a38717926ee33..4f0576a06d2454489ffdcce6d3b601a286653a98 100644 (file)
@@ -3,12 +3,15 @@
 #define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
 #include <subdev/mc.h>
 
+void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *,
+                 int index, struct nvkm_mc *);
 int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
                 int index, struct nvkm_mc **);
 
 struct nvkm_mc_map {
        u32 stat;
        u32 unit;
+       bool noauto;
 };
 
 struct nvkm_mc_func {
@@ -18,8 +21,10 @@ struct nvkm_mc_func {
        void (*intr_unarm)(struct nvkm_mc *);
        /* enable reporting of interrupts to host */
        void (*intr_rearm)(struct nvkm_mc *);
+       /* (un)mask delivery of specific interrupts */
+       void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
        /* retrieve pending interrupt mask (NV_PMC_INTR) */
-       u32 (*intr_mask)(struct nvkm_mc *);
+       u32 (*intr_stat)(struct nvkm_mc *);
        const struct nvkm_mc_map *reset;
        void (*unk260)(struct nvkm_mc *, u32);
 };
@@ -27,7 +32,7 @@ struct nvkm_mc_func {
 void nv04_mc_init(struct nvkm_mc *);
 void nv04_mc_intr_unarm(struct nvkm_mc *);
 void nv04_mc_intr_rearm(struct nvkm_mc *);
-u32 nv04_mc_intr_mask(struct nvkm_mc *);
+u32 nv04_mc_intr_stat(struct nvkm_mc *);
 extern const struct nvkm_mc_map nv04_mc_reset[];
 
 extern const struct nvkm_mc_map nv17_mc_intr[];
@@ -39,7 +44,8 @@ void nv50_mc_init(struct nvkm_mc *);
 
 void gf100_mc_intr_unarm(struct nvkm_mc *);
 void gf100_mc_intr_rearm(struct nvkm_mc *);
-u32 gf100_mc_intr_mask(struct nvkm_mc *);
+void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32);
+u32 gf100_mc_intr_stat(struct nvkm_mc *);
 void gf100_mc_unk260(struct nvkm_mc *, u32);
 
 extern const struct nvkm_mc_map gk104_mc_intr[];
index 3c2519fdeb813afcdab2a20c32bf92bf1add6f67..2a31b7d66a6ddc852aee66cdaae4b196e9a86419 100644 (file)
@@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/pci/g94.o
 nvkm-y += nvkm/subdev/pci/gf100.o
 nvkm-y += nvkm/subdev/pci/gf106.o
 nvkm-y += nvkm/subdev/pci/gk104.o
+nvkm-y += nvkm/subdev/pci/gp100.o
index 6b0328bd7eede2ee2d43d55152f0b6022218f76b..eb9b278198b229672df91bf6426432ee7d6f9f65 100644 (file)
@@ -69,15 +69,13 @@ static irqreturn_t
 nvkm_pci_intr(int irq, void *arg)
 {
        struct nvkm_pci *pci = arg;
-       struct nvkm_mc *mc = pci->subdev.device->mc;
+       struct nvkm_device *device = pci->subdev.device;
        bool handled = false;
-       if (likely(mc)) {
-               nvkm_mc_intr_unarm(mc);
-               if (pci->msi)
-                       pci->func->msi_rearm(pci);
-               nvkm_mc_intr(mc, &handled);
-               nvkm_mc_intr_rearm(mc);
-       }
+       nvkm_mc_intr_unarm(device);
+       if (pci->msi)
+               pci->func->msi_rearm(pci);
+       nvkm_mc_intr(device, &handled);
+       nvkm_mc_intr_rearm(device);
        return handled ? IRQ_HANDLED : IRQ_NONE;
 }
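
The ISR loses its open-coded NULL check because the nvkm_mc_* entry points now take the device and presumably check device->mc themselves. The ordering is the load-bearing part: quiesce the PMC, rearm MSI so a fresh message can latch for events arriving during dispatch, service, then re-enable delivery. In miniature, with every step stubbed:

#include <stdbool.h>
#include <stdio.h>

static void mc_intr_unarm(void) { puts("unarm PMC"); }
static void mc_intr_rearm(void) { puts("rearm PMC"); }
static void msi_rearm(void)     { puts("rearm MSI"); }
static void mc_intr(bool *handled) { puts("dispatch"); *handled = true; }

/* Same ordering as nvkm_pci_intr() above. */
int main(void)
{
        bool handled = false;

        mc_intr_unarm();
        msi_rearm();
        mc_intr(&handled);
        mc_intr_rearm();
        return handled ? 0 : 1;
}
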
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
new file mode 100644 (file)
index 0000000..82c5234
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static void
+gp100_pci_msi_rearm(struct nvkm_pci *pci)
+{
+       nvkm_pci_wr32(pci, 0x0704, 0x00000000);
+}
+
+static const struct nvkm_pci_func
+gp100_pci_func = {
+       .rd32 = nv40_pci_rd32,
+       .wr08 = nv40_pci_wr08,
+       .wr32 = nv40_pci_wr32,
+       .msi_rearm = gp100_pci_msi_rearm,
+};
+
+int
+gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
+{
+       return nvkm_pci_new_(&gp100_pci_func, device, index, ppci);
+}
index 213fdba6cfa0c8813cdf7276ed3c9dc72e2c40d6..314be2192b7df0451e1f566d3f6897682b3c5573 100644 (file)
@@ -19,8 +19,9 @@
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-
 #include "priv.h"
+
+#include <subdev/mc.h>
 #include <subdev/timer.h>
 
 static const char *
@@ -70,12 +71,11 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
        int ret;
 
        /* enable engine */
-       nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask);
-       nvkm_rd32(device, 0x200);
+       nvkm_mc_enable(device, sb->devidx);
        ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
        if (ret < 0) {
-               nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
                nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
+               nvkm_mc_disable(device, sb->devidx);
                return ret;
        }
 
@@ -85,8 +85,7 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
 
        /* enable IRQs */
        nvkm_wr32(device, sb->base + 0x010, 0xff);
-       nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask);
-       nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask);
+       nvkm_mc_intr_mask(device, sb->devidx, true);
 
        return 0;
 }
@@ -97,14 +96,13 @@ nvkm_secboot_falcon_disable(struct nvkm_secboot *sb)
        struct nvkm_device *device = sb->subdev.device;
 
        /* disable IRQs and wait for any previous code to complete */
-       nvkm_mask(device, 0x644, sb->irq_mask, 0x0);
-       nvkm_mask(device, 0x640, sb->irq_mask, 0x0);
+       nvkm_mc_intr_mask(device, sb->devidx, false);
        nvkm_wr32(device, sb->base + 0x014, 0xff);
 
        falcon_wait_idle(device, sb->base);
 
        /* disable engine */
-       nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
+       nvkm_mc_disable(device, sb->devidx);
 
        return 0;
 }
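
Instead of poking the engine-enable (0x200) and interrupt-mask (0x640/0x644) registers with per-falcon magic masks, secboot now asks the MC by subdev index, keeping the chip-specific bit layout out of this file. The scrub wait above, nvkm_wait_msec(device, 10, base + 0x10c, 0x6, 0x0), polls until (reg & 0x6) == 0 or roughly 10 ms pass. A standalone model of that poll (register read stubbed; the real helper sleeps between reads, omitted here):

#include <stdint.h>
#include <stdio.h>

/* Stub read of base + 0x10c: scrubbing "finishes" on the third poll. */
static uint32_t rd_scrub_reg(void)
{
        static int polls;
        return ++polls < 3 ? 0x6 : 0x0;  /* two mem-scrub busy bits */
}

/* Poll until (reg & mask) == data or the timeout lapses. */
static int wait_scrub_done(int timeout_ms)
{
        for (int t = 0; t <= timeout_ms; t++) {
                if ((rd_scrub_reg() & 0x6) == 0x0)
                        return 0;
        }
        return -1;  /* the error path logs "mem scrubbing timeout" */
}

int main(void)
{
        printf("scrub wait: %s\n", wait_scrub_done(10) ? "timeout" : "done");
        return 0;
}
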
@@ -216,14 +214,7 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
                return ret;
        }
 
-       /*
-        * Build all blobs - the same blobs can be used to perform secure boot
-        * multiple times
-        */
-       if (sb->func->prepare_blobs)
-               ret = sb->func->prepare_blobs(sb);
-
-       return ret;
+       return 0;
 }
 
 static int
@@ -270,9 +261,8 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
        /* setup the performing falcon's base address and masks */
        switch (func->boot_falcon) {
        case NVKM_SECBOOT_FALCON_PMU:
+               sb->devidx = NVKM_SUBDEV_PMU;
                sb->base = 0x10a000;
-               sb->irq_mask = 0x1000000;
-               sb->enable_mask = 0x2000;
                break;
        default:
                nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
index cc100dc940eaa091bc964a52b9031c5b3c75b6ff..f1e2dc914366d6bf2d10b5c18eb530ac6c4124ce 100644 (file)
@@ -860,6 +860,8 @@ gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb)
 
        /* Write LS blob */
        ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob);
+       if (ret)
+               nvkm_gpuobj_del(&gsb->ls_blob);
 
 cleanup:
        ls_ucode_mgr_cleanup(&mgr);
@@ -1023,29 +1025,34 @@ gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb)
        int ret;
 
        /* Load and prepare the managed falcon's firmwares */
-       ret = gm200_secboot_prepare_ls_blob(gsb);
-       if (ret)
-               return ret;
+       if (!gsb->ls_blob) {
+               ret = gm200_secboot_prepare_ls_blob(gsb);
+               if (ret)
+                       return ret;
+       }
 
        /* Load the HS firmware that will load the LS firmwares */
-       ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
-                                           &gsb->acr_load_blob,
-                                           &gsb->acr_load_bl_desc, true);
-       if (ret)
-               return ret;
+       if (!gsb->acr_load_blob) {
+               ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
+                                               &gsb->acr_load_blob,
+                                               &gsb->acr_load_bl_desc, true);
+               if (ret)
+                       return ret;
+       }
 
        /* Load the HS firmware bootloader */
-       ret = gm200_secboot_prepare_hsbl_blob(gsb);
-       if (ret)
-               return ret;
+       if (!gsb->hsbl_blob) {
+               ret = gm200_secboot_prepare_hsbl_blob(gsb);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
 
 static int
-gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
+gm200_secboot_prepare_blobs(struct gm200_secboot *gsb)
 {
-       struct gm200_secboot *gsb = gm200_secboot(sb);
        int ret;
 
        ret = gm20x_secboot_prepare_blobs(gsb);
@@ -1053,15 +1060,37 @@ gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
                return ret;
 
        /* dGPU only: load the HS firmware that unprotects the WPR region */
-       ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
-                                           &gsb->acr_unload_blob,
-                                           &gsb->acr_unload_bl_desc, false);
-       if (ret)
-               return ret;
+       if (!gsb->acr_unload_blob) {
+               ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
+                                              &gsb->acr_unload_blob,
+                                              &gsb->acr_unload_bl_desc, false);
+               if (ret)
+                       return ret;
+       }
 
        return 0;
 }
 
+static int
+gm200_secboot_blobs_ready(struct gm200_secboot *gsb)
+{
+       struct nvkm_subdev *subdev = &gsb->base.subdev;
+       int ret;
+
+       /* firmware already loaded, nothing to do... */
+       if (gsb->firmware_ok)
+               return 0;
+
+       ret = gsb->func->prepare_blobs(gsb);
+       if (ret) {
+               nvkm_error(subdev, "failed to load secure firmware\n");
+               return ret;
+       }
+
+       gsb->firmware_ok = true;
+
+       return 0;
+}
 
 
 /*
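
Blob preparation moves out of oneinit and behind gm200_secboot_blobs_ready(): the potentially slow firmware loads now happen on first reset, and the firmware_ok latch skips them on every reset after that, while the per-blob NULL checks added above make a retried prepare idempotent after a partial failure. The guard in miniature:

#include <stdbool.h>
#include <stdio.h>

static bool firmware_ok;

static int prepare_blobs(void)
{
        puts("loading secure firmware blobs");
        return 0;  /* pretend every blob loaded */
}

/* Same shape as gm200_secboot_blobs_ready(). */
static int blobs_ready(void)
{
        if (firmware_ok)
                return 0;   /* already loaded, nothing to do   */
        if (prepare_blobs())
                return -1;  /* failed to load secure firmware  */
        firmware_ok = true;
        return 0;
}

int main(void)
{
        blobs_ready();  /* first reset: loads        */
        blobs_ready();  /* later resets: cheap no-op */
        return 0;
}
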
@@ -1234,6 +1263,11 @@ gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
        struct gm200_secboot *gsb = gm200_secboot(sb);
        int ret;
 
+       /* Make sure all blobs are ready */
+       ret = gm200_secboot_blobs_ready(gsb);
+       if (ret)
+               return ret;
+
        /*
         * Dummy GM200 implementation: perform secure boot each time we are
         * called on FECS. Since only FECS and GPCCS are managed and started
@@ -1373,7 +1407,6 @@ gm200_secboot = {
        .dtor = gm200_secboot_dtor,
        .init = gm200_secboot_init,
        .fini = gm200_secboot_fini,
-       .prepare_blobs = gm200_secboot_prepare_blobs,
        .reset = gm200_secboot_reset,
        .start = gm200_secboot_start,
        .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) |
@@ -1415,6 +1448,7 @@ gm200_secboot_func = {
        .bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
        .fixup_bl_desc = gm200_secboot_fixup_bl_desc,
        .fixup_hs_desc = gm200_secboot_fixup_hs_desc,
+       .prepare_blobs = gm200_secboot_prepare_blobs,
 };
 
 int
@@ -1487,3 +1521,19 @@ MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
 MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
 MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
 MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
index 684320484b70186bb361e4357f2d458c0a2562d9..d5395ebfe8d345e1b1114d7f12caaa360973c108 100644 (file)
@@ -42,6 +42,32 @@ struct gm20b_flcn_bl_desc {
        u32 data_size;
 };
 
+static int
+gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb)
+{
+       struct nvkm_subdev *subdev = &gsb->base.subdev;
+       int acr_size;
+       int ret;
+
+       ret = gm20x_secboot_prepare_blobs(gsb);
+       if (ret)
+               return ret;
+
+       acr_size = gsb->acr_load_blob->size;
+       /*
+        * On Tegra the WPR region is set by the bootloader. It is illegal for
+        * the HS blob to be larger than this region.
+        */
+       if (acr_size > gsb->wpr_size) {
+               nvkm_error(subdev, "WPR region too small for FW blob!\n");
+               nvkm_error(subdev, "required: %dB\n", acr_size);
+               nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size);
+               return -ENOSPC;
+       }
+
+       return 0;
+}
+
 /**
  * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
  *
@@ -88,6 +114,7 @@ gm20b_secboot_func = {
        .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
        .fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
        .fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
+       .prepare_blobs = gm20b_secboot_prepare_blobs,
 };
 
 
@@ -146,32 +173,6 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
 }
 #endif
 
-static int
-gm20b_secboot_prepare_blobs(struct nvkm_secboot *sb)
-{
-       struct gm200_secboot *gsb = gm200_secboot(sb);
-       int acr_size;
-       int ret;
-
-       ret = gm20x_secboot_prepare_blobs(gsb);
-       if (ret)
-               return ret;
-
-       acr_size = gsb->acr_load_blob->size;
-       /*
-        * On Tegra the WPR region is set by the bootloader. It is illegal for
-        * the HS blob to be larger than this region.
-        */
-       if (acr_size > gsb->wpr_size) {
-               nvkm_error(&sb->subdev, "WPR region too small for FW blob!\n");
-               nvkm_error(&sb->subdev, "required: %dB\n", acr_size);
-               nvkm_error(&sb->subdev, "WPR size: %dB\n", gsb->wpr_size);
-               return -ENOSPC;
-       }
-
-       return 0;
-}
-
 static int
 gm20b_secboot_init(struct nvkm_secboot *sb)
 {
@@ -189,7 +190,6 @@ static const struct nvkm_secboot_func
 gm20b_secboot = {
        .dtor = gm200_secboot_dtor,
        .init = gm20b_secboot_init,
-       .prepare_blobs = gm20b_secboot_prepare_blobs,
        .reset = gm200_secboot_reset,
        .start = gm200_secboot_start,
        .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
index f2b09dee7c5d1eead4d037b62443f3c2ede2bc93..a9a8a0e1017e8921187e5b70074ea794e5e93222 100644 (file)
@@ -30,7 +30,6 @@ struct nvkm_secboot_func {
        int (*init)(struct nvkm_secboot *);
        int (*fini)(struct nvkm_secboot *, bool suspend);
        void *(*dtor)(struct nvkm_secboot *);
-       int (*prepare_blobs)(struct nvkm_secboot *);
        int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
        int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
 
@@ -147,10 +146,8 @@ struct hsflcn_acr_desc {
  * @inst:              instance block for HS falcon
  * @pgd:               page directory for the HS falcon
  * @vm:                        address space used by the HS falcon
- * @bl_desc_size:      size of the BL descriptor used by this chip.
- * @fixup_bl_desc:     hook that generates the proper BL descriptor format from
- *                     the generic GM200 format into a data array of size
- *                     bl_desc_size
+ * @falcon_state:      current state of the managed falcons
+ * @firmware_ok:       whether the firmware blobs have been created
  */
 struct gm200_secboot {
        struct nvkm_secboot base;
@@ -196,9 +193,19 @@ struct gm200_secboot {
                RUNNING,
        } falcon_state[NVKM_SECBOOT_FALCON_END];
 
+       bool firmware_ok;
 };
 #define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
 
+/**
+ * struct gm200_secboot_func - hooks abstracted between GM200-like implementations
+ * @bl_desc_size:      size of the BL descriptor used by this chip.
+ * @fixup_bl_desc:     hook that generates the proper BL descriptor format from
+ *                     the generic GM200 format into a data array of size
+ *                     bl_desc_size
+ * @fixup_hs_desc:     hook that twiddles the HS descriptor before it is used
+ * @prepare_blobs:     prepares the various blobs needed for secure booting
+ */
 struct gm200_secboot_func {
        /*
         * Size of the bootloader descriptor for this chip. A block of this
@@ -214,6 +221,7 @@ struct gm200_secboot_func {
         * we want the HS FW to set up.
         */
        void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *);
+       int (*prepare_blobs)(struct gm200_secboot *);
 };
 
 int gm200_secboot_init(struct nvkm_secboot *);
index a1b264664aad9cb2e4785a873cb984b647c27c50..fe063d5728e21b600556cfb6724cde8d979800c9 100644 (file)
@@ -41,8 +41,9 @@ nvkm_top_device_new(struct nvkm_top *top)
 }
 
 u32
-nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
+nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
 {
+       struct nvkm_top *top = device->top;
        struct nvkm_top_device *info;
 
        if (top) {
@@ -56,8 +57,25 @@ nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
 }
 
 u32
-nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
+nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
 {
+       struct nvkm_top *top = device->top;
+       struct nvkm_top_device *info;
+
+       if (top) {
+               list_for_each_entry(info, &top->device, head) {
+                       if (info->index == devidx && info->intr >= 0)
+                               return BIT(info->intr);
+               }
+       }
+
+       return 0;
+}
+
+u32
+nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
+{
+       struct nvkm_top *top = device->top;
        struct nvkm_top_device *info;
        u64 subdevs = 0;
        u32 handled = 0;
@@ -78,8 +96,9 @@ nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
 }
 
 enum nvkm_devidx
-nvkm_top_fault(struct nvkm_top *top, int fault)
+nvkm_top_fault(struct nvkm_device *device, int fault)
 {
+       struct nvkm_top *top = device->top;
        struct nvkm_top_device *info;
 
        list_for_each_entry(info, &top->device, head) {
@@ -91,8 +110,9 @@ nvkm_top_fault(struct nvkm_top *top, int fault)
 }
 
 enum nvkm_devidx
-nvkm_top_engine(struct nvkm_top *top, int index, int *runl, int *engn)
+nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
 {
+       struct nvkm_top *top = device->top;
        struct nvkm_top_device *info;
        int n = 0;
 
index e06acc340e99b8c5720fc379808e3f364d9c216a..efac3402f9ddb0663391197dc4cdfea879931c60 100644 (file)
@@ -29,7 +29,7 @@ gk104_top_oneinit(struct nvkm_top *top)
        struct nvkm_subdev *subdev = &top->subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_top_device *info = NULL;
-       u32 data, type;
+       u32 data, type, inst;
        int i;
 
        for (i = 0; i < 64; i++) {
@@ -37,6 +37,7 @@ gk104_top_oneinit(struct nvkm_top *top)
                        if (!(info = nvkm_top_device_new(top)))
                                return -ENOMEM;
                        type = ~0;
+                       inst = 0;
                }
 
                data = nvkm_rd32(device, 0x022700 + (i * 0x04));
@@ -45,6 +46,7 @@ gk104_top_oneinit(struct nvkm_top *top)
                case 0x00000000: /* NOT_VALID */
                        continue;
                case 0x00000001: /* DATA */
+                       inst        = (data & 0x3c000000) >> 26;
                        info->addr  = (data & 0x00fff000);
                        info->fault = (data & 0x000000f8) >> 3;
                        break;
@@ -67,27 +69,32 @@ gk104_top_oneinit(struct nvkm_top *top)
                        continue;
 
                /* Translate engine type to NVKM engine identifier. */
+#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
+#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1)      \
+               info->index = NVKM_ENGINE_##A##0 + inst
                switch (type) {
-               case 0x00000000: info->index = NVKM_ENGINE_GR; break;
-               case 0x00000001: info->index = NVKM_ENGINE_CE0; break;
-               case 0x00000002: info->index = NVKM_ENGINE_CE1; break;
-               case 0x00000003: info->index = NVKM_ENGINE_CE2; break;
-               case 0x00000008: info->index = NVKM_ENGINE_MSPDEC; break;
-               case 0x00000009: info->index = NVKM_ENGINE_MSPPP; break;
-               case 0x0000000a: info->index = NVKM_ENGINE_MSVLD; break;
-               case 0x0000000b: info->index = NVKM_ENGINE_MSENC; break;
-               case 0x0000000c: info->index = NVKM_ENGINE_VIC; break;
-               case 0x0000000d: info->index = NVKM_ENGINE_SEC; break;
-               case 0x0000000e: info->index = NVKM_ENGINE_NVENC0; break;
-               case 0x0000000f: info->index = NVKM_ENGINE_NVENC1; break;
-               case 0x00000010: info->index = NVKM_ENGINE_NVDEC; break;
+               case 0x00000000: A_(GR    ); break;
+               case 0x00000001: A_(CE0   ); break;
+               case 0x00000002: A_(CE1   ); break;
+               case 0x00000003: A_(CE2   ); break;
+               case 0x00000008: A_(MSPDEC); break;
+               case 0x00000009: A_(MSPPP ); break;
+               case 0x0000000a: A_(MSVLD ); break;
+               case 0x0000000b: A_(MSENC ); break;
+               case 0x0000000c: A_(VIC   ); break;
+               case 0x0000000d: A_(SEC   ); break;
+               case 0x0000000e: B_(NVENC ); break;
+               case 0x0000000f: A_(NVENC1); break;
+               case 0x00000010: A_(NVDEC ); break;
+               case 0x00000013: B_(CE    ); break;
                        break;
                default:
                        break;
                }
 
-               nvkm_debug(subdev, "%02x (%8s): addr %06x fault %2d engine %2d "
-                                  "runlist %2d intr %2d reset %2d\n", type,
+               nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
+                                  "engine %2d runlist %2d intr %2d "
+                                  "reset %2d\n", type, inst,
                           info->index == NVKM_SUBDEV_NR ? NULL :
                                          nvkm_subdev_name[info->index],
                           info->addr, info->fault, info->engine, info->runlist,
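
The A_()/B_() helpers encode the two mapping rules for PTOP entries now that an instance number is decoded from DATA words: A_ accepts only instance 0 of a type, while B_ maps instance i onto a run of consecutive engine ids (CE0..CE_LAST, NVENC0..), ignoring instances that fall past the end of the run. A runnable model with a private enum (the real NVKM_ENGINE_* values differ):

#include <stdio.h>

enum engine { GR, CE0, CE1, CE2, CE3, CE_LAST = CE3 };

/* B_(CE) in miniature: instance i maps to CE0 + i while that stays
 * inside CE0..CE_LAST; A_(GR) would additionally demand inst == 0. */
static int map_ce(unsigned inst)
{
        if (inst + CE0 < CE_LAST + 1)
                return CE0 + inst;
        return -1;  /* out of range: the PTOP entry stays unmapped */
}

int main(void)
{
        for (unsigned inst = 0; inst < 5; inst++)
                printf("type 0x13 inst %u -> engine %d\n", inst, map_ce(inst));
        return 0;
}
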
index 6b2d7531a7ff1beff1ee943745a0d5328e108940..1c3d23b0e84ad063c0c11310828327e232ac03bd 100644 (file)
@@ -120,6 +120,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
 
        data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
        if (data && info.vidmask && info.base && info.step) {
+               volt->min_uv = info.min;
+               volt->max_uv = info.max;
                for (i = 0; i < info.vidmask + 1; i++) {
                        if (info.base >= info.min &&
                                info.base <= info.max) {
@@ -131,6 +133,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
                }
                volt->vid_mask = info.vidmask;
        } else if (data && info.vidmask) {
+               volt->min_uv = 0xffffffff;
+               volt->max_uv = 0;
                for (i = 0; i < cnt; i++) {
                        data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
                                                       &ivid);
@@ -138,9 +142,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
                                volt->vid[volt->vid_nr].uv = ivid.voltage;
                                volt->vid[volt->vid_nr].vid = ivid.vid;
                                volt->vid_nr++;
+                               volt->min_uv = min(volt->min_uv, ivid.voltage);
+                               volt->max_uv = max(volt->max_uv, ivid.voltage);
                        }
                }
                volt->vid_mask = info.vidmask;
+       } else if (data && info.type == NVBIOS_VOLT_PWM) {
+               volt->min_uv = info.base;
+               volt->max_uv = info.base + info.pwm_range;
        }
 }
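
All three VBIOS voltage-table shapes now yield a [min_uv, max_uv] range: range-style tables carry it directly in info.min/info.max, entry-style tables are scanned with min()/max() starting from the 0xffffffff/0 extremes, and PWM tables derive it as base .. base + pwm_range. The two computed cases in miniature:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* entry-table case: tighten the extremes per parsed VID entry */
        const uint32_t uv[] = { 810000, 860000, 910000 };
        uint32_t min_uv = 0xffffffff, max_uv = 0;

        for (unsigned i = 0; i < sizeof(uv) / sizeof(uv[0]); i++) {
                if (uv[i] < min_uv) min_uv = uv[i];
                if (uv[i] > max_uv) max_uv = uv[i];
        }
        printf("min: %uuv max: %uuv\n", (unsigned)min_uv, (unsigned)max_uv);

        /* PWM case: the table stores a base and a range instead */
        uint32_t base = 800000, pwm_range = 200000;
        printf("pwm: min %uuv max %uuv\n", (unsigned)base,
               (unsigned)(base + pwm_range));
        return 0;
}
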
 
@@ -181,8 +190,11 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
        volt->func = func;
 
        /* Assuming the non-bios device should build the voltage table later */
-       if (bios)
+       if (bios) {
                nvkm_volt_parse_bios(bios, volt);
+               nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
+                          volt->min_uv, volt->max_uv);
+       }
 
        if (volt->vid_nr) {
                for (i = 0; i < volt->vid_nr; i++) {
index d554455326da65c0c879b36525f7ed40072fae1d..ce5d83cdc7cf7900cf86060750b9398ef24063d9 100644 (file)
@@ -77,18 +77,19 @@ gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
        return mv;
 }
 
-int
+static int
 gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
 {
+       static const int v_scale = 1000;
        int mv;
 
        mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
-       mv = DIV_ROUND_UP(mv, 1000);
+       mv = DIV_ROUND_UP(mv, v_scale);
 
        return mv * 1000;
 }
 
-int
+static int
 gk20a_volt_vid_get(struct nvkm_volt *base)
 {
        struct gk20a_volt *volt = gk20a_volt(base);
@@ -103,7 +104,7 @@ gk20a_volt_vid_get(struct nvkm_volt *base)
        return -EINVAL;
 }
 
-int
+static int
 gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
 {
        struct gk20a_volt *volt = gk20a_volt(base);
@@ -113,7 +114,7 @@ gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
        return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
 }
 
-int
+static int
 gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
 {
        struct gk20a_volt *volt = gk20a_volt(base);
@@ -143,9 +144,9 @@ gk20a_volt = {
 };
 
 int
-_gk20a_volt_ctor(struct nvkm_device *device, int index,
-                const struct cvb_coef *coefs, int nb_coefs,
-                struct gk20a_volt *volt)
+gk20a_volt_ctor(struct nvkm_device *device, int index,
+               const struct cvb_coef *coefs, int nb_coefs,
+               int vmin, struct gk20a_volt *volt)
 {
        struct nvkm_device_tegra *tdev = device->func->tegra(device);
        int i, uv;
@@ -160,9 +161,9 @@ _gk20a_volt_ctor(struct nvkm_device *device, int index,
        volt->base.vid_nr = nb_coefs;
        for (i = 0; i < volt->base.vid_nr; i++) {
                volt->base.vid[i].vid = i;
-               volt->base.vid[i].uv =
-                       gk20a_volt_calc_voltage(&coefs[i],
-                                               tdev->gpu_speedo);
+               volt->base.vid[i].uv = max(
+                       gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo),
+                       vmin);
                nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
                           volt->base.vid[i].vid, volt->base.vid[i].uv);
        }
@@ -180,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
                return -ENOMEM;
        *pvolt = &volt->base;
 
-       return _gk20a_volt_ctor(device, index, gk20a_cvb_coef,
-                               ARRAY_SIZE(gk20a_cvb_coef), volt);
+       return gk20a_volt_ctor(device, index, gk20a_cvb_coef,
+                              ARRAY_SIZE(gk20a_cvb_coef), 0, volt);
 }
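
gk20a_volt_ctor() (renamed from _gk20a_volt_ctor) now takes a vmin floor and clamps every computed CVB voltage with max(); gk20a_volt_calc_voltage() itself rounds the scaled CVB result up to whole millivolts (the newly named v_scale = 1000) before converting back to microvolts. The arithmetic, worked with an invented CVB output:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        const int v_scale = 1000;  /* scaled-CVB units per millivolt */
        int cvb = 812345;          /* invented CVB evaluation result */
        int uv = DIV_ROUND_UP(cvb, v_scale) * 1000;  /* -> 813000 uV */
        int vmin = 818750;         /* e.g. speedo_to_vmin[2] (gm20b) */

        printf("calc %d uV, clamped %d uV\n", uv, uv > vmin ? uv : vmin);
        return 0;
}
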
index 0fa3b502bcf802c995070bf3a150821c501bd047..6a6c97f9684e0c1e560d03209fbed77755ac4dac 100644 (file)
@@ -37,13 +37,8 @@ struct gk20a_volt {
        struct regulator *vdd;
 };
 
-int _gk20a_volt_ctor(struct nvkm_device *device, int index,
-                    const struct cvb_coef *coefs, int nb_coefs,
-                    struct gk20a_volt *volt);
-
-int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo);
-int gk20a_volt_vid_get(struct nvkm_volt *volt);
-int gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid);
-int gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition);
+int gk20a_volt_ctor(struct nvkm_device *device, int index,
+                   const struct cvb_coef *coefs, int nb_coefs,
+                   int vmin, struct gk20a_volt *volt);
 
 #endif
index 49b5ecb701e4c1ba91a1edc20a59e37c8d3344f5..74db4d28930fd410994041a2e927c34d6876cbac 100644 (file)
@@ -41,16 +41,52 @@ const struct cvb_coef gm20b_cvb_coef[] = {
        /* 921600 */ { 2647676, -106455, 1632 },
 };
 
+static const struct cvb_coef gm20b_na_cvb_coef[] = {
+       /* KHz,         c0,     c1,   c2,    c3,     c4,   c5 */
+       /*  76800 */ {  814294, 8144, -940, 808, -21583, 226 },
+       /* 153600 */ {  856185, 8144, -940, 808, -21583, 226 },
+       /* 230400 */ {  898077, 8144, -940, 808, -21583, 226 },
+       /* 307200 */ {  939968, 8144, -940, 808, -21583, 226 },
+       /* 384000 */ {  981860, 8144, -940, 808, -21583, 226 },
+       /* 460800 */ { 1023751, 8144, -940, 808, -21583, 226 },
+       /* 537600 */ { 1065642, 8144, -940, 808, -21583, 226 },
+       /* 614400 */ { 1107534, 8144, -940, 808, -21583, 226 },
+       /* 691200 */ { 1149425, 8144, -940, 808, -21583, 226 },
+       /* 768000 */ { 1191317, 8144, -940, 808, -21583, 226 },
+       /* 844800 */ { 1233208, 8144, -940, 808, -21583, 226 },
+       /* 921600 */ { 1275100, 8144, -940, 808, -21583, 226 },
+       /* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
+};
+
+static const u32 speedo_to_vmin[] = {
+       /*   0,      1,      2,      3,      4, */
+       950000, 840000, 818750, 840000, 810000,
+};
+
 int
 gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
 {
+       struct nvkm_device_tegra *tdev = device->func->tegra(device);
        struct gk20a_volt *volt;
+       u32 vmin;
+
+       if (tdev->gpu_speedo_id >= ARRAY_SIZE(speedo_to_vmin)) {
+               nvdev_error(device, "unsupported speedo %d\n",
+                           tdev->gpu_speedo_id);
+               return -EINVAL;
+       }
 
        volt = kzalloc(sizeof(*volt), GFP_KERNEL);
        if (!volt)
                return -ENOMEM;
        *pvolt = &volt->base;
 
-       return _gk20a_volt_ctor(device, index, gm20b_cvb_coef,
-                               ARRAY_SIZE(gm20b_cvb_coef), volt);
+       vmin = speedo_to_vmin[tdev->gpu_speedo_id];
+
+       if (tdev->gpu_speedo_id >= 1)
+               return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef,
+                                    ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
+       else
+               return gk20a_volt_ctor(device, index, gm20b_cvb_coef,
+                                       ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
 }
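
gm20b now derives its floor voltage from speedo_to_vmin by gpu_speedo_id, rejects ids beyond the table, and switches to the six-coefficient NA table for speedo id >= 1 (speedo 0 keeps the original three-coefficient set). The selection in miniature:

#include <stdio.h>

static const unsigned speedo_to_vmin[] = {
        /*   0,      1,      2,      3,      4 */
        950000, 840000, 818750, 840000, 810000,
};

int main(void)
{
        int id = 1;  /* example gpu_speedo_id */

        if (id < 0 ||
            id >= (int)(sizeof(speedo_to_vmin) / sizeof(speedo_to_vmin[0]))) {
                fprintf(stderr, "unsupported speedo %d\n", id);
                return 1;
        }
        printf("vmin %u uV, %s coefficients\n", speedo_to_vmin[id],
               id >= 1 ? "NA (six-term)" : "legacy (three-term)");
        return 0;
}
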
index 73241c4eb7aa53feef7593da1969322ba600c304..556f81f6b2c7ddd67a0587d9e13c1ea883262497 100644 (file)
@@ -2,12 +2,8 @@ config DRM_OMAP
        tristate "OMAP DRM"
        depends on DRM
        depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+       select OMAP2_DSS
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_SYS_FOPS
        default n
        help
          DRM display driver for OMAP2/3/4 based boards.
index 2a618afe0f53b8ead6e2e4026a2216b5ce31d003..c226da145fb3cd62c3bd91ac3070850f0429f474 100644 (file)
@@ -1,80 +1,80 @@
 menu "OMAPDRM External Display Device Drivers"
 
-config DISPLAY_ENCODER_OPA362
+config DRM_OMAP_ENCODER_OPA362
        tristate "OPA362 external analog amplifier"
        help
          Driver for OPA362 external analog TV amplifier controlled
          through a GPIO.
 
-config DISPLAY_ENCODER_TFP410
+config DRM_OMAP_ENCODER_TFP410
         tristate "TFP410 DPI to DVI Encoder"
        help
          Driver for TFP410 DPI to DVI encoder.
 
-config DISPLAY_ENCODER_TPD12S015
+config DRM_OMAP_ENCODER_TPD12S015
         tristate "TPD12S015 HDMI ESD protection and level shifter"
        help
          Driver for TPD12S015, which offers HDMI ESD protection and level
          shifting.
 
-config DISPLAY_CONNECTOR_DVI
+config DRM_OMAP_CONNECTOR_DVI
         tristate "DVI Connector"
        depends on I2C
        help
          Driver for a generic DVI connector.
 
-config DISPLAY_CONNECTOR_HDMI
+config DRM_OMAP_CONNECTOR_HDMI
         tristate "HDMI Connector"
        help
          Driver for a generic HDMI connector.
 
-config DISPLAY_CONNECTOR_ANALOG_TV
+config DRM_OMAP_CONNECTOR_ANALOG_TV
         tristate "Analog TV Connector"
        help
          Driver for a generic analog TV connector.
 
-config DISPLAY_PANEL_DPI
+config DRM_OMAP_PANEL_DPI
        tristate "Generic DPI panel"
        help
          Driver for generic DPI panels.
 
-config DISPLAY_PANEL_DSI_CM
+config DRM_OMAP_PANEL_DSI_CM
        tristate "Generic DSI Command Mode Panel"
        depends on BACKLIGHT_CLASS_DEVICE
        help
          Driver for generic DSI command mode panels.
 
-config DISPLAY_PANEL_SONY_ACX565AKM
+config DRM_OMAP_PANEL_SONY_ACX565AKM
        tristate "ACX565AKM Panel"
        depends on SPI && BACKLIGHT_CLASS_DEVICE
        help
          This is the LCD panel used on Nokia N900
 
-config DISPLAY_PANEL_LGPHILIPS_LB035Q02
+config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02
        tristate "LG.Philips LB035Q02 LCD Panel"
        depends on SPI
        help
          LCD Panel used on the Gumstix Overo Palo35
 
-config DISPLAY_PANEL_SHARP_LS037V7DW01
+config DRM_OMAP_PANEL_SHARP_LS037V7DW01
         tristate "Sharp LS037V7DW01 LCD Panel"
         depends on BACKLIGHT_CLASS_DEVICE
         help
           LCD Panel used in TI's SDP3430 and EVM boards
 
-config DISPLAY_PANEL_TPO_TD028TTEC1
+config DRM_OMAP_PANEL_TPO_TD028TTEC1
         tristate "TPO TD028TTEC1 LCD Panel"
         depends on SPI
         help
           LCD panel used in Openmoko.
 
-config DISPLAY_PANEL_TPO_TD043MTEA1
+config DRM_OMAP_PANEL_TPO_TD043MTEA1
         tristate "TPO TD043MTEA1 LCD Panel"
         depends on SPI
         help
           LCD Panel used in OMAP3 Pandora
 
-config DISPLAY_PANEL_NEC_NL8048HL11
+config DRM_OMAP_PANEL_NEC_NL8048HL11
        tristate "NEC NL8048HL11 Panel"
        depends on SPI
        depends on BACKLIGHT_CLASS_DEVICE
index 9aa176bfbf2e31b317340928a6c546fa5c5a2ff4..46baafb1a83efc5b02d4442cdb65ed2e156c3541 100644 (file)
@@ -1,14 +1,14 @@
-obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o
-obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o
-obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
-obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o
-obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o
-obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
-obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
-obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
-obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
-obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
+obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
+obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
+obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o
+obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
+obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
+obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
+obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
+obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
index 8511c648a15c21cd5d7239e804ca1088c3e189bf..3485d1ecd6557e271443920804e09f21230af33d 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
 #include <video/omap-panel-data.h>
 
+#include "../dss/omapdss.h"
+
 struct panel_drv_data {
        struct omap_dss_device dssdev;
        struct omap_dss_device *in;
@@ -25,7 +26,6 @@ struct panel_drv_data {
 
        struct omap_video_timings timings;
 
-       enum omap_dss_venc_type connector_type;
        bool invert_polarity;
 };
 
@@ -45,10 +45,6 @@ static const struct omap_video_timings tvc_pal_timings = {
 
 static const struct of_device_id tvc_of_match[];
 
-struct tvc_of_data {
-       enum omap_dss_venc_type connector_type;
-};
-
 #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
 
 static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +95,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
        in->ops.atv->set_timings(in, &ddata->timings);
 
        if (!ddata->dev->of_node) {
-               in->ops.atv->set_type(in, ddata->connector_type);
+               in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
 
                in->ops.atv->invert_vid_out_polarity(in,
                        ddata->invert_polarity);
@@ -207,7 +203,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
 
        ddata->in = in;
 
-       ddata->connector_type = pdata->connector_type;
        ddata->invert_polarity = pdata->invert_polarity;
 
        dssdev = &ddata->dssdev;
index 747f26a55e43cd74f337042accff30f383159f72..684b7aeda411a859299930d174e953450df58775 100644 (file)
 #include <linux/slab.h>
 
 #include <drm/drm_edid.h>
-
-#include <video/omapdss.h>
 #include <video/omap-panel-data.h>
 
+#include "../dss/omapdss.h"
+
 static const struct omap_video_timings dvic_default_timings = {
        .x_res          = 640,
        .y_res          = 480,
@@ -255,6 +255,7 @@ static int dvic_probe_of(struct platform_device *pdev)
        adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
        if (adapter_node) {
                adapter = of_get_i2c_adapter_by_node(adapter_node);
+               of_node_put(adapter_node);
                if (adapter == NULL) {
                        dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
                        omap_dss_put_device(ddata->in);
index 225fd8d6ab31a0fae0d0347bd0155ae4f6d25682..7bdf83af9797db85a35430187a492a3540b1405d 100644 (file)
@@ -9,6 +9,7 @@
  * the Free Software Foundation.
  */
 
+#include <linux/gpio/consumer.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/of_gpio.h>
 
 #include <drm/drm_edid.h>
-
-#include <video/omapdss.h>
 #include <video/omap-panel-data.h>
 
+#include "../dss/omapdss.h"
+
 static const struct omap_video_timings hdmic_default_timings = {
        .x_res          = 640,
        .y_res          = 480,
index 8c246c213e06ea1bfaff43f49c34e4a903a6193a..fe4e7ec3bab079101363b5d83890573988811ec7 100644 (file)
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
        struct omap_dss_device dssdev;
index 2fd5602880a76176c60ca99d75b085194e9f34cd..d768217cefe04dd62a65feffe14f5a863a328ece 100644 (file)
@@ -9,14 +9,13 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
        struct omap_dss_device dssdev;
index 916a89978387c8c9c2ff73f1091a1d743eab5202..46855c8f5cbf3fe820666fb57af364c3d8ca4b11 100644 (file)
@@ -16,8 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/gpio/consumer.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
        struct omap_dss_device dssdev;
index e780fd4f8b46b3cc24637985eeb663960b522429..7f16f985ab22a7beea6e68551bb9d170efc28401 100644 (file)
@@ -9,17 +9,19 @@
  * the Free Software Foundation.
  */
 
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
 
-#include <video/omapdss.h>
 #include <video/omap-panel-data.h>
 #include <video/of_display_timing.h>
 
+#include "../dss/omapdss.h"
+
 struct panel_drv_data {
        struct omap_dss_device dssdev;
        struct omap_dss_device *in;
@@ -32,6 +34,7 @@ struct panel_drv_data {
        int backlight_gpio;
 
        struct gpio_desc *enable_gpio;
+       struct regulator *vcc_supply;
 };
 
 #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -83,6 +86,12 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
        if (r)
                return r;
 
+       r = regulator_enable(ddata->vcc_supply);
+       if (r) {
+               in->ops.dpi->disable(in);
+               return r;
+       }
+
        gpiod_set_value_cansleep(ddata->enable_gpio, 1);
 
        if (gpio_is_valid(ddata->backlight_gpio))
@@ -105,6 +114,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
                gpio_set_value_cansleep(ddata->backlight_gpio, 0);
 
        gpiod_set_value_cansleep(ddata->enable_gpio, 0);
+       regulator_disable(ddata->vcc_supply);
 
        in->ops.dpi->disable(in);
 
@@ -213,6 +223,20 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
 
        ddata->enable_gpio = gpio;
 
+       /*
+        * Many different panels are supported by this driver, and their
+        * reset pins probably have very different timing and ordering
+        * requirements relative to the enable GPIO. So for now, just make
+        * sure that the reset line isn't active.
+        */
+       gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(gpio))
+               return PTR_ERR(gpio);
+
+       ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc");
+       if (IS_ERR(ddata->vcc_supply))
+               return PTR_ERR(ddata->vcc_supply);
+
        ddata->backlight_gpio = -ENOENT;
 
        r = of_get_display_timing(node, "panel-timing", &timing);
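
The enable path now sequences three resources: the DPI source first, then the new vcc regulator (unwinding the source if regulator_enable() fails), then the enable GPIO; disable runs strictly in reverse. The optional reset GPIO is merely requested and parked inactive, per the comment above. The sequencing skeleton, runnable with stubs:

#include <stdio.h>

static int  dpi_enable(void)    { puts("dpi on");  return 0; }
static void dpi_disable(void)   { puts("dpi off"); }
static int  regulator_on(void)  { puts("vcc on");  return 0; }
static void regulator_off(void) { puts("vcc off"); }
static void gpio_set(int v)     { printf("enable-gpio=%d\n", v); }

int main(void)
{
        /* panel_dpi_enable() in miniature */
        if (dpi_enable())
                return 1;
        if (regulator_on()) {
                dpi_disable();  /* unwind on failure */
                return 1;
        }
        gpio_set(1);

        /* ... panel active ... */

        /* panel_dpi_disable() in miniature: exact reverse order */
        gpio_set(0);
        regulator_off();
        dpi_disable();
        return 0;
}
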
index 36485c2137ce43e57cad855dc459f9c0f13bcc41..1b0cf2d8224b3ade7cf2aeb124db6e58c5faebf4 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/backlight.h>
 #include <linux/delay.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
 #include <video/mipi_display.h>
 
+#include "../dss/omapdss.h"
+
 /* DSI Virtual channel. Hardcoded for now. */
 #define TCH 0
 
index 458f77bc473dde54497727e0971afb038037108d..6dfb96cea293e2c1cdc0deeff32096b9057710e4 100644 (file)
@@ -15,9 +15,9 @@
 #include <linux/spi/spi.h>
 #include <linux/mutex.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include "../dss/omapdss.h"
 
 static struct omap_video_timings lb035q02_timings = {
        .x_res = 320,
@@ -50,9 +50,6 @@ struct panel_drv_data {
 
        struct omap_video_timings videomode;
 
-       /* used for non-DT boot, to be removed */
-       int backlight_gpio;
-
        struct gpio_desc *enable_gpio;
 };
 
@@ -170,9 +167,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
        if (ddata->enable_gpio)
                gpiod_set_value_cansleep(ddata->enable_gpio, 1);
 
-       if (gpio_is_valid(ddata->backlight_gpio))
-               gpio_set_value_cansleep(ddata->backlight_gpio, 1);
-
        dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
 
        return 0;
@@ -189,9 +183,6 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
        if (ddata->enable_gpio)
                gpiod_set_value_cansleep(ddata->enable_gpio, 0);
 
-       if (gpio_is_valid(ddata->backlight_gpio))
-               gpio_set_value_cansleep(ddata->backlight_gpio, 0);
-
        in->ops.dpi->disable(in);
 
        dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
@@ -255,8 +246,6 @@ static int lb035q02_probe_of(struct spi_device *spi)
 
        ddata->enable_gpio = gpio;
 
-       ddata->backlight_gpio = -ENOENT;
-
        in = omapdss_of_find_source_for_first_ep(node);
        if (IS_ERR(in)) {
                dev_err(&spi->dev, "failed to find video source\n");
@@ -289,13 +278,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
        if (r)
                return r;
 
-       if (gpio_is_valid(ddata->backlight_gpio)) {
-               r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
-                               GPIOF_OUT_INIT_LOW, "panel backlight");
-               if (r)
-                       goto err_gpio;
-       }
-
        ddata->videomode = lb035q02_timings;
 
        dssdev = &ddata->dssdev;
@@ -315,7 +297,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
        return 0;
 
 err_reg:
-err_gpio:
        omap_dss_put_device(ddata->in);
        return r;
 }
index 780cb263a318ebe311eac0fa67dbeb5fe3ed6864..fc4c238c95836f10bd9237745d6f484b7f4e71ad 100644 (file)
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
        struct omap_dss_device  dssdev;
index 529a017602e4687bdf9cf8d11359e357e991f4ae..3d3efc561ea961d5abc3474c40b90723e8428339 100644 (file)
  */
 
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
+
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
        struct omap_dss_device dssdev;
index 31efcca801bdce78f68b6e963a4c80694f85b7d1..157c512205d19f18d9fa3eb92e005bac8c438799 100644 (file)
 #include <linux/sched.h>
 #include <linux/backlight.h>
 #include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
 #include <video/omap-panel-data.h>
 
+#include "../dss/omapdss.h"
+
 #define MIPID_CMD_READ_DISP_ID         0x04
 #define MIPID_CMD_READ_RED             0x06
 #define MIPID_CMD_READ_GREEN           0x07
index bd8d850419267e4b74307e70e43f8149e097c593..e859b3f893f70a67c956615894c87f8b8f9fc171 100644 (file)
@@ -28,7 +28,8 @@
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
-#include <video/omapdss.h>
+
+#include "../dss/omapdss.h"
 
 struct panel_drv_data {
        struct omap_dss_device dssdev;
index 03e2beb7b4f0719263719c75f1f46cd6cc318369..66c6bbe6472bbe781f9fcbd80e29ac324d5e742e 100644 (file)
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
+#include "../dss/omapdss.h"
 
 #define TPO_R02_MODE(x)                ((x) & 7)
 #define TPO_R02_MODE_800x480   7
index 7e4e5bebabbe4f3ce98e85db13f2a0611345f065..6a3ebfcd72238a8a9206787bbea662f8043ba27a 100644 (file)
@@ -35,8 +35,7 @@
 #include <linux/suspend.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 
@@ -196,8 +195,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
                core.default_display_name = def_disp_name;
        else if (pdata->default_display_name)
                core.default_display_name = pdata->default_display_name;
-       else if (pdata->default_device)
-               core.default_display_name = pdata->default_device->name;
 
        return 0;
 
index f83608b69e68342a1f1c779bfd13f2ac1a9c4d4e..535240fba67158e22ea49c6b8c91653840bfa83d 100644 (file)
@@ -41,8 +41,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 #include "dispc.h"
@@ -113,9 +112,14 @@ struct dispc_features {
         * never both, we can just use this flag for now.
         */
        bool reverse_ilace_field_order:1;
+
+       bool has_gamma_table:1;
+
+       bool has_gamma_i734_bug:1;
 };
 
 #define DISPC_MAX_NR_FIFOS 5
+#define DISPC_MAX_CHANNEL_GAMMA 4
 
 static struct {
        struct platform_device *pdev;
@@ -135,6 +139,8 @@ static struct {
        bool            ctx_valid;
        u32             ctx[DISPC_SZ_REGS / sizeof(u32)];
 
+       u32 *gamma_table[DISPC_MAX_CHANNEL_GAMMA];
+
        const struct dispc_features *feat;
 
        bool is_enabled;
@@ -178,11 +184,19 @@ struct dispc_reg_field {
        u8 low;
 };
 
+struct dispc_gamma_desc {
+       u32 len;
+       u32 bits;
+       u16 reg;
+       bool has_index;
+};
+
 static const struct {
        const char *name;
        u32 vsync_irq;
        u32 framedone_irq;
        u32 sync_lost_irq;
+       struct dispc_gamma_desc gamma;
        struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM];
 } mgr_desc[] = {
        [OMAP_DSS_CHANNEL_LCD] = {
@@ -190,6 +204,12 @@ static const struct {
                .vsync_irq      = DISPC_IRQ_VSYNC,
                .framedone_irq  = DISPC_IRQ_FRAMEDONE,
                .sync_lost_irq  = DISPC_IRQ_SYNC_LOST,
+               .gamma          = {
+                       .len    = 256,
+                       .bits   = 8,
+                       .reg    = DISPC_GAMMA_TABLE0,
+                       .has_index = true,
+               },
                .reg_desc       = {
                        [DISPC_MGR_FLD_ENABLE]          = { DISPC_CONTROL,  0,  0 },
                        [DISPC_MGR_FLD_STNTFT]          = { DISPC_CONTROL,  3,  3 },
@@ -207,6 +227,12 @@ static const struct {
                .vsync_irq      = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN,
                .framedone_irq  = DISPC_IRQ_FRAMEDONETV,
                .sync_lost_irq  = DISPC_IRQ_SYNC_LOST_DIGIT,
+               .gamma          = {
+                       .len    = 1024,
+                       .bits   = 10,
+                       .reg    = DISPC_GAMMA_TABLE2,
+                       .has_index = false,
+               },
                .reg_desc       = {
                        [DISPC_MGR_FLD_ENABLE]          = { DISPC_CONTROL,  1,  1 },
                        [DISPC_MGR_FLD_STNTFT]          = { },
@@ -224,6 +250,12 @@ static const struct {
                .vsync_irq      = DISPC_IRQ_VSYNC2,
                .framedone_irq  = DISPC_IRQ_FRAMEDONE2,
                .sync_lost_irq  = DISPC_IRQ_SYNC_LOST2,
+               .gamma          = {
+                       .len    = 256,
+                       .bits   = 8,
+                       .reg    = DISPC_GAMMA_TABLE1,
+                       .has_index = true,
+               },
                .reg_desc       = {
                        [DISPC_MGR_FLD_ENABLE]          = { DISPC_CONTROL2,  0,  0 },
                        [DISPC_MGR_FLD_STNTFT]          = { DISPC_CONTROL2,  3,  3 },
@@ -241,6 +273,12 @@ static const struct {
                .vsync_irq      = DISPC_IRQ_VSYNC3,
                .framedone_irq  = DISPC_IRQ_FRAMEDONE3,
                .sync_lost_irq  = DISPC_IRQ_SYNC_LOST3,
+               .gamma          = {
+                       .len    = 256,
+                       .bits   = 8,
+                       .reg    = DISPC_GAMMA_TABLE3,
+                       .has_index = true,
+               },
                .reg_desc       = {
                        [DISPC_MGR_FLD_ENABLE]          = { DISPC_CONTROL3,  0,  0 },
                        [DISPC_MGR_FLD_STNTFT]          = { DISPC_CONTROL3,  3,  3 },
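
Each channel now carries a gamma descriptor: the three LCD managers get 256-entry tables with 8 bits per component and an index field in each register write, while the TV manager has a single 1024-entry, 10-bit table without one. One plausible loader driven by these fields (per the OMAP TRM the LCD tables take the entry index in bits 31:24 of DISPC_GAMMA_TABLEn; this loop is only a sketch, not the eventual driver function):

#include <stdint.h>
#include <stdio.h>

struct gamma_desc { uint32_t len, bits; int has_index; };

static void write_reg(uint32_t v)
{
        printf("DISPC_GAMMA_TABLEn <- %08x\n", (unsigned)v);
}

static void load_gamma(const struct gamma_desc *gd, const uint32_t *table)
{
        for (uint32_t i = 0; i < gd->len; i++) {
                /* keep only the packed R/G/B payload bits */
                uint32_t v = table[i] & ((1u << (3 * gd->bits)) - 1);
                if (gd->has_index)
                        v |= i << 24;  /* LCD: entry index in 31:24 */
                write_reg(v);
        }
}

int main(void)
{
        const struct gamma_desc lcd = { .len = 4, .bits = 8, .has_index = 1 };
        const uint32_t ramp[4] = { 0x000000, 0x555555, 0xaaaaaa, 0xffffff };
        load_gamma(&lcd, ramp);  /* len shrunk from 256 for the demo */
        return 0;
}
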
@@ -1084,20 +1122,6 @@ static u32 dispc_ovl_get_burst_size(enum omap_plane plane)
        return unit * 8;
 }
 
-void dispc_enable_gamma_table(bool enable)
-{
-       /*
-        * This is partially implemented to support only disabling of
-        * the gamma table.
-        */
-       if (enable) {
-               DSSWARN("Gamma table enabling for TV not yet supported");
-               return;
-       }
-
-       REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
-}
-
 static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
 {
        if (channel == OMAP_DSS_CHANNEL_DIGIT)
@@ -3299,30 +3323,21 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
 
 static unsigned long dispc_fclk_rate(void)
 {
-       struct dss_pll *pll;
-       unsigned long r = 0;
+       unsigned long r;
+       enum dss_clk_source src;
+
+       src = dss_get_dispc_clk_source();
 
-       switch (dss_get_dispc_clk_source()) {
-       case OMAP_DSS_CLK_SRC_FCK:
+       if (src == DSS_CLK_SRC_FCK) {
                r = dss_get_dispc_clk_rate();
-               break;
-       case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
-               pll = dss_pll_find("dsi0");
-               if (!pll)
-                       pll = dss_pll_find("video0");
+       } else {
+               struct dss_pll *pll;
+               unsigned clkout_idx;
 
-               r = pll->cinfo.clkout[0];
-               break;
-       case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
-               pll = dss_pll_find("dsi1");
-               if (!pll)
-                       pll = dss_pll_find("video1");
+               pll = dss_pll_find_by_src(src);
+               clkout_idx = dss_pll_get_clkout_idx_for_src(src);
 
-               r = pll->cinfo.clkout[0];
-               break;
-       default:
-               BUG();
-               return 0;
+               r = pll->cinfo.clkout[clkout_idx];
        }
 
        return r;
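
The per-name PLL lookups and the BUG() default are gone: the clock-source enum now carries enough information for dss_pll_find_by_src() and dss_pll_get_clkout_idx_for_src() (declared in the dss.h hunk further down) to resolve both the PLL and the HSDIV output. Their bodies are not part of this excerpt; a minimal sketch of the index mapping they imply, assuming DSS_CLK_SRC_PLLn_m selects HSDIV output m - 1 as the "PLLn:m" names suggest:

    /* Illustrative sketch only; the real helper lives in pll.c. */
    static unsigned sketch_clkout_idx(enum dss_clk_source src)
    {
            switch (src) {
            case DSS_CLK_SRC_PLL1_1:
            case DSS_CLK_SRC_PLL2_1:
                    return 0;       /* PLLn:1 -> cinfo.clkout[0] */
            case DSS_CLK_SRC_PLL1_2:
            case DSS_CLK_SRC_PLL2_2:
                    return 1;       /* PLLn:2 -> cinfo.clkout[1] */
            case DSS_CLK_SRC_PLL1_3:
            case DSS_CLK_SRC_PLL2_3:
                    return 2;       /* PLLn:3 -> cinfo.clkout[2] */
            default:
                    return 0;       /* FCK / HDMI PLL: single output */
            }
    }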
@@ -3330,43 +3345,31 @@ static unsigned long dispc_fclk_rate(void)
 
 static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
 {
-       struct dss_pll *pll;
        int lcd;
        unsigned long r;
-       u32 l;
-
-       if (dss_mgr_is_lcd(channel)) {
-               l = dispc_read_reg(DISPC_DIVISORo(channel));
+       enum dss_clk_source src;
 
-               lcd = FLD_GET(l, 23, 16);
+       /* for TV, LCLK rate is the FCLK rate */
+       if (!dss_mgr_is_lcd(channel))
+               return dispc_fclk_rate();
 
-               switch (dss_get_lcd_clk_source(channel)) {
-               case OMAP_DSS_CLK_SRC_FCK:
-                       r = dss_get_dispc_clk_rate();
-                       break;
-               case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
-                       pll = dss_pll_find("dsi0");
-                       if (!pll)
-                               pll = dss_pll_find("video0");
+       src = dss_get_lcd_clk_source(channel);
 
-                       r = pll->cinfo.clkout[0];
-                       break;
-               case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
-                       pll = dss_pll_find("dsi1");
-                       if (!pll)
-                               pll = dss_pll_find("video1");
+       if (src == DSS_CLK_SRC_FCK) {
+               r = dss_get_dispc_clk_rate();
+       } else {
+               struct dss_pll *pll;
+               unsigned clkout_idx;
 
-                       r = pll->cinfo.clkout[0];
-                       break;
-               default:
-                       BUG();
-                       return 0;
-               }
+               pll = dss_pll_find_by_src(src);
+               clkout_idx = dss_pll_get_clkout_idx_for_src(src);
 
-               return r / lcd;
-       } else {
-               return dispc_fclk_rate();
+               r = pll->cinfo.clkout[clkout_idx];
        }
+
+       lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16);
+
+       return r / lcd;
 }
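
With the PLL lookup factored out, the LCD arithmetic reads straight through: the manager logic clock is the selected source divided by the lck divisor in DISPC_DIVISORo[23:16]. With illustrative numbers, a 186 MHz HSDIV output and lck_div = 1 give lclk = 186 MHz; dispc_mgr_pclk_rate() below divides that once more by the pixel-clock divisor.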
 
 static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
@@ -3426,15 +3429,14 @@ static unsigned long dispc_plane_lclk_rate(enum omap_plane plane)
 static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
 {
        int lcd, pcd;
-       enum omap_dss_clk_source lcd_clk_src;
+       enum dss_clk_source lcd_clk_src;
 
        seq_printf(s, "- %s -\n", mgr_desc[channel].name);
 
        lcd_clk_src = dss_get_lcd_clk_source(channel);
 
-       seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name,
-               dss_get_generic_clk_source_name(lcd_clk_src),
-               dss_feat_get_clk_source_name(lcd_clk_src));
+       seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name,
+               dss_get_clk_source_name(lcd_clk_src));
 
        dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd);
 
@@ -3448,16 +3450,15 @@ void dispc_dump_clocks(struct seq_file *s)
 {
        int lcd;
        u32 l;
-       enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
+       enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
 
        if (dispc_runtime_get())
                return;
 
        seq_printf(s, "- DISPC -\n");
 
-       seq_printf(s, "dispc fclk source = %s (%s)\n",
-                       dss_get_generic_clk_source_name(dispc_clk_src),
-                       dss_feat_get_clk_source_name(dispc_clk_src));
+       seq_printf(s, "dispc fclk source = %s\n",
+                       dss_get_clk_source_name(dispc_clk_src));
 
        seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
 
@@ -3814,6 +3815,139 @@ void dispc_disable_sidle(void)
        REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3);  /* SIDLEMODE: no idle */
 }
 
+u32 dispc_mgr_gamma_size(enum omap_channel channel)
+{
+       const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+
+       if (!dispc.feat->has_gamma_table)
+               return 0;
+
+       return gdesc->len;
+}
+EXPORT_SYMBOL(dispc_mgr_gamma_size);
+
+static void dispc_mgr_write_gamma_table(enum omap_channel channel)
+{
+       const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+       u32 *table = dispc.gamma_table[channel];
+       unsigned int i;
+
+       DSSDBG("%s: channel %d\n", __func__, channel);
+
+       for (i = 0; i < gdesc->len; ++i) {
+               u32 v = table[i];
+
+               if (gdesc->has_index)
+                       v |= i << 24;
+               else if (i == 0)
+                       v |= 1 << 31;
+
+               dispc_write_reg(gdesc->reg, v);
+       }
+}
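
One loop serves two register layouts. The indexed tables (the 8-bit LCD channels) take the entry number in bits 31:24 of every write, while the 10-bit TV table auto-increments and only needs bit 31 set on the first word to rewind its internal index. A worked example of the packing, using the component layout dispc_mgr_set_gamma() produces below:

    /*
     * 8-bit LCD table, entry 5 = (R 0x12, G 0x34, B 0x56):
     *   v = (5 << 24) | (0x12 << 16) | (0x34 << 8) | 0x56 = 0x05123456
     * 10-bit TV table: R/G/B sit at bits 29:20, 19:10 and 9:0, and
     * entry 0 additionally carries bit 31 to reset the HW index.
     */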
+
+static void dispc_restore_gamma_tables(void)
+{
+       DSSDBG("%s()\n", __func__);
+
+       if (!dispc.feat->has_gamma_table)
+               return;
+
+       dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD);
+
+       dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT);
+
+       if (dss_has_feature(FEAT_MGR_LCD2))
+               dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2);
+
+       if (dss_has_feature(FEAT_MGR_LCD3))
+               dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3);
+}
+
+static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
+       { .red = 0, .green = 0, .blue = 0, },
+       { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
+};
+
+void dispc_mgr_set_gamma(enum omap_channel channel,
+                        const struct drm_color_lut *lut,
+                        unsigned int length)
+{
+       const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+       u32 *table = dispc.gamma_table[channel];
+       uint i;
+
+       DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__,
+              channel, length, gdesc->len);
+
+       if (!dispc.feat->has_gamma_table)
+               return;
+
+       if (lut == NULL || length < 2) {
+               lut = dispc_mgr_gamma_default_lut;
+               length = ARRAY_SIZE(dispc_mgr_gamma_default_lut);
+       }
+
+       for (i = 0; i < length - 1; ++i) {
+               uint first = i * (gdesc->len - 1) / (length - 1);
+               uint last = (i + 1) * (gdesc->len - 1) / (length - 1);
+               uint w = last - first;
+               u16 r, g, b;
+               uint j;
+
+               if (w == 0)
+                       continue;
+
+               for (j = 0; j <= w; j++) {
+                       r = (lut[i].red * (w - j) + lut[i+1].red * j) / w;
+                       g = (lut[i].green * (w - j) + lut[i+1].green * j) / w;
+                       b = (lut[i].blue * (w - j) + lut[i+1].blue * j) / w;
+
+                       r >>= 16 - gdesc->bits;
+                       g >>= 16 - gdesc->bits;
+                       b >>= 16 - gdesc->bits;
+
+                       table[first + j] = (r << (gdesc->bits * 2)) |
+                               (g << gdesc->bits) | b;
+               }
+       }
+
+       if (dispc.is_enabled)
+               dispc_mgr_write_gamma_table(channel);
+}
+EXPORT_SYMBOL(dispc_mgr_set_gamma);
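
The resampler linearly interpolates a drm_color_lut of any length (>= 2) onto the fixed-size hardware table; a NULL or too-short LUT degenerates to the two-point identity ramp above, which is also how dispc_init_gamma_tables() below seeds each channel. Working the default case through for a 256-entry, 8-bit table: the single segment gives first = 0, last = 255, w = 255, so entry j gets r = 0xffff * j / 255 = 257 * j, and (257 * j) >> 8 = j for j < 256. The result is table[j] = (j << 16) | (j << 8) | j, a clean identity curve.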
+
+static int dispc_init_gamma_tables(void)
+{
+       int channel;
+
+       if (!dispc.feat->has_gamma_table)
+               return 0;
+
+       for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) {
+               const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
+               u32 *gt;
+
+               if (channel == OMAP_DSS_CHANNEL_LCD2 &&
+                   !dss_has_feature(FEAT_MGR_LCD2))
+                       continue;
+
+               if (channel == OMAP_DSS_CHANNEL_LCD3 &&
+                   !dss_has_feature(FEAT_MGR_LCD3))
+                       continue;
+
+               gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len,
+                                          sizeof(u32), GFP_KERNEL);
+               if (!gt)
+                       return -ENOMEM;
+
+               dispc.gamma_table[channel] = gt;
+
+               dispc_mgr_set_gamma(channel, NULL, 0);
+       }
+       return 0;
+}
+
 static void _omap_dispc_initial_config(void)
 {
        u32 l;
@@ -3829,8 +3963,15 @@ static void _omap_dispc_initial_config(void)
                dispc.core_clk_rate = dispc_fclk_rate();
        }
 
-       /* FUNCGATED */
-       if (dss_has_feature(FEAT_FUNCGATED))
+       /* Use gamma table mode, instead of palette mode */
+       if (dispc.feat->has_gamma_table)
+               REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3);
+
+       /*
+        * For older DSS versions (FEAT_FUNCGATED) this enables
+        * func-clock auto-gating. For newer versions
+        * (dispc.feat->has_gamma_table) this enables tv-out gamma tables.
+        */
+       if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
                REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
 
        dispc_setup_color_conv_coef();
@@ -3934,6 +4075,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
        .has_writeback          =       true,
        .supports_double_pixel  =       true,
        .reverse_ilace_field_order =    true,
+       .has_gamma_table        =       true,
+       .has_gamma_i734_bug     =       true,
 };
 
 static const struct dispc_features omap54xx_dispc_feats = {
@@ -3959,6 +4102,8 @@ static const struct dispc_features omap54xx_dispc_feats = {
        .has_writeback          =       true,
        .supports_double_pixel  =       true,
        .reverse_ilace_field_order =    true,
+       .has_gamma_table        =       true,
+       .has_gamma_i734_bug     =       true,
 };
 
 static int dispc_init_features(struct platform_device *pdev)
@@ -4050,6 +4195,168 @@ void dispc_free_irq(void *dev_id)
 }
 EXPORT_SYMBOL(dispc_free_irq);
 
+/*
+ * Workaround for errata i734 in DSS dispc
+ *  - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled
+ *
+ * For gamma tables to work on LCD1 the GFX plane has to be used at
+ * least once after DSS HW has come out of reset. The workaround
+ * sets up a minimal LCD setup with GFX plane and busy-waits for one
+ * framedone irq before disabling the setup and continuing with
+ * the context restore. The physical outputs are gated during the
+ * operation. This workaround requires that gamma table's LOADMODE
+ * is set to 0x2 in DISPC_CONTROL1 register.
+ *
+ * For details see:
+ * OMAP543x Multimedia Device Silicon Revision 2.0 Silicon Errata
+ * Literature Number: SWPZ037E
+ * Or some other relevant errata document for the DSS IP version.
+ */
+
+static const struct dispc_errata_i734_data {
+       struct omap_video_timings timings;
+       struct omap_overlay_info ovli;
+       struct omap_overlay_manager_info mgri;
+       struct dss_lcd_mgr_config lcd_conf;
+} i734 = {
+       .timings = {
+               .x_res = 8, .y_res = 1,
+               .pixelclock = 16000000,
+               .hsw = 8, .hfp = 4, .hbp = 4,
+               .vsw = 1, .vfp = 1, .vbp = 1,
+               .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+               .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+               .interlace = false,
+               .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+               .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+               .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+               .double_pixel = false,
+       },
+       .ovli = {
+               .screen_width = 1,
+               .width = 1, .height = 1,
+               .color_mode = OMAP_DSS_COLOR_RGB24U,
+               .rotation = OMAP_DSS_ROT_0,
+               .rotation_type = OMAP_DSS_ROT_DMA,
+               .mirror = 0,
+               .pos_x = 0, .pos_y = 0,
+               .out_width = 0, .out_height = 0,
+               .global_alpha = 0xff,
+               .pre_mult_alpha = 0,
+               .zorder = 0,
+       },
+       .mgri = {
+               .default_color = 0,
+               .trans_enabled = false,
+               .partial_alpha_enabled = false,
+               .cpr_enable = false,
+       },
+       .lcd_conf = {
+               .io_pad_mode = DSS_IO_PAD_MODE_BYPASS,
+               .stallmode = false,
+               .fifohandcheck = false,
+               .clock_info = {
+                       .lck_div = 1,
+                       .pck_div = 2,
+               },
+               .video_port_width = 24,
+               .lcden_sig_polarity = 0,
+       },
+};
+
+static struct i734_buf {
+       size_t size;
+       dma_addr_t paddr;
+       void *vaddr;
+} i734_buf;
+
+static int dispc_errata_i734_wa_init(void)
+{
+       if (!dispc.feat->has_gamma_i734_bug)
+               return 0;
+
+       i734_buf.size = i734.ovli.width * i734.ovli.height *
+               color_mode_to_bpp(i734.ovli.color_mode) / 8;
+
+       i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size,
+                                               &i734_buf.paddr, GFP_KERNEL);
+       if (!i734_buf.vaddr) {
+               dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
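
For the 1x1 RGB24U overlay defined in the i734 data above, the buffer size works out to 1 * 1 * 32 / 8 = 4 bytes, assuming color_mode_to_bpp() reports RGB24U as the usual 32-bit container format (RGB24P being the packed variant). dma_alloc_writecombine() returns both mappings; the DMA address is what dispc_errata_i734_wa() points the GFX pipe at.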
+
+static void dispc_errata_i734_wa_fini(void)
+{
+       if (!dispc.feat->has_gamma_i734_bug)
+               return;
+
+       dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr,
+                             i734_buf.paddr);
+}
+
+static void dispc_errata_i734_wa(void)
+{
+       u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD);
+       struct omap_overlay_info ovli;
+       struct dss_lcd_mgr_config lcd_conf;
+       u32 gatestate;
+       unsigned int count;
+
+       if (!dispc.feat->has_gamma_i734_bug)
+               return;
+
+       gatestate = REG_GET(DISPC_CONFIG, 8, 4);
+
+       ovli = i734.ovli;
+       ovli.paddr = i734_buf.paddr;
+       lcd_conf = i734.lcd_conf;
+
+       /* Gate all LCD1 outputs */
+       REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4);
+
+       /* Setup and enable GFX plane */
+       dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
+       dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
+       dispc_ovl_enable(OMAP_DSS_GFX, true);
+
+       /* Set up and enable display manager for LCD1 */
+       dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri);
+       dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
+                              &lcd_conf.clock_info);
+       dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
+       dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
+
+       dispc_clear_irqstatus(framedone_irq);
+
+       /* Enable and shut the channel to produce just one frame */
+       dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true);
+       dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false);
+
+       /*
+        * Busy wait for framedone. We can't fiddle with irq handlers
+        * in PM resume. Typically the loop runs less than 5 times and
+        * waits less than a microsecond.
+        */
+       count = 0;
+       while (!(dispc_read_irqstatus() & framedone_irq)) {
+               if (count++ > 10000) {
+                       dev_err(&dispc.pdev->dev, "%s: framedone timeout\n",
+                               __func__);
+                       break;
+               }
+       }
+       dispc_ovl_enable(OMAP_DSS_GFX, false);
+
+       /* Clear all irq bits before continuing */
+       dispc_clear_irqstatus(0xffffffff);
+
+       /* Restore the original state to LCD1 output gates */
+       REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4);
+}
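
The 0x1f written over DISPC_CONFIG[8:4] sets all five output-gating bits (pixel data, pixel clock, hsync, vsync and ac-bias gating, going by the OMAP TRM layout of this register), so the dummy 8x1 frame never reaches a physical output. The gate state sampled at entry is restored once the frame has completed and the irq status has been cleared.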
+
 /* DISPC HW IP initialisation */
 static int dispc_bind(struct device *dev, struct device *master, void *data)
 {
@@ -4067,6 +4374,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
        if (r)
                return r;
 
+       r = dispc_errata_i734_wa_init();
+       if (r)
+               return r;
+
        dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
        if (!dispc_mem) {
                DSSERR("can't get IORESOURCE_MEM DISPC\n");
@@ -4100,6 +4411,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
                }
        }
 
+       r = dispc_init_gamma_tables();
+       if (r)
+               return r;
+
        pm_runtime_enable(&pdev->dev);
 
        r = dispc_runtime_get();
@@ -4127,6 +4442,8 @@ static void dispc_unbind(struct device *dev, struct device *master,
                               void *data)
 {
        pm_runtime_disable(dev);
+
+       dispc_errata_i734_wa_fini();
 }
 
 static const struct component_ops dispc_component_ops = {
@@ -4169,7 +4486,11 @@ static int dispc_runtime_resume(struct device *dev)
        if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
                _omap_dispc_initial_config();
 
+               dispc_errata_i734_wa();
+
                dispc_restore_context();
+
+               dispc_restore_gamma_tables();
        }
 
        dispc.is_enabled = true;
index 483744223dd1a34bb06a08777c3886d721d963d3..bc1d8126ee878a481d0159965eb761e67363f2b2 100644
 #define DISPC_MSTANDBY_CTRL            0x0858
 #define DISPC_GLOBAL_MFLAG_ATTRIBUTE   0x085C
 
+#define DISPC_GAMMA_TABLE0             0x0630
+#define DISPC_GAMMA_TABLE1             0x0634
+#define DISPC_GAMMA_TABLE2             0x0638
+#define DISPC_GAMMA_TABLE3             0x0850
+
 /* DISPC overlay registers */
 #define DISPC_OVL_BA0(n)               (DISPC_OVL_BASE(n) + \
                                        DISPC_BA0_OFFSET(n))
index 038c15b04215c8d11adfbde220be1f25b54d75e3..34fad2376f8ddf1d713d1adaac09e9d787cea50f 100644
@@ -18,8 +18,8 @@
  */
 
 #include <linux/kernel.h>
-#include <video/omapdss.h>
 
+#include "omapdss.h"
 #include "dispc.h"
 
 static const struct dispc_coef coef3_M8[8] = {
index 9f3dd09b0a6c79bab81940860c99c45789542d1c..8dcdd7cf993706afe59005a10794bc937d763b55 100644
@@ -28,7 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 
index 97ea602578849c966ec0f50e69c725fb666f5018..b268295b76cfe996725869acb668b645202b39c2 100644
 #include <linux/clk.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 
-#define HSDIV_DISPC    0
-
 struct dpi_data {
        struct platform_device *pdev;
 
        struct regulator *vdds_dsi_reg;
+       enum dss_clk_source clk_src;
        struct dss_pll *pll;
 
        struct mutex lock;
@@ -69,7 +67,7 @@ static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev)
        return dev_get_drvdata(&pdev->dev);
 }
 
-static struct dss_pll *dpi_get_pll(enum omap_channel channel)
+static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
 {
        /*
         * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
@@ -83,64 +81,51 @@ static struct dss_pll *dpi_get_pll(enum omap_channel channel)
        case OMAPDSS_VER_OMAP3630:
        case OMAPDSS_VER_AM35xx:
        case OMAPDSS_VER_AM43xx:
-               return NULL;
+               return DSS_CLK_SRC_FCK;
 
        case OMAPDSS_VER_OMAP4430_ES1:
        case OMAPDSS_VER_OMAP4430_ES2:
        case OMAPDSS_VER_OMAP4:
                switch (channel) {
                case OMAP_DSS_CHANNEL_LCD:
-                       return dss_pll_find("dsi0");
+                       return DSS_CLK_SRC_PLL1_1;
                case OMAP_DSS_CHANNEL_LCD2:
-                       return dss_pll_find("dsi1");
+                       return DSS_CLK_SRC_PLL2_1;
                default:
-                       return NULL;
+                       return DSS_CLK_SRC_FCK;
                }
 
        case OMAPDSS_VER_OMAP5:
                switch (channel) {
                case OMAP_DSS_CHANNEL_LCD:
-                       return dss_pll_find("dsi0");
+                       return DSS_CLK_SRC_PLL1_1;
                case OMAP_DSS_CHANNEL_LCD3:
-                       return dss_pll_find("dsi1");
+                       return DSS_CLK_SRC_PLL2_1;
+               case OMAP_DSS_CHANNEL_LCD2:
                default:
-                       return NULL;
+                       return DSS_CLK_SRC_FCK;
                }
 
        case OMAPDSS_VER_DRA7xx:
                switch (channel) {
                case OMAP_DSS_CHANNEL_LCD:
+                       return DSS_CLK_SRC_PLL1_1;
                case OMAP_DSS_CHANNEL_LCD2:
-                       return dss_pll_find("video0");
+                       return DSS_CLK_SRC_PLL1_3;
                case OMAP_DSS_CHANNEL_LCD3:
-                       return dss_pll_find("video1");
+                       return DSS_CLK_SRC_PLL2_1;
                default:
-                       return NULL;
+                       return DSS_CLK_SRC_FCK;
                }
 
        default:
-               return NULL;
-       }
-}
-
-static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
-{
-       switch (channel) {
-       case OMAP_DSS_CHANNEL_LCD:
-               return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC;
-       case OMAP_DSS_CHANNEL_LCD2:
-               return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
-       case OMAP_DSS_CHANNEL_LCD3:
-               return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
-       default:
-               /* this shouldn't happen */
-               WARN_ON(1);
-               return OMAP_DSS_CLK_SRC_FCK;
+               return DSS_CLK_SRC_FCK;
        }
 }
 
 struct dpi_clk_calc_ctx {
        struct dss_pll *pll;
+       unsigned clkout_idx;
 
        /* inputs */
 
@@ -148,7 +133,7 @@ struct dpi_clk_calc_ctx {
 
        /* outputs */
 
-       struct dss_pll_clock_info dsi_cinfo;
+       struct dss_pll_clock_info pll_cinfo;
        unsigned long fck;
        struct dispc_clock_info dispc_cinfo;
 };
@@ -193,8 +178,8 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
        if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000)
                return false;
 
-       ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
-       ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
+       ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
+       ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
 
        return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
                        dpi_calc_dispc_cb, ctx);
@@ -207,12 +192,12 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
 {
        struct dpi_clk_calc_ctx *ctx = data;
 
-       ctx->dsi_cinfo.n = n;
-       ctx->dsi_cinfo.m = m;
-       ctx->dsi_cinfo.fint = fint;
-       ctx->dsi_cinfo.clkdco = clkdco;
+       ctx->pll_cinfo.n = n;
+       ctx->pll_cinfo.m = m;
+       ctx->pll_cinfo.fint = fint;
+       ctx->pll_cinfo.clkdco = clkdco;
 
-       return dss_pll_hsdiv_calc(ctx->pll, clkdco,
+       return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
                ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
                dpi_calc_hsdiv_cb, ctx);
 }
@@ -227,25 +212,39 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
                        dpi_calc_dispc_cb, ctx);
 }
 
-static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck,
+static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
                struct dpi_clk_calc_ctx *ctx)
 {
        unsigned long clkin;
-       unsigned long pll_min, pll_max;
 
        memset(ctx, 0, sizeof(*ctx));
        ctx->pll = dpi->pll;
-       ctx->pck_min = pck - 1000;
-       ctx->pck_max = pck + 1000;
+       ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
 
-       pll_min = 0;
-       pll_max = 0;
+       clkin = clk_get_rate(dpi->pll->clkin);
 
-       clkin = clk_get_rate(ctx->pll->clkin);
+       if (dpi->pll->hw->type == DSS_PLL_TYPE_A) {
+               unsigned long pll_min, pll_max;
 
-       return dss_pll_calc(ctx->pll, clkin,
-                       pll_min, pll_max,
-                       dpi_calc_pll_cb, ctx);
+               ctx->pck_min = pck - 1000;
+               ctx->pck_max = pck + 1000;
+
+               pll_min = 0;
+               pll_max = 0;
+
+               return dss_pll_calc_a(ctx->pll, clkin,
+                               pll_min, pll_max,
+                               dpi_calc_pll_cb, ctx);
+       } else { /* DSS_PLL_TYPE_B */
+               dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo);
+
+               ctx->dispc_cinfo.lck_div = 1;
+               ctx->dispc_cinfo.pck_div = 1;
+               ctx->dispc_cinfo.lck = ctx->pll_cinfo.clkout[0];
+               ctx->dispc_cinfo.pck = ctx->dispc_cinfo.lck;
+
+               return true;
+       }
 }
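
The rename from dpi_dsi_clk_calc() reflects that the PLL feeding DPI no longer has to be a DSI PLL. Type-A PLLs are still searched through the existing callback pyramid, while type-B (HDMI-style) PLLs are computed directly for the target rate, which is why that branch can fix lck_div = pck_div = 1 and use clkout[0] for both clocks. The type-A flow, as wired up in this file:

    /*
     *   dss_pll_calc_a(pll, clkin, pll_min, pll_max, dpi_calc_pll_cb, ctx)
     *     -> dpi_calc_pll_cb(n, m, fint, clkdco)
     *        dss_pll_hsdiv_calc_a(pll, clkdco, ..., dpi_calc_hsdiv_cb, ctx)
     *          -> dpi_calc_hsdiv_cb(m_dispc, dispc)
     *             dispc_div_calc(dispc, pck_min, pck_max,
     *                            dpi_calc_dispc_cb, ctx)
     *               -> dpi_calc_dispc_cb(lckd, pckd, lck, pck)
     */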
 
 static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
@@ -279,7 +278,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
 
 
 
-static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
+static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
                unsigned long pck_req, unsigned long *fck, int *lck_div,
                int *pck_div)
 {
@@ -287,20 +286,19 @@ static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
        int r;
        bool ok;
 
-       ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx);
+       ok = dpi_pll_clk_calc(dpi, pck_req, &ctx);
        if (!ok)
                return -EINVAL;
 
-       r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo);
+       r = dss_pll_set_config(dpi->pll, &ctx.pll_cinfo);
        if (r)
                return r;
 
-       dss_select_lcd_clk_source(channel,
-                       dpi_get_alt_clk_src(channel));
+       dss_select_lcd_clk_source(channel, dpi->clk_src);
 
        dpi->mgr_config.clock_info = ctx.dispc_cinfo;
 
-       *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC];
+       *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
        *lck_div = ctx.dispc_cinfo.lck_div;
        *pck_div = ctx.dispc_cinfo.pck_div;
 
@@ -342,7 +340,7 @@ static int dpi_set_mode(struct dpi_data *dpi)
        int r = 0;
 
        if (dpi->pll)
-               r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck,
+               r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
                                &lck_div, &pck_div);
        else
                r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
@@ -419,7 +417,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
        if (dpi->pll) {
                r = dss_pll_enable(dpi->pll);
                if (r)
-                       goto err_dsi_pll_init;
+                       goto err_pll_init;
        }
 
        r = dpi_set_mode(dpi);
@@ -442,7 +440,7 @@ err_mgr_enable:
 err_set_mode:
        if (dpi->pll)
                dss_pll_disable(dpi->pll);
-err_dsi_pll_init:
+err_pll_init:
 err_src_sel:
        dispc_runtime_put();
 err_get_dispc:
@@ -465,7 +463,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
        dss_mgr_disable(channel);
 
        if (dpi->pll) {
-               dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
+               dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
                dss_pll_disable(dpi->pll);
        }
 
@@ -524,11 +522,11 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
                return -EINVAL;
 
        if (dpi->pll) {
-               ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx);
+               ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
                if (!ok)
                        return -EINVAL;
 
-               fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC];
+               fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
        } else {
                ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
                if (!ok)
@@ -558,7 +556,7 @@ static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
        mutex_unlock(&dpi->lock);
 }
 
-static int dpi_verify_dsi_pll(struct dss_pll *pll)
+static int dpi_verify_pll(struct dss_pll *pll)
 {
        int r;
 
@@ -602,16 +600,14 @@ static void dpi_init_pll(struct dpi_data *dpi)
        if (dpi->pll)
                return;
 
-       pll = dpi_get_pll(dpi->output.dispc_channel);
+       dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel);
+
+       pll = dss_pll_find_by_src(dpi->clk_src);
        if (!pll)
                return;
 
-       /* On DRA7 we need to set a mux to use the PLL */
-       if (omapdss_get_version() == OMAPDSS_VER_DRA7xx)
-               dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel);
-
-       if (dpi_verify_dsi_pll(pll)) {
-               DSSWARN("DSI PLL not operational\n");
+       if (dpi_verify_pll(pll)) {
+               DSSWARN("PLL not operational\n");
                return;
        }
 
index 8730646a0cbb260c4482d9c2dad58c9c89d9f597..e1be5e795cd807aadb0cff143ca526fb8612440b 100644
@@ -42,9 +42,9 @@
 #include <linux/of_platform.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
 #include <video/mipi_display.h>
 
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 
@@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct regulator *vdds_dsi;
-       int r;
 
        if (dsi->vdds_dsi_reg != NULL)
                return 0;
@@ -1180,15 +1179,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
                return PTR_ERR(vdds_dsi);
        }
 
-       if (regulator_can_change_voltage(vdds_dsi)) {
-               r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
-               if (r) {
-                       devm_regulator_put(vdds_dsi);
-                       DSSERR("can't set the DSI regulator voltage\n");
-                       return r;
-               }
-       }
-
        dsi->vdds_dsi_reg = vdds_dsi;
 
        return 0;
@@ -1271,7 +1261,7 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
        unsigned long r;
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
 
-       if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
+       if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) {
                /* DSI FCLK source is DSS_CLK_FCK */
                r = clk_get_rate(dsi->dss_clk);
        } else {
@@ -1484,7 +1474,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
-       enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
+       enum dss_clk_source dispc_clk_src, dsi_clk_src;
        int dsi_module = dsi->module_id;
        struct dss_pll *pll = &dsi->pll;
 
@@ -1504,28 +1494,27 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
                        cinfo->clkdco, cinfo->m);
 
        seq_printf(s,   "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
-                       dss_feat_get_clk_source_name(dsi_module == 0 ?
-                               OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
-                               OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
+                       dss_get_clk_source_name(dsi_module == 0 ?
+                               DSS_CLK_SRC_PLL1_1 :
+                               DSS_CLK_SRC_PLL2_1),
                        cinfo->clkout[HSDIV_DISPC],
                        cinfo->mX[HSDIV_DISPC],
-                       dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
+                       dispc_clk_src == DSS_CLK_SRC_FCK ?
                        "off" : "on");
 
        seq_printf(s,   "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
-                       dss_feat_get_clk_source_name(dsi_module == 0 ?
-                               OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
-                               OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
+                       dss_get_clk_source_name(dsi_module == 0 ?
+                               DSS_CLK_SRC_PLL1_2 :
+                               DSS_CLK_SRC_PLL2_2),
                        cinfo->clkout[HSDIV_DSI],
                        cinfo->mX[HSDIV_DSI],
-                       dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
+                       dsi_clk_src == DSS_CLK_SRC_FCK ?
                        "off" : "on");
 
        seq_printf(s,   "- DSI%d -\n", dsi_module + 1);
 
-       seq_printf(s,   "dsi fclk source = %s (%s)\n",
-                       dss_get_generic_clk_source_name(dsi_clk_src),
-                       dss_feat_get_clk_source_name(dsi_clk_src));
+       seq_printf(s,   "dsi fclk source = %s\n",
+                       dss_get_clk_source_name(dsi_clk_src));
 
        seq_printf(s,   "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
 
@@ -4111,8 +4100,8 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
        int r;
 
        dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
-                       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
-                       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC);
+                       DSS_CLK_SRC_PLL1_1 :
+                       DSS_CLK_SRC_PLL2_1);
 
        if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
                r = dss_mgr_register_framedone_handler(channel,
@@ -4159,7 +4148,7 @@ err1:
                dss_mgr_unregister_framedone_handler(channel,
                                dsi_framedone_irq_callback, dsidev);
 err:
-       dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
+       dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
        return r;
 }
 
@@ -4172,7 +4161,7 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev,
                dss_mgr_unregister_framedone_handler(channel,
                                dsi_framedone_irq_callback, dsidev);
 
-       dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK);
+       dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
 }
 
 static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
@@ -4206,8 +4195,8 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
                goto err1;
 
        dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
-                       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
-                       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI);
+                       DSS_CLK_SRC_PLL1_2 :
+                       DSS_CLK_SRC_PLL2_2);
 
        DSSDBG("PLL OK\n");
 
@@ -4239,7 +4228,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
 err3:
        dsi_cio_uninit(dsidev);
 err2:
-       dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
+       dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
 err1:
        dss_pll_disable(&dsi->pll);
 err0:
@@ -4261,7 +4250,7 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev,
        dsi_vc_enable(dsidev, 2, 0);
        dsi_vc_enable(dsidev, 3, 0);
 
-       dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
+       dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
        dsi_cio_uninit(dsidev);
        dsi_pll_uninit(dsidev, disconnect_lanes);
 }
@@ -4462,7 +4451,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
        ctx->dsi_cinfo.fint = fint;
        ctx->dsi_cinfo.clkdco = clkdco;
 
-       return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
+       return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
                        dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
                        dsi_cm_calc_hsdiv_cb, ctx);
 }
@@ -4501,7 +4490,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
        pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
        pll_max = cfg->hs_clk_max * 4;
 
-       return dss_pll_calc(ctx->pll, clkin,
+       return dss_pll_calc_a(ctx->pll, clkin,
                        pll_min, pll_max,
                        dsi_cm_calc_pll_cb, ctx);
 }
@@ -4760,7 +4749,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
        ctx->dsi_cinfo.fint = fint;
        ctx->dsi_cinfo.clkdco = clkdco;
 
-       return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min,
+       return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
                        dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
                        dsi_vm_calc_hsdiv_cb, ctx);
 }
@@ -4802,7 +4791,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
                pll_max = byteclk_max * 4 * 4;
        }
 
-       return dss_pll_calc(ctx->pll, clkin,
+       return dss_pll_calc_a(ctx->pll, clkin,
                        pll_min, pll_max,
                        dsi_vm_calc_pll_cb, ctx);
 }
@@ -5148,6 +5137,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
 };
 
 static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
+       .type = DSS_PLL_TYPE_A,
+
        .n_max = (1 << 7) - 1,
        .m_max = (1 << 11) - 1,
        .mX_max = (1 << 4) - 1,
@@ -5173,6 +5164,8 @@ static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
 };
 
 static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
+       .type = DSS_PLL_TYPE_A,
+
        .n_max = (1 << 8) - 1,
        .m_max = (1 << 12) - 1,
        .mX_max = (1 << 5) - 1,
@@ -5198,6 +5191,8 @@ static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
 };
 
 static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
+       .type = DSS_PLL_TYPE_A,
+
        .n_max = (1 << 8) - 1,
        .m_max = (1 << 12) - 1,
        .mX_max = (1 << 5) - 1,
index bf407b6ba15ca0002166a0704124eeda0fb2052e..e256d879b25ccc379cf93201b155f31e00269af2 100644
@@ -18,8 +18,7 @@
 #include <linux/of.h>
 #include <linux/seq_file.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 
 struct device_node *
@@ -126,15 +125,16 @@ u32 dss_of_port_get_port_number(struct device_node *port)
 
 static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
 {
-       struct device_node *np;
+       struct device_node *np, *np_parent;
 
        np = of_parse_phandle(node, "remote-endpoint", 0);
        if (!np)
                return NULL;
 
-       np = of_get_next_parent(np);
+       /* of_get_parent() keeps np's refcount; we drop it explicitly */
+       np_parent = of_get_parent(np);
+       of_node_put(np);
 
-       return np;
+       return np_parent;
 }
 
 struct device_node *
index f95ff319e68e9f20aa8fb8cf3b7f583d0c4c2373..14887d5b02e51f22f1271970e7905937e91fe75b 100644
@@ -30,6 +30,7 @@
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/clk.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/gfp.h>
@@ -41,8 +42,7 @@
 #include <linux/suspend.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 
@@ -75,6 +75,8 @@ struct dss_features {
        const enum omap_display_type *ports;
        int num_ports;
        int (*dpi_select_source)(int port, enum omap_channel channel);
+       int (*select_lcd_source)(enum omap_channel channel,
+               enum dss_clk_source clk_src);
 };
 
 static struct {
@@ -91,9 +93,9 @@ static struct {
        unsigned long   cache_prate;
        struct dispc_clock_info cache_dispc_cinfo;
 
-       enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI];
-       enum omap_dss_clk_source dispc_clk_source;
-       enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
+       enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
+       enum dss_clk_source dispc_clk_source;
+       enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
 
        bool            ctx_valid;
        u32             ctx[DSS_SZ_REGS / sizeof(u32)];
@@ -105,11 +107,14 @@ static struct {
 } dss;
 
 static const char * const dss_generic_clk_source_names[] = {
-       [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]  = "DSI_PLL_HSDIV_DISPC",
-       [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]    = "DSI_PLL_HSDIV_DSI",
-       [OMAP_DSS_CLK_SRC_FCK]                  = "DSS_FCK",
-       [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC",
-       [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI]   = "DSI_PLL2_HSDIV_DSI",
+       [DSS_CLK_SRC_FCK]       = "FCK",
+       [DSS_CLK_SRC_PLL1_1]    = "PLL1:1",
+       [DSS_CLK_SRC_PLL1_2]    = "PLL1:2",
+       [DSS_CLK_SRC_PLL1_3]    = "PLL1:3",
+       [DSS_CLK_SRC_PLL2_1]    = "PLL2:1",
+       [DSS_CLK_SRC_PLL2_2]    = "PLL2:2",
+       [DSS_CLK_SRC_PLL2_3]    = "PLL2:3",
+       [DSS_CLK_SRC_HDMI_PLL]  = "HDMI PLL",
 };
 
 static bool dss_initialized;
@@ -202,68 +207,70 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
                1 << shift, val << shift);
 }
 
-void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
+static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src,
        enum omap_channel channel)
 {
        unsigned shift, val;
 
        if (!dss.syscon_pll_ctrl)
-               return;
+               return -EINVAL;
 
        switch (channel) {
        case OMAP_DSS_CHANNEL_LCD:
                shift = 3;
 
-               switch (pll_id) {
-               case DSS_PLL_VIDEO1:
+               switch (clk_src) {
+               case DSS_CLK_SRC_PLL1_1:
                        val = 0; break;
-               case DSS_PLL_HDMI:
+               case DSS_CLK_SRC_HDMI_PLL:
                        val = 1; break;
                default:
                        DSSERR("error in PLL mux config for LCD\n");
-                       return;
+                       return -EINVAL;
                }
 
                break;
        case OMAP_DSS_CHANNEL_LCD2:
                shift = 5;
 
-               switch (pll_id) {
-               case DSS_PLL_VIDEO1:
+               switch (clk_src) {
+               case DSS_CLK_SRC_PLL1_3:
                        val = 0; break;
-               case DSS_PLL_VIDEO2:
+               case DSS_CLK_SRC_PLL2_3:
                        val = 1; break;
-               case DSS_PLL_HDMI:
+               case DSS_CLK_SRC_HDMI_PLL:
                        val = 2; break;
                default:
                        DSSERR("error in PLL mux config for LCD2\n");
-                       return;
+                       return -EINVAL;
                }
 
                break;
        case OMAP_DSS_CHANNEL_LCD3:
                shift = 7;
 
-               switch (pll_id) {
-               case DSS_PLL_VIDEO1:
-                       val = 1; break;
-               case DSS_PLL_VIDEO2:
+               switch (clk_src) {
+               case DSS_CLK_SRC_PLL2_1:
                        val = 0; break;
-               case DSS_PLL_HDMI:
+               case DSS_CLK_SRC_PLL1_3:
+                       val = 1; break;
+               case DSS_CLK_SRC_HDMI_PLL:
                        val = 2; break;
                default:
                        DSSERR("error in PLL mux config for LCD3\n");
-                       return;
+                       return -EINVAL;
                }
 
                break;
        default:
                DSSERR("error in PLL mux config\n");
-               return;
+               return -EINVAL;
        }
 
        regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
                0x3 << shift, val << shift);
+
+       return 0;
 }
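
Keying the mux on the clock source rather than the bare PLL id lets the per-SoC LCD mux helpers below pass their source straight through, and an unsupported combination now propagates an error instead of being silently ignored. A hypothetical call from within dss.c, mirroring what dss_lcd_clk_mux_dra7() does:

    int r;

    /* route PLL2 HSDIV output 3 to the LCD2 manager (DRA7 wiring) */
    r = dss_ctrl_pll_set_control_mux(DSS_CLK_SRC_PLL2_3,
                                     OMAP_DSS_CHANNEL_LCD2);
    if (r)
            DSSERR("no PLL mux route for that source/channel\n");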
 
 void dss_sdi_init(int datapairs)
@@ -353,14 +360,14 @@ void dss_sdi_disable(void)
        REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
 }
 
-const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src)
+const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
 {
        return dss_generic_clk_source_names[clk_src];
 }
 
 void dss_dump_clocks(struct seq_file *s)
 {
-       const char *fclk_name, *fclk_real_name;
+       const char *fclk_name;
        unsigned long fclk_rate;
 
        if (dss_runtime_get())
@@ -368,12 +375,11 @@ void dss_dump_clocks(struct seq_file *s)
 
        seq_printf(s, "- DSS -\n");
 
-       fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
-       fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
+       fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK);
        fclk_rate = clk_get_rate(dss.dss_clk);
 
-       seq_printf(s, "%s (%s) = %lu\n",
-                       fclk_name, fclk_real_name,
+       seq_printf(s, "%s = %lu\n",
+                       fclk_name,
                        fclk_rate);
 
        dss_runtime_put();
@@ -402,19 +408,42 @@ static void dss_dump_regs(struct seq_file *s)
 #undef DUMPREG
 }
 
-static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
+static int dss_get_channel_index(enum omap_channel channel)
+{
+       switch (channel) {
+       case OMAP_DSS_CHANNEL_LCD:
+               return 0;
+       case OMAP_DSS_CHANNEL_LCD2:
+               return 1;
+       case OMAP_DSS_CHANNEL_LCD3:
+               return 2;
+       default:
+               WARN_ON(1);
+               return 0;
+       }
+}
+
+static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
 {
        int b;
        u8 start, end;
 
+       /*
+        * We always use PRCM clock as the DISPC func clock, except on DSS3,
+        * where we don't have separate DISPC and LCD clock sources.
+        */
+       if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) &&
+               clk_src != DSS_CLK_SRC_FCK))
+               return;
+
        switch (clk_src) {
-       case OMAP_DSS_CLK_SRC_FCK:
+       case DSS_CLK_SRC_FCK:
                b = 0;
                break;
-       case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
+       case DSS_CLK_SRC_PLL1_1:
                b = 1;
                break;
-       case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
+       case DSS_CLK_SRC_PLL2_1:
                b = 2;
                break;
        default:
@@ -430,19 +459,19 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
 }
 
 void dss_select_dsi_clk_source(int dsi_module,
-               enum omap_dss_clk_source clk_src)
+               enum dss_clk_source clk_src)
 {
        int b, pos;
 
        switch (clk_src) {
-       case OMAP_DSS_CLK_SRC_FCK:
+       case DSS_CLK_SRC_FCK:
                b = 0;
                break;
-       case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI:
+       case DSS_CLK_SRC_PLL1_2:
                BUG_ON(dsi_module != 0);
                b = 1;
                break;
-       case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI:
+       case DSS_CLK_SRC_PLL2_2:
                BUG_ON(dsi_module != 1);
                b = 1;
                break;
@@ -457,59 +486,125 @@ void dss_select_dsi_clk_source(int dsi_module,
        dss.dsi_clk_source[dsi_module] = clk_src;
 }
 
+static int dss_lcd_clk_mux_dra7(enum omap_channel channel,
+       enum dss_clk_source clk_src)
+{
+       const u8 ctrl_bits[] = {
+               [OMAP_DSS_CHANNEL_LCD] = 0,
+               [OMAP_DSS_CHANNEL_LCD2] = 12,
+               [OMAP_DSS_CHANNEL_LCD3] = 19,
+       };
+
+       u8 ctrl_bit = ctrl_bits[channel];
+       int r;
+
+       if (clk_src == DSS_CLK_SRC_FCK) {
+               /* LCDx_CLK_SWITCH */
+               REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+               return 0;
+       }
+
+       r = dss_ctrl_pll_set_control_mux(clk_src, channel);
+       if (r)
+               return r;
+
+       REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+
+       return 0;
+}
+
+static int dss_lcd_clk_mux_omap5(enum omap_channel channel,
+       enum dss_clk_source clk_src)
+{
+       const u8 ctrl_bits[] = {
+               [OMAP_DSS_CHANNEL_LCD] = 0,
+               [OMAP_DSS_CHANNEL_LCD2] = 12,
+               [OMAP_DSS_CHANNEL_LCD3] = 19,
+       };
+       const enum dss_clk_source allowed_plls[] = {
+               [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
+               [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_FCK,
+               [OMAP_DSS_CHANNEL_LCD3] = DSS_CLK_SRC_PLL2_1,
+       };
+
+       u8 ctrl_bit = ctrl_bits[channel];
+
+       if (clk_src == DSS_CLK_SRC_FCK) {
+               /* LCDx_CLK_SWITCH */
+               REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+               return 0;
+       }
+
+       if (WARN_ON(allowed_plls[channel] != clk_src))
+               return -EINVAL;
+
+       REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+
+       return 0;
+}
+
+static int dss_lcd_clk_mux_omap4(enum omap_channel channel,
+       enum dss_clk_source clk_src)
+{
+       const u8 ctrl_bits[] = {
+               [OMAP_DSS_CHANNEL_LCD] = 0,
+               [OMAP_DSS_CHANNEL_LCD2] = 12,
+       };
+       const enum dss_clk_source allowed_plls[] = {
+               [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
+               [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1,
+       };
+
+       u8 ctrl_bit = ctrl_bits[channel];
+
+       if (clk_src == DSS_CLK_SRC_FCK) {
+               /* LCDx_CLK_SWITCH */
+               REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+               return 0;
+       }
+
+       if (WARN_ON(allowed_plls[channel] != clk_src))
+               return -EINVAL;
+
+       REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+
+       return 0;
+}
+
 void dss_select_lcd_clk_source(enum omap_channel channel,
-               enum omap_dss_clk_source clk_src)
+               enum dss_clk_source clk_src)
 {
-       int b, ix, pos;
+       int idx = dss_get_channel_index(channel);
+       int r;
 
        if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
                dss_select_dispc_clk_source(clk_src);
+               dss.lcd_clk_source[idx] = clk_src;
                return;
        }
 
-       switch (clk_src) {
-       case OMAP_DSS_CLK_SRC_FCK:
-               b = 0;
-               break;
-       case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
-               BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
-               b = 1;
-               break;
-       case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
-               BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 &&
-                      channel != OMAP_DSS_CHANNEL_LCD3);
-               b = 1;
-               break;
-       default:
-               BUG();
+       r = dss.feat->select_lcd_source(channel, clk_src);
+       if (r)
                return;
-       }
-
-       pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
-            (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19);
-       REG_FLD_MOD(DSS_CONTROL, b, pos, pos);  /* LCDx_CLK_SWITCH */
 
-       ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
-           (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
-       dss.lcd_clk_source[ix] = clk_src;
+       dss.lcd_clk_source[idx] = clk_src;
 }
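
Callers stay generic: each SoC's restrictions live in its own mux function behind dss.feat->select_lcd_source, and the cached source is only updated when the mux accepts the request. For example, the DRA7 DPI path ends up doing (sources per dpi_get_clk_src() earlier in this diff):

    /* enable: LCD2 is fed from PLL1 HSDIV output 3 on DRA7 */
    dss_select_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2, DSS_CLK_SRC_PLL1_3);
    /* disable: back to the PRCM functional clock */
    dss_select_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2, DSS_CLK_SRC_FCK);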
 
-enum omap_dss_clk_source dss_get_dispc_clk_source(void)
+enum dss_clk_source dss_get_dispc_clk_source(void)
 {
        return dss.dispc_clk_source;
 }
 
-enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module)
+enum dss_clk_source dss_get_dsi_clk_source(int dsi_module)
 {
        return dss.dsi_clk_source[dsi_module];
 }
 
-enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
 {
        if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
-               int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
-                       (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
-               return dss.lcd_clk_source[ix];
+               int idx = dss_get_channel_index(channel);
+               return dss.lcd_clk_source[idx];
        } else {
                /* LCD_CLK source is the same as DISPC_FCLK source for
                 * OMAP2 and OMAP3 */
@@ -858,6 +953,7 @@ static const struct dss_features omap44xx_dss_feats = {
        .dpi_select_source      =       &dss_dpi_select_source_omap4,
        .ports                  =       omap2plus_ports,
        .num_ports              =       ARRAY_SIZE(omap2plus_ports),
+       .select_lcd_source      =       &dss_lcd_clk_mux_omap4,
 };
 
 static const struct dss_features omap54xx_dss_feats = {
@@ -867,6 +963,7 @@ static const struct dss_features omap54xx_dss_feats = {
        .dpi_select_source      =       &dss_dpi_select_source_omap5,
        .ports                  =       omap2plus_ports,
        .num_ports              =       ARRAY_SIZE(omap2plus_ports),
+       .select_lcd_source      =       &dss_lcd_clk_mux_omap5,
 };
 
 static const struct dss_features am43xx_dss_feats = {
@@ -885,6 +982,7 @@ static const struct dss_features dra7xx_dss_feats = {
        .dpi_select_source      =       &dss_dpi_select_source_dra7xx,
        .ports                  =       dra7xx_ports,
        .num_ports              =       ARRAY_SIZE(dra7xx_ports),
+       .select_lcd_source      =       &dss_lcd_clk_mux_dra7,
 };
 
 static int dss_init_features(struct platform_device *pdev)
@@ -1142,18 +1240,18 @@ static int dss_bind(struct device *dev)
        /* Select DPLL */
        REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
 
-       dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
+       dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
 
 #ifdef CONFIG_OMAP2_DSS_VENC
        REG_FLD_MOD(DSS_CONTROL, 1, 4, 4);      /* venc dac demen */
        REG_FLD_MOD(DSS_CONTROL, 1, 3, 3);      /* venc clock 4x enable */
        REG_FLD_MOD(DSS_CONTROL, 0, 2, 2);      /* venc clock mode = normal */
 #endif
-       dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
-       dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
-       dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK;
-       dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
-       dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
+       dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
+       dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
+       dss.dispc_clk_source = DSS_CLK_SRC_FCK;
+       dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
+       dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
 
        rev = dss_read_reg(DSS_REVISION);
        printk(KERN_INFO "OMAP DSS rev %d.%d\n",
index 38e6ab50142d42bf988df7d294219b64fef5bcb6..4fd06dc41cb3ac8ebcb895cd4f88ccd45b71e600 100644
@@ -102,6 +102,20 @@ enum dss_writeback_channel {
        DSS_WB_LCD3_MGR =       7,
 };
 
+enum dss_clk_source {
+       DSS_CLK_SRC_FCK = 0,
+
+       DSS_CLK_SRC_PLL1_1,
+       DSS_CLK_SRC_PLL1_2,
+       DSS_CLK_SRC_PLL1_3,
+
+       DSS_CLK_SRC_PLL2_1,
+       DSS_CLK_SRC_PLL2_2,
+       DSS_CLK_SRC_PLL2_3,
+
+       DSS_CLK_SRC_HDMI_PLL,
+};
+
 enum dss_pll_id {
        DSS_PLL_DSI1,
        DSS_PLL_DSI2,
@@ -114,6 +128,11 @@ struct dss_pll;
 
 #define DSS_PLL_MAX_HSDIVS 4
 
+enum dss_pll_type {
+       DSS_PLL_TYPE_A,
+       DSS_PLL_TYPE_B,
+};
+
 /*
  * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7.
  * Type-B PLLs: clkout[0] refers to m2.
@@ -140,6 +159,8 @@ struct dss_pll_ops {
 };
 
 struct dss_pll_hw {
+       enum dss_pll_type type;
+
        unsigned n_max;
        unsigned m_min;
        unsigned m_max;
@@ -227,7 +248,7 @@ unsigned long dss_get_dispc_clk_rate(void);
 int dss_dpi_select_source(int port, enum omap_channel channel);
 void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
 enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
-const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
+const char *dss_get_clk_source_name(enum dss_clk_source clk_src);
 void dss_dump_clocks(struct seq_file *s);
 
 /* DSS VIDEO PLL */
@@ -244,20 +265,18 @@ void dss_debug_dump_clocks(struct seq_file *s);
 #endif
 
 void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
-void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
-       enum omap_channel channel);
 
 void dss_sdi_init(int datapairs);
 int dss_sdi_enable(void);
 void dss_sdi_disable(void);
 
 void dss_select_dsi_clk_source(int dsi_module,
-               enum omap_dss_clk_source clk_src);
+               enum dss_clk_source clk_src);
 void dss_select_lcd_clk_source(enum omap_channel channel,
-               enum omap_dss_clk_source clk_src);
-enum omap_dss_clk_source dss_get_dispc_clk_source(void);
-enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module);
-enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
+               enum dss_clk_source clk_src);
+enum dss_clk_source dss_get_dispc_clk_source(void);
+enum dss_clk_source dss_get_dsi_clk_source(int dsi_module);
+enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
 
 void dss_set_venc_output(enum omap_dss_venc_type type);
 void dss_set_dac_pwrdn_bgz(bool enable);
@@ -409,17 +428,23 @@ typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
 int dss_pll_register(struct dss_pll *pll);
 void dss_pll_unregister(struct dss_pll *pll);
 struct dss_pll *dss_pll_find(const char *name);
+struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src);
+unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
 int dss_pll_enable(struct dss_pll *pll);
 void dss_pll_disable(struct dss_pll *pll);
 int dss_pll_set_config(struct dss_pll *pll,
                const struct dss_pll_clock_info *cinfo);
 
-bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
+bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
                unsigned long out_min, unsigned long out_max,
                dss_hsdiv_calc_func func, void *data);
-bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
+bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
                unsigned long pll_min, unsigned long pll_max,
                dss_pll_calc_func func, void *data);
+
+bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
+       unsigned long target_clkout, struct dss_pll_clock_info *cinfo);
+
 int dss_pll_write_config_type_a(struct dss_pll *pll,
                const struct dss_pll_clock_info *cinfo);
 int dss_pll_write_config_type_b(struct dss_pll *pll,
index c886a2927f73f77d5e6054eec26f5d9d61427ee3..ee5b93ce2763d73cfbebd6f8b00db561bd604a2c 100644 (file)
@@ -23,8 +23,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 
@@ -50,7 +49,6 @@ struct omap_dss_features {
        const enum omap_dss_output_id *supported_outputs;
        const enum omap_color_mode *supported_color_modes;
        const enum omap_overlay_caps *overlay_caps;
-       const char * const *clksrc_names;
        const struct dss_param_range *dss_params;
 
        const enum omap_dss_rotation_type supported_rotation_types;
@@ -389,34 +387,6 @@ static const enum omap_overlay_caps omap4_dss_overlay_caps[] = {
                OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
 };
 
-static const char * const omap2_dss_clk_source_names[] = {
-       [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]  = "N/A",
-       [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]    = "N/A",
-       [OMAP_DSS_CLK_SRC_FCK]                  = "DSS_FCLK1",
-};
-
-static const char * const omap3_dss_clk_source_names[] = {
-       [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]  = "DSI1_PLL_FCLK",
-       [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]    = "DSI2_PLL_FCLK",
-       [OMAP_DSS_CLK_SRC_FCK]                  = "DSS1_ALWON_FCLK",
-};
-
-static const char * const omap4_dss_clk_source_names[] = {
-       [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]  = "PLL1_CLK1",
-       [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]    = "PLL1_CLK2",
-       [OMAP_DSS_CLK_SRC_FCK]                  = "DSS_FCLK",
-       [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1",
-       [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI]   = "PLL2_CLK2",
-};
-
-static const char * const omap5_dss_clk_source_names[] = {
-       [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC]  = "DPLL_DSI1_A_CLK1",
-       [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI]    = "DPLL_DSI1_A_CLK2",
-       [OMAP_DSS_CLK_SRC_FCK]                  = "DSS_CLK",
-       [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DPLL_DSI1_C_CLK1",
-       [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI]   = "DPLL_DSI1_C_CLK2",
-};
-
 static const struct dss_param_range omap2_dss_param_range[] = {
        [FEAT_PARAM_DSS_FCK]                    = { 0, 133000000 },
        [FEAT_PARAM_DSS_PCD]                    = { 2, 255 },
@@ -631,7 +601,6 @@ static const struct omap_dss_features omap2_dss_features = {
        .supported_outputs = omap2_dss_supported_outputs,
        .supported_color_modes = omap2_dss_supported_color_modes,
        .overlay_caps = omap2_dss_overlay_caps,
-       .clksrc_names = omap2_dss_clk_source_names,
        .dss_params = omap2_dss_param_range,
        .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
        .buffer_size_unit = 1,
@@ -652,7 +621,6 @@ static const struct omap_dss_features omap3430_dss_features = {
        .supported_outputs = omap3430_dss_supported_outputs,
        .supported_color_modes = omap3_dss_supported_color_modes,
        .overlay_caps = omap3430_dss_overlay_caps,
-       .clksrc_names = omap3_dss_clk_source_names,
        .dss_params = omap3_dss_param_range,
        .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
        .buffer_size_unit = 1,
@@ -676,7 +644,6 @@ static const struct omap_dss_features am35xx_dss_features = {
        .supported_outputs = omap3430_dss_supported_outputs,
        .supported_color_modes = omap3_dss_supported_color_modes,
        .overlay_caps = omap3430_dss_overlay_caps,
-       .clksrc_names = omap3_dss_clk_source_names,
        .dss_params = omap3_dss_param_range,
        .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
        .buffer_size_unit = 1,
@@ -696,7 +663,6 @@ static const struct omap_dss_features am43xx_dss_features = {
        .supported_outputs = am43xx_dss_supported_outputs,
        .supported_color_modes = omap3_dss_supported_color_modes,
        .overlay_caps = omap3430_dss_overlay_caps,
-       .clksrc_names = omap2_dss_clk_source_names,
        .dss_params = am43xx_dss_param_range,
        .supported_rotation_types = OMAP_DSS_ROT_DMA,
        .buffer_size_unit = 1,
@@ -716,7 +682,6 @@ static const struct omap_dss_features omap3630_dss_features = {
        .supported_outputs = omap3630_dss_supported_outputs,
        .supported_color_modes = omap3_dss_supported_color_modes,
        .overlay_caps = omap3630_dss_overlay_caps,
-       .clksrc_names = omap3_dss_clk_source_names,
        .dss_params = omap3_dss_param_range,
        .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
        .buffer_size_unit = 1,
@@ -738,7 +703,6 @@ static const struct omap_dss_features omap4430_es1_0_dss_features  = {
        .supported_outputs = omap4_dss_supported_outputs,
        .supported_color_modes = omap4_dss_supported_color_modes,
        .overlay_caps = omap4_dss_overlay_caps,
-       .clksrc_names = omap4_dss_clk_source_names,
        .dss_params = omap4_dss_param_range,
        .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
        .buffer_size_unit = 16,
@@ -759,7 +723,6 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
        .supported_outputs = omap4_dss_supported_outputs,
        .supported_color_modes = omap4_dss_supported_color_modes,
        .overlay_caps = omap4_dss_overlay_caps,
-       .clksrc_names = omap4_dss_clk_source_names,
        .dss_params = omap4_dss_param_range,
        .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
        .buffer_size_unit = 16,
@@ -780,7 +743,6 @@ static const struct omap_dss_features omap4_dss_features = {
        .supported_outputs = omap4_dss_supported_outputs,
        .supported_color_modes = omap4_dss_supported_color_modes,
        .overlay_caps = omap4_dss_overlay_caps,
-       .clksrc_names = omap4_dss_clk_source_names,
        .dss_params = omap4_dss_param_range,
        .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
        .buffer_size_unit = 16,
@@ -801,7 +763,6 @@ static const struct omap_dss_features omap5_dss_features = {
        .supported_outputs = omap5_dss_supported_outputs,
        .supported_color_modes = omap4_dss_supported_color_modes,
        .overlay_caps = omap4_dss_overlay_caps,
-       .clksrc_names = omap5_dss_clk_source_names,
        .dss_params = omap5_dss_param_range,
        .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
        .buffer_size_unit = 16,
@@ -859,11 +820,6 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
                        color_mode;
 }
 
-const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
-{
-       return omap_current_dss_features->clksrc_names[id];
-}
-
 u32 dss_feat_get_buffer_size_unit(void)
 {
        return omap_current_dss_features->buffer_size_unit;
index 3d67d39f192ff1d5b0ff2cc30c7071fab0683524..bb4b7f0e642be8a9028fcdac5b41d287ba7c1cc6 100644 (file)
@@ -91,7 +91,6 @@ unsigned long dss_feat_get_param_max(enum dss_range_param param);
 enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane);
 bool dss_feat_color_mode_supported(enum omap_plane plane,
                enum omap_color_mode color_mode);
-const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
 
 u32 dss_feat_get_buffer_size_unit(void);       /* in bytes */
 u32 dss_feat_get_burst_size_unit(void);                /* in bytes */
index 53616b02b61371b1eb0d6c0c6819cb931cac39a9..63e71154586591b4b213ce2c43f60c5b3b80668d 100644 (file)
@@ -23,8 +23,9 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/hdmi.h>
-#include <video/omapdss.h>
+#include <sound/omap-hdmi-audio.h>
 
+#include "omapdss.h"
 #include "dss.h"
 
 /* HDMI Wrapper */
@@ -240,6 +241,7 @@ struct hdmi_pll_data {
 
        void __iomem *base;
 
+       struct platform_device *pdev;
        struct hdmi_wp_data *wp;
 };
 
@@ -306,8 +308,6 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
 
 /* HDMI PLL funcs */
 void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
-void hdmi_pll_compute(struct hdmi_pll_data *pll,
-       unsigned long target_tmds, struct dss_pll_clock_info *pi);
 int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
        struct hdmi_wp_data *wp);
 void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
index f892ae157ff3b429fc1532da85daaf65871f5e08..cbd28dfdb86ac4a0b0c1c571eacf9a26d998a8b3 100644 (file)
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <linux/of.h>
 #include <sound/omap-hdmi-audio.h>
 
+#include "omapdss.h"
 #include "hdmi4_core.h"
 #include "dss.h"
 #include "dss_features.h"
@@ -100,7 +101,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
 
 static int hdmi_init_regulator(void)
 {
-       int r;
        struct regulator *reg;
 
        if (hdmi.vdda_reg != NULL)
@@ -114,15 +114,6 @@ static int hdmi_init_regulator(void)
                return PTR_ERR(reg);
        }
 
-       if (regulator_can_change_voltage(reg)) {
-               r = regulator_set_voltage(reg, 1800000, 1800000);
-               if (r) {
-                       devm_regulator_put(reg);
-                       DSSWARN("can't set the regulator voltage\n");
-                       return r;
-               }
-       }
-
        hdmi.vdda_reg = reg;
 
        return 0;
@@ -186,7 +177,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
        if (p->double_pixel)
                pc *= 2;
 
-       hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
+       /* DSS_HDMI_TCLK is bitclk / 10 */
+       pc *= 10;
+
+       dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+               pc, &hdmi_cinfo);
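+       /*
+        * Illustrative numbers, not part of this change: for 1080p60 the
+        * pixel clock is 148500000 Hz, so the rate handed to
+        * dss_pll_calc_b() is 1485000000 Hz, since each TMDS lane
+        * carries 10 bits per pixel clock cycle.
+        */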
 
        r = dss_pll_enable(&hdmi.pll.pll);
        if (r) {
@@ -213,9 +208,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
 
        hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
 
-       /* bypass TV gamma table */
-       dispc_enable_gamma_table(0);
-
        /* tv size */
        dss_mgr_set_timings(channel, p);
 
index fa72e735dad2f995c127a265cd6b82a43bf9c73c..ef3afe99e487b68ecbfa5f08f0f7ac54f305b572 100644 (file)
@@ -211,7 +211,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
 static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
 {
        DSSDBG("Enter hdmi_core_powerdown_disable\n");
-       REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
+       REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
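+       /* presumably SYS_CTRL1 bit 0 is an active-low power-down (PD),
+        * so writing 1 releases the power-down instead of asserting it */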
 }
 
 static void hdmi_core_swreset_release(struct hdmi_core_data *core)
index a43f7b10e11344bc63c19f7904ce21d0d14b15e4..0c0a5139a30103dd6686e2964102d83eab08fb00 100644 (file)
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <linux/of.h>
 #include <sound/omap-hdmi-audio.h>
 
+#include "omapdss.h"
 #include "hdmi5_core.h"
 #include "dss.h"
 #include "dss_features.h"
@@ -119,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
 
 static int hdmi_init_regulator(void)
 {
-       int r;
        struct regulator *reg;
 
        if (hdmi.vdda_reg != NULL)
@@ -131,15 +131,6 @@ static int hdmi_init_regulator(void)
                return PTR_ERR(reg);
        }
 
-       if (regulator_can_change_voltage(reg)) {
-               r = regulator_set_voltage(reg, 1800000, 1800000);
-               if (r) {
-                       devm_regulator_put(reg);
-                       DSSWARN("can't set the regulator voltage\n");
-                       return r;
-               }
-       }
-
        hdmi.vdda_reg = reg;
 
        return 0;
@@ -198,7 +189,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
        if (p->double_pixel)
                pc *= 2;
 
-       hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo);
+       /* DSS_HDMI_TCLK is bitclk / 10 */
+       pc *= 10;
+
+       dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+               pc, &hdmi_cinfo);
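+       /* same 10x TMDS bit-clock relationship as the hdmi4 path above */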
 
        /* disable and clear irqs */
        hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
@@ -230,9 +225,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
 
        hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
 
-       /* bypass TV gamma table */
-       dispc_enable_gamma_table(0);
-
        /* tv size */
        dss_mgr_set_timings(channel, p);
 
index 6a397520cae57f3f6fee331ba8a34e9988a2f196..8ab2093daa1285db12b235a08f7a0f07cc0ef22d 100644 (file)
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
 {
        void __iomem *base = core->base;
        const unsigned long long iclk = 266000000;      /* DSS L3 ICLK */
-       const unsigned ss_scl_high = 4000;              /* ns */
-       const unsigned ss_scl_low = 4700;               /* ns */
+       const unsigned ss_scl_high = 4600;              /* ns */
+       const unsigned ss_scl_low = 5400;               /* ns */
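+       /*
+        * Hedged note: the I2C standard-mode minima are 4000 ns high and
+        * 4700 ns low; the raised targets leave margin so the derived
+        * divider cannot undershoot the spec.
+        */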
        const unsigned fs_scl_high = 600;               /* ns */
        const unsigned fs_scl_low = 1300;               /* ns */
        const unsigned sda_hold = 1000;                 /* ns */
@@ -458,7 +458,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
 
        c = (ptr[1] >> 6) & 0x3;
        m = (ptr[1] >> 4) & 0x3;
-       r = (ptr[1] >> 0) & 0x3;
+       r = (ptr[1] >> 0) & 0xf;
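+       /*
+        * Hedged note: AVI InfoFrame data byte 2 is laid out as
+        * C1 C0 M1 M0 R3 R2 R1 R0, so the active format aspect ratio
+        * field "R" is four bits wide; the old 0x3 mask truncated it.
+        */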
 
        itc = (ptr[2] >> 7) & 0x1;
        ec = (ptr[2] >> 4) & 0x7;
index 1b8fcc6c4ba1ba9381709d4970cb25a950640fdb..4dfb67fe5f6dcb75adae7321e4f86aee7ed76b1b 100644 (file)
@@ -4,8 +4,8 @@
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/of.h>
-#include <video/omapdss.h>
 
+#include "omapdss.h"
 #include "hdmi.h"
 
 int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
index 1f5d19c119cea01dbf17801f9f149cf0e38afc06..3ead47cccac57399db94b61223d712755c76258a 100644 (file)
@@ -13,8 +13,9 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <video/omapdss.h>
+#include <linux/seq_file.h>
 
+#include "omapdss.h"
 #include "dss.h"
 #include "hdmi.h"
 
index 06e23a7c432ccea22b02cfb32e47d2516ee757e2..b8bf6a9e555721c40517a7324aacc00aeba53c5a 100644 (file)
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/seq_file.h>
+#include <linux/pm_runtime.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "hdmi.h"
 
@@ -38,71 +39,14 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
        DUMPPLL(PLLCTRL_CFG4);
 }
 
-void hdmi_pll_compute(struct hdmi_pll_data *pll,
-       unsigned long target_tmds, struct dss_pll_clock_info *pi)
-{
-       unsigned long fint, clkdco, clkout;
-       unsigned long target_bitclk, target_clkdco;
-       unsigned long min_dco;
-       unsigned n, m, mf, m2, sd;
-       unsigned long clkin;
-       const struct dss_pll_hw *hw = pll->pll.hw;
-
-       clkin = clk_get_rate(pll->pll.clkin);
-
-       DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds);
-
-       target_bitclk = target_tmds * 10;
-
-       /* Fint */
-       n = DIV_ROUND_UP(clkin, hw->fint_max);
-       fint = clkin / n;
-
-       /* adjust m2 so that the clkdco will be high enough */
-       min_dco = roundup(hw->clkdco_min, fint);
-       m2 = DIV_ROUND_UP(min_dco, target_bitclk);
-       if (m2 == 0)
-               m2 = 1;
-
-       target_clkdco = target_bitclk * m2;
-       m = target_clkdco / fint;
-
-       clkdco = fint * m;
-
-       /* adjust clkdco with fractional mf */
-       if (WARN_ON(target_clkdco - clkdco > fint))
-               mf = 0;
-       else
-               mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
-
-       if (mf > 0)
-               clkdco += (u32)div_u64((u64)mf * fint, 262144);
-
-       clkout = clkdco / m2;
-
-       /* sigma-delta */
-       sd = DIV_ROUND_UP(fint * m, 250000000);
-
-       DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
-               n, m, mf, m2, sd);
-       DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
-
-       pi->n = n;
-       pi->m = m;
-       pi->mf = mf;
-       pi->mX[0] = m2;
-       pi->sd = sd;
-
-       pi->fint = fint;
-       pi->clkdco = clkdco;
-       pi->clkout[0] = clkout;
-}
-
 static int hdmi_pll_enable(struct dss_pll *dsspll)
 {
        struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
        struct hdmi_wp_data *wp = pll->wp;
-       u16 r = 0;
+       int r;
+
+       r = pm_runtime_get_sync(&pll->pdev->dev);
+       WARN_ON(r < 0);
 
        dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
 
@@ -117,10 +61,14 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
 {
        struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
        struct hdmi_wp_data *wp = pll->wp;
+       int r;
 
        hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
 
        dss_ctrl_pll_enable(DSS_PLL_HDMI, false);
+
+       r = pm_runtime_put_sync(&pll->pdev->dev);
+       WARN_ON(r < 0 && r != -ENOSYS);
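+       /* hedged note: -ENOSYS is what pm_runtime_put_sync() returns when
+        * runtime PM is compiled out, so it is not treated as an error */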
 }
 
 static const struct dss_pll_ops dsi_pll_ops = {
@@ -130,6 +78,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
 };
 
 static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
+       .type = DSS_PLL_TYPE_B,
+
        .n_max = 255,
        .m_min = 20,
        .m_max = 4095,
@@ -153,6 +103,8 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
 };
 
 static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
+       .type = DSS_PLL_TYPE_B,
+
        .n_max = 255,
        .m_min = 20,
        .m_max = 2045,
@@ -224,6 +176,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
        int r;
        struct resource *res;
 
+       pll->pdev = pdev;
        pll->wp = wp;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
index 13442b9052d1fa131edbd4795301f928daad9c67..203694a52d1818e51d28d520cca69d0117518681 100644 (file)
@@ -14,8 +14,9 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <linux/seq_file.h>
 
+#include "omapdss.h"
 #include "dss.h"
 #include "hdmi.h"
 
index d7e7c909bbc2a2dd7bff19c4d2b903ed3a48f05c..6eaf1adbd60636d43c71b355b00d893ce8bc5db2 100644 (file)
 #ifndef __OMAP_DRM_DSS_H
 #define __OMAP_DRM_DSS_H
 
-#include <video/omapdss.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <video/videomode.h>
+#include <linux/platform_data/omapdss.h>
+#include <uapi/drm/drm_mode.h>
+
+#define DISPC_IRQ_FRAMEDONE            (1 << 0)
+#define DISPC_IRQ_VSYNC                        (1 << 1)
+#define DISPC_IRQ_EVSYNC_EVEN          (1 << 2)
+#define DISPC_IRQ_EVSYNC_ODD           (1 << 3)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT    (1 << 4)
+#define DISPC_IRQ_PROG_LINE_NUM                (1 << 5)
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW   (1 << 6)
+#define DISPC_IRQ_GFX_END_WIN          (1 << 7)
+#define DISPC_IRQ_PAL_GAMMA_MASK       (1 << 8)
+#define DISPC_IRQ_OCP_ERR              (1 << 9)
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW  (1 << 10)
+#define DISPC_IRQ_VID1_END_WIN         (1 << 11)
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW  (1 << 12)
+#define DISPC_IRQ_VID2_END_WIN         (1 << 13)
+#define DISPC_IRQ_SYNC_LOST            (1 << 14)
+#define DISPC_IRQ_SYNC_LOST_DIGIT      (1 << 15)
+#define DISPC_IRQ_WAKEUP               (1 << 16)
+#define DISPC_IRQ_SYNC_LOST2           (1 << 17)
+#define DISPC_IRQ_VSYNC2               (1 << 18)
+#define DISPC_IRQ_VID3_END_WIN         (1 << 19)
+#define DISPC_IRQ_VID3_FIFO_UNDERFLOW  (1 << 20)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT2   (1 << 21)
+#define DISPC_IRQ_FRAMEDONE2           (1 << 22)
+#define DISPC_IRQ_FRAMEDONEWB          (1 << 23)
+#define DISPC_IRQ_FRAMEDONETV          (1 << 24)
+#define DISPC_IRQ_WBBUFFEROVERFLOW     (1 << 25)
+#define DISPC_IRQ_WBUNCOMPLETEERROR    (1 << 26)
+#define DISPC_IRQ_SYNC_LOST3           (1 << 27)
+#define DISPC_IRQ_VSYNC3               (1 << 28)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT3   (1 << 29)
+#define DISPC_IRQ_FRAMEDONE3           (1 << 30)
+
+struct omap_dss_device;
+struct omap_overlay_manager;
+struct dss_lcd_mgr_config;
+struct snd_aes_iec958;
+struct snd_cea_861_aud_if;
+struct hdmi_avi_infoframe;
+
+enum omap_display_type {
+       OMAP_DISPLAY_TYPE_NONE          = 0,
+       OMAP_DISPLAY_TYPE_DPI           = 1 << 0,
+       OMAP_DISPLAY_TYPE_DBI           = 1 << 1,
+       OMAP_DISPLAY_TYPE_SDI           = 1 << 2,
+       OMAP_DISPLAY_TYPE_DSI           = 1 << 3,
+       OMAP_DISPLAY_TYPE_VENC          = 1 << 4,
+       OMAP_DISPLAY_TYPE_HDMI          = 1 << 5,
+       OMAP_DISPLAY_TYPE_DVI           = 1 << 6,
+};
+
+enum omap_plane {
+       OMAP_DSS_GFX    = 0,
+       OMAP_DSS_VIDEO1 = 1,
+       OMAP_DSS_VIDEO2 = 2,
+       OMAP_DSS_VIDEO3 = 3,
+       OMAP_DSS_WB     = 4,
+};
+
+enum omap_channel {
+       OMAP_DSS_CHANNEL_LCD    = 0,
+       OMAP_DSS_CHANNEL_DIGIT  = 1,
+       OMAP_DSS_CHANNEL_LCD2   = 2,
+       OMAP_DSS_CHANNEL_LCD3   = 3,
+       OMAP_DSS_CHANNEL_WB     = 4,
+};
+
+enum omap_color_mode {
+       OMAP_DSS_COLOR_CLUT1    = 1 << 0,  /* BITMAP 1 */
+       OMAP_DSS_COLOR_CLUT2    = 1 << 1,  /* BITMAP 2 */
+       OMAP_DSS_COLOR_CLUT4    = 1 << 2,  /* BITMAP 4 */
+       OMAP_DSS_COLOR_CLUT8    = 1 << 3,  /* BITMAP 8 */
+       OMAP_DSS_COLOR_RGB12U   = 1 << 4,  /* RGB12, 16-bit container */
+       OMAP_DSS_COLOR_ARGB16   = 1 << 5,  /* ARGB16 */
+       OMAP_DSS_COLOR_RGB16    = 1 << 6,  /* RGB16 */
+       OMAP_DSS_COLOR_RGB24U   = 1 << 7,  /* RGB24, 32-bit container */
+       OMAP_DSS_COLOR_RGB24P   = 1 << 8,  /* RGB24, 24-bit container */
+       OMAP_DSS_COLOR_YUV2     = 1 << 9,  /* YUV2 4:2:2 co-sited */
+       OMAP_DSS_COLOR_UYVY     = 1 << 10, /* UYVY 4:2:2 co-sited */
+       OMAP_DSS_COLOR_ARGB32   = 1 << 11, /* ARGB32 */
+       OMAP_DSS_COLOR_RGBA32   = 1 << 12, /* RGBA32 */
+       OMAP_DSS_COLOR_RGBX32   = 1 << 13, /* RGBx32 */
+       OMAP_DSS_COLOR_NV12             = 1 << 14, /* NV12 format: YUV 4:2:0 */
+       OMAP_DSS_COLOR_RGBA16           = 1 << 15, /* RGBA16 - 4444 */
+       OMAP_DSS_COLOR_RGBX16           = 1 << 16, /* RGBx16 - 4444 */
+       OMAP_DSS_COLOR_ARGB16_1555      = 1 << 17, /* ARGB16 - 1555 */
+       OMAP_DSS_COLOR_XRGB16_1555      = 1 << 18, /* xRGB16 - 1555 */
+};
+
+enum omap_dss_load_mode {
+       OMAP_DSS_LOAD_CLUT_AND_FRAME    = 0,
+       OMAP_DSS_LOAD_CLUT_ONLY         = 1,
+       OMAP_DSS_LOAD_FRAME_ONLY        = 2,
+       OMAP_DSS_LOAD_CLUT_ONCE_FRAME   = 3,
+};
+
+enum omap_dss_trans_key_type {
+       OMAP_DSS_COLOR_KEY_GFX_DST = 0,
+       OMAP_DSS_COLOR_KEY_VID_SRC = 1,
+};
+
+enum omap_rfbi_te_mode {
+       OMAP_DSS_RFBI_TE_MODE_1 = 1,
+       OMAP_DSS_RFBI_TE_MODE_2 = 2,
+};
+
+enum omap_dss_signal_level {
+       OMAPDSS_SIG_ACTIVE_LOW,
+       OMAPDSS_SIG_ACTIVE_HIGH,
+};
+
+enum omap_dss_signal_edge {
+       OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+       OMAPDSS_DRIVE_SIG_RISING_EDGE,
+};
+
+enum omap_dss_venc_type {
+       OMAP_DSS_VENC_TYPE_COMPOSITE,
+       OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+enum omap_dss_dsi_pixel_format {
+       OMAP_DSS_DSI_FMT_RGB888,
+       OMAP_DSS_DSI_FMT_RGB666,
+       OMAP_DSS_DSI_FMT_RGB666_PACKED,
+       OMAP_DSS_DSI_FMT_RGB565,
+};
+
+enum omap_dss_dsi_mode {
+       OMAP_DSS_DSI_CMD_MODE = 0,
+       OMAP_DSS_DSI_VIDEO_MODE,
+};
+
+enum omap_display_caps {
+       OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE      = 1 << 0,
+       OMAP_DSS_DISPLAY_CAP_TEAR_ELIM          = 1 << 1,
+};
+
+enum omap_dss_display_state {
+       OMAP_DSS_DISPLAY_DISABLED = 0,
+       OMAP_DSS_DISPLAY_ACTIVE,
+};
+
+enum omap_dss_rotation_type {
+       OMAP_DSS_ROT_DMA        = 1 << 0,
+       OMAP_DSS_ROT_VRFB       = 1 << 1,
+       OMAP_DSS_ROT_TILER      = 1 << 2,
+};
+
+/* clockwise rotation angle */
+enum omap_dss_rotation_angle {
+       OMAP_DSS_ROT_0   = 0,
+       OMAP_DSS_ROT_90  = 1,
+       OMAP_DSS_ROT_180 = 2,
+       OMAP_DSS_ROT_270 = 3,
+};
+
+enum omap_overlay_caps {
+       OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
+       OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1,
+       OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2,
+       OMAP_DSS_OVL_CAP_ZORDER = 1 << 3,
+       OMAP_DSS_OVL_CAP_POS = 1 << 4,
+       OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
+};
+
+enum omap_overlay_manager_caps {
+       OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
+};
+
+enum omap_dss_clk_source {
+       OMAP_DSS_CLK_SRC_FCK = 0,               /* OMAP2/3: DSS1_ALWON_FCLK
+                                                * OMAP4: DSS_FCLK */
+       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC,   /* OMAP3: DSI1_PLL_FCLK
+                                                * OMAP4: PLL1_CLK1 */
+       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI,     /* OMAP3: DSI2_PLL_FCLK
+                                                * OMAP4: PLL1_CLK2 */
+       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC,  /* OMAP4: PLL2_CLK1 */
+       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI,    /* OMAP4: PLL2_CLK2 */
+};
+
+enum omap_hdmi_flags {
+       OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
+};
+
+enum omap_dss_output_id {
+       OMAP_DSS_OUTPUT_DPI     = 1 << 0,
+       OMAP_DSS_OUTPUT_DBI     = 1 << 1,
+       OMAP_DSS_OUTPUT_SDI     = 1 << 2,
+       OMAP_DSS_OUTPUT_DSI1    = 1 << 3,
+       OMAP_DSS_OUTPUT_DSI2    = 1 << 4,
+       OMAP_DSS_OUTPUT_VENC    = 1 << 5,
+       OMAP_DSS_OUTPUT_HDMI    = 1 << 6,
+};
+
+/* RFBI */
+
+struct rfbi_timings {
+       int cs_on_time;
+       int cs_off_time;
+       int we_on_time;
+       int we_off_time;
+       int re_on_time;
+       int re_off_time;
+       int we_cycle_time;
+       int re_cycle_time;
+       int cs_pulse_width;
+       int access_time;
+
+       int clk_div;
+
+       u32 tim[5];             /* set by rfbi_convert_timings() */
+
+       int converted;
+};
+
+/* DSI */
+
+enum omap_dss_dsi_trans_mode {
+       /* Sync Pulses: both sync start and end packets sent */
+       OMAP_DSS_DSI_PULSE_MODE,
+       /* Sync Events: only sync start packets sent */
+       OMAP_DSS_DSI_EVENT_MODE,
+       /* Burst: only sync start packets sent, pixels are time compressed */
+       OMAP_DSS_DSI_BURST_MODE,
+};
+
+struct omap_dss_dsi_videomode_timings {
+       unsigned long hsclk;
+
+       unsigned ndl;
+       unsigned bitspp;
+
+       /* pixels */
+       u16 hact;
+       /* lines */
+       u16 vact;
+
+       /* DSI video mode blanking data */
+       /* Unit: byte clock cycles */
+       u16 hss;
+       u16 hsa;
+       u16 hse;
+       u16 hfp;
+       u16 hbp;
+       /* Unit: line clocks */
+       u16 vsa;
+       u16 vfp;
+       u16 vbp;
+
+       /* DSI blanking modes */
+       int blanking_mode;
+       int hsa_blanking_mode;
+       int hbp_blanking_mode;
+       int hfp_blanking_mode;
+
+       enum omap_dss_dsi_trans_mode trans_mode;
+
+       bool ddr_clk_always_on;
+       int window_sync;
+};
+
+struct omap_dss_dsi_config {
+       enum omap_dss_dsi_mode mode;
+       enum omap_dss_dsi_pixel_format pixel_format;
+       const struct omap_video_timings *timings;
+
+       unsigned long hs_clk_min, hs_clk_max;
+       unsigned long lp_clk_min, lp_clk_max;
+
+       bool ddr_clk_always_on;
+       enum omap_dss_dsi_trans_mode trans_mode;
+};
+
+struct omap_video_timings {
+       /* Unit: pixels */
+       u16 x_res;
+       /* Unit: pixels */
+       u16 y_res;
+       /* Unit: Hz */
+       u32 pixelclock;
+       /* Unit: pixel clocks */
+       u16 hsw;        /* Horizontal synchronization pulse width */
+       /* Unit: pixel clocks */
+       u16 hfp;        /* Horizontal front porch */
+       /* Unit: pixel clocks */
+       u16 hbp;        /* Horizontal back porch */
+       /* Unit: line clocks */
+       u16 vsw;        /* Vertical synchronization pulse width */
+       /* Unit: line clocks */
+       u16 vfp;        /* Vertical front porch */
+       /* Unit: line clocks */
+       u16 vbp;        /* Vertical back porch */
+
+       /* Vsync logic level */
+       enum omap_dss_signal_level vsync_level;
+       /* Hsync logic level */
+       enum omap_dss_signal_level hsync_level;
+       /* Interlaced or Progressive timings */
+       bool interlace;
+       /* Pixel clock edge to drive LCD data */
+       enum omap_dss_signal_edge data_pclk_edge;
+       /* Data enable logic level */
+       enum omap_dss_signal_level de_level;
+       /* Pixel clock edges to drive HSYNC and VSYNC signals */
+       enum omap_dss_signal_edge sync_pclk_edge;
+
+       bool double_pixel;
+};
+
+/* Hardcoded timings for TV modes. The VENC only uses these to
+ * identify the mode, and does not actually use the configs
+ * themselves. However, the configs should be something that
+ * a normal monitor can also show. */
+extern const struct omap_video_timings omap_dss_pal_timings;
+extern const struct omap_video_timings omap_dss_ntsc_timings;
+
+struct omap_dss_cpr_coefs {
+       s16 rr, rg, rb;
+       s16 gr, gg, gb;
+       s16 br, bg, bb;
+};
+
+struct omap_overlay_info {
+       dma_addr_t paddr;
+       dma_addr_t p_uv_addr;  /* for NV12 format */
+       u16 screen_width;
+       u16 width;
+       u16 height;
+       enum omap_color_mode color_mode;
+       u8 rotation;
+       enum omap_dss_rotation_type rotation_type;
+       bool mirror;
+
+       u16 pos_x;
+       u16 pos_y;
+       u16 out_width;  /* if 0, out_width == width */
+       u16 out_height; /* if 0, out_height == height */
+       u8 global_alpha;
+       u8 pre_mult_alpha;
+       u8 zorder;
+};
+
+struct omap_overlay {
+       struct kobject kobj;
+       struct list_head list;
+
+       /* static fields */
+       const char *name;
+       enum omap_plane id;
+       enum omap_color_mode supported_modes;
+       enum omap_overlay_caps caps;
+
+       /* dynamic fields */
+       struct omap_overlay_manager *manager;
+
+       /*
+        * The following functions do not block:
+        *
+        * is_enabled
+        * set_overlay_info
+        * get_overlay_info
+        *
+        * The rest of the functions may block and cannot be called from
+        * interrupt context
+        */
+
+       int (*enable)(struct omap_overlay *ovl);
+       int (*disable)(struct omap_overlay *ovl);
+       bool (*is_enabled)(struct omap_overlay *ovl);
+
+       int (*set_manager)(struct omap_overlay *ovl,
+               struct omap_overlay_manager *mgr);
+       int (*unset_manager)(struct omap_overlay *ovl);
+
+       int (*set_overlay_info)(struct omap_overlay *ovl,
+                       struct omap_overlay_info *info);
+       void (*get_overlay_info)(struct omap_overlay *ovl,
+                       struct omap_overlay_info *info);
+
+       int (*wait_for_go)(struct omap_overlay *ovl);
+
+       struct omap_dss_device *(*get_device)(struct omap_overlay *ovl);
+};
+
+struct omap_overlay_manager_info {
+       u32 default_color;
+
+       enum omap_dss_trans_key_type trans_key_type;
+       u32 trans_key;
+       bool trans_enabled;
+
+       bool partial_alpha_enabled;
+
+       bool cpr_enable;
+       struct omap_dss_cpr_coefs cpr_coefs;
+};
+
+struct omap_overlay_manager {
+       struct kobject kobj;
+
+       /* static fields */
+       const char *name;
+       enum omap_channel id;
+       enum omap_overlay_manager_caps caps;
+       struct list_head overlays;
+       enum omap_display_type supported_displays;
+       enum omap_dss_output_id supported_outputs;
+
+       /* dynamic fields */
+       struct omap_dss_device *output;
+
+       /*
+        * The following functions do not block:
+        *
+        * set_manager_info
+        * get_manager_info
+        * apply
+        *
+        * The rest of the functions may block and cannot be called from
+        * interrupt context
+        */
+
+       int (*set_output)(struct omap_overlay_manager *mgr,
+               struct omap_dss_device *output);
+       int (*unset_output)(struct omap_overlay_manager *mgr);
+
+       int (*set_manager_info)(struct omap_overlay_manager *mgr,
+                       struct omap_overlay_manager_info *info);
+       void (*get_manager_info)(struct omap_overlay_manager *mgr,
+                       struct omap_overlay_manager_info *info);
+
+       int (*apply)(struct omap_overlay_manager *mgr);
+       int (*wait_for_go)(struct omap_overlay_manager *mgr);
+       int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
+
+       struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr);
+};
+
+/* 22 pins means 1 clk lane and 10 data lanes */
+#define OMAP_DSS_MAX_DSI_PINS 22
+
+struct omap_dsi_pin_config {
+       int num_pins;
+       /*
+        * pin numbers in the following order:
+        * clk+, clk-
+        * data1+, data1-
+        * data2+, data2-
+        * ...
+        */
+       int pins[OMAP_DSS_MAX_DSI_PINS];
+};
+
+struct omap_dss_writeback_info {
+       u32 paddr;
+       u32 p_uv_addr;
+       u16 buf_width;
+       u16 width;
+       u16 height;
+       enum omap_color_mode color_mode;
+       u8 rotation;
+       enum omap_dss_rotation_type rotation_type;
+       bool mirror;
+       u8 pre_mult_alpha;
+};
+
+struct omapdss_dpi_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+
+       void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
+};
+
+struct omapdss_sdi_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+
+       void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
+};
+
+struct omapdss_dvi_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+};
+
+struct omapdss_atv_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+
+       void (*set_type)(struct omap_dss_device *dssdev,
+               enum omap_dss_venc_type type);
+       void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev,
+               bool invert_polarity);
+
+       int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+       u32 (*get_wss)(struct omap_dss_device *dssdev);
+};
+
+struct omapdss_hdmi_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+
+       int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+       bool (*detect)(struct omap_dss_device *dssdev);
+
+       int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
+       int (*set_infoframe)(struct omap_dss_device *dssdev,
+               const struct hdmi_avi_infoframe *avi);
+};
+
+struct omapdss_dsi_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
+                       bool enter_ulps);
+
+       /* bus configuration */
+       int (*set_config)(struct omap_dss_device *dssdev,
+                       const struct omap_dss_dsi_config *cfg);
+       int (*configure_pins)(struct omap_dss_device *dssdev,
+                       const struct omap_dsi_pin_config *pin_cfg);
+
+       void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
+                       bool enable);
+       int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+
+       int (*update)(struct omap_dss_device *dssdev, int channel,
+                       void (*callback)(int, void *), void *data);
+
+       void (*bus_lock)(struct omap_dss_device *dssdev);
+       void (*bus_unlock)(struct omap_dss_device *dssdev);
+
+       int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
+       void (*disable_video_output)(struct omap_dss_device *dssdev,
+                       int channel);
+
+       int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
+       int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
+                       int vc_id);
+       void (*release_vc)(struct omap_dss_device *dssdev, int channel);
+
+       /* data transfer */
+       int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
+                       u8 *data, int len);
+       int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
+                       u8 *data, int len);
+       int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
+                       u8 *data, int len);
+
+       int (*gen_write)(struct omap_dss_device *dssdev, int channel,
+                       u8 *data, int len);
+       int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
+                       u8 *data, int len);
+       int (*gen_read)(struct omap_dss_device *dssdev, int channel,
+                       u8 *reqdata, int reqlen,
+                       u8 *data, int len);
+
+       int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
+
+       int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
+                       int channel, u16 plen);
+};
+
+struct omap_dss_device {
+       struct kobject kobj;
+       struct device *dev;
+
+       struct module *owner;
+
+       struct list_head panel_list;
+
+       /* alias in the form of "display%d" */
+       char alias[16];
+
+       enum omap_display_type type;
+       enum omap_display_type output_type;
+
+       union {
+               struct {
+                       u8 data_lines;
+               } dpi;
+
+               struct {
+                       u8 channel;
+                       u8 data_lines;
+               } rfbi;
+
+               struct {
+                       u8 datapairs;
+               } sdi;
+
+               struct {
+                       int module;
+               } dsi;
+
+               struct {
+                       enum omap_dss_venc_type type;
+                       bool invert_polarity;
+               } venc;
+       } phy;
+
+       struct {
+               struct omap_video_timings timings;
+
+               enum omap_dss_dsi_pixel_format dsi_pix_fmt;
+               enum omap_dss_dsi_mode dsi_mode;
+       } panel;
+
+       struct {
+               u8 pixel_size;
+               struct rfbi_timings rfbi_timings;
+       } ctrl;
+
+       const char *name;
+
+       /* used to match device to driver */
+       const char *driver_name;
+
+       void *data;
+
+       struct omap_dss_driver *driver;
+
+       union {
+               const struct omapdss_dpi_ops *dpi;
+               const struct omapdss_sdi_ops *sdi;
+               const struct omapdss_dvi_ops *dvi;
+               const struct omapdss_hdmi_ops *hdmi;
+               const struct omapdss_atv_ops *atv;
+               const struct omapdss_dsi_ops *dsi;
+       } ops;
+
+       /* helper variable for driver suspend/resume */
+       bool activate_after_resume;
+
+       enum omap_display_caps caps;
+
+       struct omap_dss_device *src;
+
+       enum omap_dss_display_state state;
+
+       /* OMAP DSS output specific fields */
+
+       struct list_head list;
+
+       /* DISPC channel for this output */
+       enum omap_channel dispc_channel;
+       bool dispc_channel_connected;
+
+       /* output instance */
+       enum omap_dss_output_id id;
+
+       /* the port number in the DT node */
+       int port_num;
+
+       /* dynamic fields */
+       struct omap_overlay_manager *manager;
+
+       struct omap_dss_device *dst;
+};
+
+struct omap_dss_driver {
+       int (*probe)(struct omap_dss_device *);
+       void (*remove)(struct omap_dss_device *);
+
+       int (*connect)(struct omap_dss_device *dssdev);
+       void (*disconnect)(struct omap_dss_device *dssdev);
+
+       int (*enable)(struct omap_dss_device *display);
+       void (*disable)(struct omap_dss_device *display);
+       int (*run_test)(struct omap_dss_device *display, int test);
+
+       int (*update)(struct omap_dss_device *dssdev,
+                              u16 x, u16 y, u16 w, u16 h);
+       int (*sync)(struct omap_dss_device *dssdev);
+
+       int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+       int (*get_te)(struct omap_dss_device *dssdev);
+
+       u8 (*get_rotate)(struct omap_dss_device *dssdev);
+       int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+       bool (*get_mirror)(struct omap_dss_device *dssdev);
+       int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+       int (*memory_read)(struct omap_dss_device *dssdev,
+                       void *buf, size_t size,
+                       u16 x, u16 y, u16 w, u16 h);
+
+       void (*get_resolution)(struct omap_dss_device *dssdev,
+                       u16 *xres, u16 *yres);
+       void (*get_dimensions)(struct omap_dss_device *dssdev,
+                       u32 *width, u32 *height);
+       int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+
+       int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+       u32 (*get_wss)(struct omap_dss_device *dssdev);
+
+       int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+       bool (*detect)(struct omap_dss_device *dssdev);
+
+       int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
+       int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
+               const struct hdmi_avi_infoframe *avi);
+};
+
+enum omapdss_version omapdss_get_version(void);
+bool omapdss_is_initialized(void);
+
+int omap_dss_register_driver(struct omap_dss_driver *);
+void omap_dss_unregister_driver(struct omap_dss_driver *);
+
+int omapdss_register_display(struct omap_dss_device *dssdev);
+void omapdss_unregister_display(struct omap_dss_device *dssdev);
+
+struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
+void omap_dss_put_device(struct omap_dss_device *dssdev);
+#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
+struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
+struct omap_dss_device *omap_dss_find_device(void *data,
+               int (*match)(struct omap_dss_device *dssdev, void *data));
+const char *omapdss_get_default_display_name(void);
+
+void videomode_to_omap_video_timings(const struct videomode *vm,
+               struct omap_video_timings *ovt);
+void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
+               struct videomode *vm);
+
+int dss_feat_get_num_mgrs(void);
+int dss_feat_get_num_ovls(void);
+enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+
+
+int omap_dss_get_num_overlay_managers(void);
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
+
+int omap_dss_get_num_overlays(void);
+struct omap_overlay *omap_dss_get_overlay(int num);
+
+int omapdss_register_output(struct omap_dss_device *output);
+void omapdss_unregister_output(struct omap_dss_device *output);
+struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
+struct omap_dss_device *omap_dss_find_output(const char *name);
+struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
+int omapdss_output_set_device(struct omap_dss_device *out,
+               struct omap_dss_device *dssdev);
+int omapdss_output_unset_device(struct omap_dss_device *out);
+
+struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
+struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev);
+
+void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
+               u16 *xres, u16 *yres);
+int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings);
+
+typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+
+int omapdss_compat_init(void);
+void omapdss_compat_uninit(void);
+
+static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
+{
+       return dssdev->src;
+}
+
+static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
+{
+       return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
+}
+
+struct device_node *
+omapdss_of_get_next_port(const struct device_node *parent,
+                        struct device_node *prev);
+
+struct device_node *
+omapdss_of_get_next_endpoint(const struct device_node *parent,
+                            struct device_node *prev);
+
+struct device_node *
+omapdss_of_get_first_endpoint(const struct device_node *parent);
+
+struct omap_dss_device *
+omapdss_of_find_source_for_first_ep(struct device_node *node);
 
 u32 dispc_read_irqstatus(void);
 void dispc_clear_irqstatus(u32 mask);
@@ -44,6 +909,10 @@ void dispc_mgr_set_timings(enum omap_channel channel,
                const struct omap_video_timings *timings);
 void dispc_mgr_setup(enum omap_channel channel,
                const struct omap_overlay_manager_info *info);
+u32 dispc_mgr_gamma_size(enum omap_channel channel);
+void dispc_mgr_set_gamma(enum omap_channel channel,
+                        const struct drm_color_lut *lut,
+                        unsigned int length);
 
 int dispc_ovl_enable(enum omap_plane plane, bool enable);
 bool dispc_ovl_enabled(enum omap_plane plane);
index 829232ad8c8189d4449cdaf92cae047fb3562ff8..24f859488201a26df933a9475e49de1195ed1196 100644 (file)
@@ -21,8 +21,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 
 static LIST_HEAD(output_list);
index f974ddcd3b6e97563f455c3b4196e6641d7ed041..0a76c89cdc2ece993672ee76c60d64432101b2c1 100644 (file)
@@ -22,8 +22,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/sched.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 
 #define PLL_CONTROL                    0x0000
@@ -76,6 +75,59 @@ struct dss_pll *dss_pll_find(const char *name)
        return NULL;
 }
 
+struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src)
+{
+       struct dss_pll *pll;
+
+       switch (src) {
+       default:
+       case DSS_CLK_SRC_FCK:
+               return NULL;
+
+       case DSS_CLK_SRC_HDMI_PLL:
+               return dss_pll_find("hdmi");
+
+       case DSS_CLK_SRC_PLL1_1:
+       case DSS_CLK_SRC_PLL1_2:
+       case DSS_CLK_SRC_PLL1_3:
+               pll = dss_pll_find("dsi0");
+               if (!pll)
+                       pll = dss_pll_find("video0");
+               return pll;
+
+       case DSS_CLK_SRC_PLL2_1:
+       case DSS_CLK_SRC_PLL2_2:
+       case DSS_CLK_SRC_PLL2_3:
+               pll = dss_pll_find("dsi1");
+               if (!pll)
+                       pll = dss_pll_find("video1");
+               return pll;
+       }
+}
+
+unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
+{
+       switch (src) {
+       case DSS_CLK_SRC_HDMI_PLL:
+               return 0;
+
+       case DSS_CLK_SRC_PLL1_1:
+       case DSS_CLK_SRC_PLL2_1:
+               return 0;
+
+       case DSS_CLK_SRC_PLL1_2:
+       case DSS_CLK_SRC_PLL2_2:
+               return 1;
+
+       case DSS_CLK_SRC_PLL1_3:
+       case DSS_CLK_SRC_PLL2_3:
+               return 2;
+
+       default:
+               return 0;
+       }
+}
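+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch): a clock
+ * source is resolved to a rate by combining the two helpers above with
+ * the PLL's cached clock info, assuming the cinfo/clkout[] fields from
+ * dss.h:
+ *
+ *     struct dss_pll *pll = dss_pll_find_by_src(src);
+ *     unsigned idx = dss_pll_get_clkout_idx_for_src(src);
+ *     unsigned long rate = pll ? pll->cinfo.clkout[idx] : 0;
+ *
+ * DSS_CLK_SRC_FCK maps to no PLL, so the NULL return must be handled
+ * separately.
+ */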
+
 int dss_pll_enable(struct dss_pll *pll)
 {
        int r;
@@ -129,7 +181,7 @@ int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cin
        return 0;
 }
 
-bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
+bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
                unsigned long out_min, unsigned long out_max,
                dss_hsdiv_calc_func func, void *data)
 {
@@ -154,7 +206,11 @@ bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
        return false;
 }
 
-bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
+/*
+ * clkdco = clkin / n * m * 2
+ * clkoutX = clkdco / mX
+ */
+bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
                unsigned long pll_min, unsigned long pll_max,
                dss_pll_calc_func func, void *data)
 {
@@ -195,6 +251,71 @@ bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
        return false;
 }
 
+/*
+ * This calculates a PLL config that will provide the target_clkout rate
+ * for clkout. Additionally, the clkdco rate will equal the clkout rate
+ * when the clkout rate is >= clkdco_min.
+ *
+ * clkdco = clkin / n * m + clkin / n * mf / 262144
+ * clkout = clkdco / m2
+ */
+bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
+       unsigned long target_clkout, struct dss_pll_clock_info *cinfo)
+{
+       unsigned long fint, clkdco, clkout;
+       unsigned long target_clkdco;
+       unsigned long min_dco;
+       unsigned n, m, mf, m2, sd;
+       const struct dss_pll_hw *hw = pll->hw;
+
+       DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout);
+
+       /* Fint */
+       n = DIV_ROUND_UP(clkin, hw->fint_max);
+       fint = clkin / n;
+
+       /* adjust m2 so that the clkdco will be high enough */
+       min_dco = roundup(hw->clkdco_min, fint);
+       m2 = DIV_ROUND_UP(min_dco, target_clkout);
+       if (m2 == 0)
+               m2 = 1;
+
+       target_clkdco = target_clkout * m2;
+       m = target_clkdco / fint;
+
+       clkdco = fint * m;
+
+       /* adjust clkdco with fractional mf */
+       if (WARN_ON(target_clkdco - clkdco > fint))
+               mf = 0;
+       else
+               mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
+
+       if (mf > 0)
+               clkdco += (u32)div_u64((u64)mf * fint, 262144);
+
+       clkout = clkdco / m2;
+
+       /* sigma-delta */
+       sd = DIV_ROUND_UP(fint * m, 250000000);
+
+       DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
+               n, m, mf, m2, sd);
+       DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
+
+       cinfo->n = n;
+       cinfo->m = m;
+       cinfo->mf = mf;
+       cinfo->mX[0] = m2;
+       cinfo->sd = sd;
+
+       cinfo->fint = fint;
+       cinfo->clkdco = clkdco;
+       cinfo->clkout[0] = clkout;
+
+       return true;
+}
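+
+/*
+ * Worked example with illustrative inputs (not taken from this patch):
+ * with clkin = 38400000 and fint_max = 2500000, n = 16 and
+ * fint = 2400000. For target_clkout = 1485000000 (above clkdco_min),
+ * m2 = 1 and m = 618, giving clkdco = 1483200000; the fractional part
+ * mf = 262144 * 1800000 / 2400000 = 196608 adds the remaining
+ * 1800000 Hz, so clkdco = clkout = 1485000000. Finally
+ * sd = DIV_ROUND_UP(2400000 * 618, 250000000) = 6.
+ */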
+
 static int wait_for_bit_change(void __iomem *reg, int bitnum, int value)
 {
        unsigned long timeout;
index 3796576dfadf8840a404427e4bd3946d914d3367..cd53566d75eb42b1f2e5a732581101b89b2a5e42 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include "omapdss.h"
 #include "dss.h"
 
 struct rfbi_reg { u16 idx; };
index cd6d3bfb041d4bc07526afb26b6d5f0ed34652aa..0a96c321ce6268dffef06920eea8b11ddb9d828a 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include "omapdss.h"
 #include "dss.h"
 
 static struct {
index 08a2cc778ba9692b5d66aad8d5871022867adca7..6eedf2118708123c71697b1833c1550d2f838a2e 100644 (file)
@@ -37,8 +37,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 
index b1ec59e42940c22ef2b63042dd4a523917bea64f..7429de928d4e59e5f5e85a7190465468d76c557b 100644 (file)
@@ -17,8 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 
-#include <video/omapdss.h>
-
+#include "omapdss.h"
 #include "dss.h"
 #include "dss_features.h"
 
@@ -108,6 +107,8 @@ static const struct dss_pll_ops dss_pll_ops = {
 };
 
 static const struct dss_pll_hw dss_dra7_video_pll_hw = {
+       .type = DSS_PLL_TYPE_A,
+
        .n_max = (1 << 8) - 1,
        .m_max = (1 << 12) - 1,
        .mX_max = (1 << 5) - 1,
@@ -124,6 +125,10 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
        .mX_lsb[0] = 21,
        .mX_msb[1] = 30,
        .mX_lsb[1] = 26,
+       .mX_msb[2] = 4,
+       .mX_lsb[2] = 0,
+       .mX_msb[3] = 9,
+       .mX_lsb[3] = 5,
 
        .has_refsel = true,
 };
index ce2d67b6a8c7f98d752c44b817d5b04147dfa165..137fe690a0da7680cb7d9cca36ba98fb9451a4d9 100644 (file)
@@ -32,7 +32,6 @@
 struct omap_connector {
        struct drm_connector base;
        struct omap_dss_device *dssdev;
-       struct drm_encoder *encoder;
        bool hdmi_mode;
 };
 
@@ -256,13 +255,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
        return ret;
 }
 
-struct drm_encoder *omap_connector_attached_encoder(
-               struct drm_connector *connector)
-{
-       struct omap_connector *omap_connector = to_omap_connector(connector);
-       return omap_connector->encoder;
-}
-
 static const struct drm_connector_funcs omap_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .reset = drm_atomic_helper_connector_reset,
@@ -276,7 +268,6 @@ static const struct drm_connector_funcs omap_connector_funcs = {
 static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
        .get_modes = omap_connector_get_modes,
        .mode_valid = omap_connector_mode_valid,
-       .best_encoder = omap_connector_attached_encoder,
 };
 
 /* initialize connector */
@@ -296,7 +287,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
                goto fail;
 
        omap_connector->dssdev = dssdev;
-       omap_connector->encoder = encoder;
 
        connector = &omap_connector->base;
 
index 075f2bb44867d37aa736f32b9a0842a31b58c7ba..180f644e861ec4efc90ffb512e18ff4bb477538f 100644 (file)
@@ -372,6 +372,20 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
        copy_timings_drm_to_omap(&omap_crtc->timings, mode);
 }
 
+static int omap_crtc_atomic_check(struct drm_crtc *crtc,
+                               struct drm_crtc_state *state)
+{
+       if (state->color_mgmt_changed && state->gamma_lut) {
+               uint length = state->gamma_lut->length /
+                       sizeof(struct drm_color_lut);
+
+               if (length < 2)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
 static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
 {
@@ -384,6 +398,19 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
 
        WARN_ON(omap_crtc->vblank_irq.registered);
 
+       if (crtc->state->color_mgmt_changed) {
+               struct drm_color_lut *lut = NULL;
+               uint length = 0;
+
+               if (crtc->state->gamma_lut) {
+                       lut = (struct drm_color_lut *)
+                               crtc->state->gamma_lut->data;
+                       length = crtc->state->gamma_lut->length /
+                               sizeof(*lut);
+               }
+               dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
+       }
+
        if (dispc_mgr_is_enabled(omap_crtc->channel)) {
 
                DBG("%s: GO", omap_crtc->name);
@@ -460,6 +500,7 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = omap_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
+       .gamma_set = drm_atomic_helper_legacy_gamma_set,
        .set_property = drm_atomic_helper_crtc_set_property,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -471,6 +512,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
        .mode_set_nofb = omap_crtc_mode_set_nofb,
        .disable = omap_crtc_disable,
        .enable = omap_crtc_enable,
+       .atomic_check = omap_crtc_atomic_check,
        .atomic_begin = omap_crtc_atomic_begin,
        .atomic_flush = omap_crtc_atomic_flush,
 };
@@ -534,6 +576,20 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
 
        drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
 
+       /* The dispc API adapts to whatever size we ask for, but the HW
+        * supports a 256-element gamma table for LCDs and a 1024-element
+        * table for OMAP_DSS_CHANNEL_DIGIT. The X server assumes 256-element
+        * gamma tables, so let's use that. The size of the HW gamma table
+        * can be queried with dispc_mgr_gamma_size(); if it returns 0, the
+        * gamma table is not supported.
+        */
+       if (dispc_mgr_gamma_size(channel)) {
+               uint gamma_lut_size = 256;
+
+               drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
+               drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
+       }
+
        omap_plane_install_properties(crtc->primary, &crtc->base);
 
        omap_crtcs[channel] = omap_crtc;
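
The gamma_lut blob handled above is an array of struct drm_color_lut entries; atomic_flush derives the element count from the blob size, and atomic_check rejects tables with fewer than two entries. A userspace-side sketch of filling such a table follows (the struct mirrors the UAPI layout; creating the blob and attaching it to the CRTC property is omitted):

#include <stdint.h>
#include <stdio.h>

struct drm_color_lut {          /* mirrors the layout in <drm/drm_mode.h> */
	uint16_t red;
	uint16_t green;
	uint16_t blue;
	uint16_t reserved;
};

/* Fill a linear identity ramp; len must be >= 2, matching atomic_check. */
static void fill_linear_lut(struct drm_color_lut *lut, unsigned len)
{
	for (unsigned i = 0; i < len; i++) {
		uint16_t v = (uint16_t)((uint32_t)i * 0xffff / (len - 1));
		lut[i].red = lut[i].green = lut[i].blue = v;
		lut[i].reserved = 0;
	}
}

int main(void)
{
	struct drm_color_lut lut[256];

	fill_linear_lut(lut, 256);
	printf("lut[0]=%u lut[255]=%u\n", lut[0].red, lut[255].red); /* 0, 65535 */
	return 0;
}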
index 6f5fc14fc015c63f19ff1a7921de20d9d062a991..479bf24050f8a5f11375d0aa9f077cc70ec8e751 100644 (file)
@@ -17,6 +17,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/seq_file.h>
+
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 
index de275a5be1dbfd4013a3c042590d804ed46b53fa..4ceed7a9762f1a8309f84441154550878a8e4bbc 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h> /* platform_device() */
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
index d86f5479345b7ac055b6e003b0296ff46a05f6d1..26c6134eb744075db77edaa279aa25cb47a56dc3 100644 (file)
@@ -142,8 +142,9 @@ static int omap_atomic_commit(struct drm_device *dev,
 {
        struct omap_drm_private *priv = dev->dev_private;
        struct omap_atomic_state_commit *commit;
-       unsigned int i;
-       int ret;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       int i, ret;
 
        ret = drm_atomic_helper_prepare_planes(dev, state);
        if (ret)
@@ -163,10 +164,8 @@ static int omap_atomic_commit(struct drm_device *dev,
        /* Wait until all affected CRTCs have completed previous commits and
         * mark them as pending.
         */
-       for (i = 0; i < dev->mode_config.num_crtc; ++i) {
-               if (state->crtcs[i])
-                       commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
-       }
+       for_each_crtc_in_state(state, crtc, crtc_state, i)
+               commit->crtcs |= drm_crtc_mask(crtc);
 
        wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit));
 
@@ -175,7 +174,7 @@ static int omap_atomic_commit(struct drm_device *dev,
        spin_unlock(&priv->commit.lock);
 
        /* Swap the state, this is the point of no return. */
-       drm_atomic_helper_swap_state(dev, state);
+       drm_atomic_helper_swap_state(state, true);
 
        if (nonblock)
                schedule_work(&commit->work);
@@ -203,6 +202,8 @@ static int get_connector_type(struct omap_dss_device *dssdev)
                return DRM_MODE_CONNECTOR_HDMIA;
        case OMAP_DISPLAY_TYPE_DVI:
                return DRM_MODE_CONNECTOR_DVID;
+       case OMAP_DISPLAY_TYPE_DSI:
+               return DRM_MODE_CONNECTOR_DSI;
        default:
                return DRM_MODE_CONNECTOR_Unknown;
        }
@@ -800,7 +801,6 @@ static struct drm_driver omap_drm_driver = {
        .unload = dev_unload,
        .open = dev_open,
        .lastclose = dev_lastclose,
-       .set_busid = drm_platform_set_busid,
        .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank = omap_irq_enable_vblank,
        .disable_vblank = omap_irq_disable_vblank,
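
The new commit loop walks only the CRTCs carried in the atomic state, and drm_crtc_mask() is shorthand for 1 << drm_crtc_index(crtc). The bitmask accumulation, reduced to a standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned affected[] = { 0, 2 };  /* indices of CRTCs in this commit */
	unsigned i, mask = 0;

	for (i = 0; i < sizeof(affected) / sizeof(affected[0]); i++)
		mask |= 1u << affected[i];   /* drm_crtc_mask() equivalent */

	printf("commit->crtcs = 0x%x\n", mask);  /* prints 0x5 */
	return 0;
}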
index 3f823c36891267a388db643176c3662333f4a779..dcc30a98b9d4059cb11c87792f0ef0ced9dac204 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/platform_data/omap_drm.h>
 #include <linux/types.h>
 #include <linux/wait.h>
-#include <video/omapdss.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
@@ -183,7 +182,6 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
                struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
 struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
                const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
 int omap_framebuffer_pin(struct drm_framebuffer *fb);
 void omap_framebuffer_unpin(struct drm_framebuffer *fb);
 void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
@@ -231,7 +229,6 @@ int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
                int x, int y, dma_addr_t *paddr);
 uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
 size_t omap_gem_mmap_size(struct drm_gem_object *obj);
-int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h);
 int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
 
 struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
@@ -239,17 +236,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
 struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
                struct dma_buf *buffer);
 
-static inline int align_pitch(int pitch, int width, int bpp)
-{
-       int bytespp = (bpp + 7) / 8;
-       /* in case someone tries to feed us a completely bogus stride: */
-       pitch = max(pitch, width * bytespp);
-       /* PVR needs alignment to 8 pixels.. right now that is the most
-        * restrictive stride requirement..
-        */
-       return roundup(pitch, 8 * bytespp);
-}
-
 /* map crtc to vblank mask */
 uint32_t pipe2vbl(struct drm_crtc *crtc);
 struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
index 94ec06d3d737cd137f60cc3e632068dc9a48fe17..983c8cf2441c5fde79e1167089ccdd38964f4c38 100644 (file)
@@ -17,6 +17,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/seq_file.h>
+
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 
@@ -120,17 +122,9 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
        kfree(omap_fb);
 }
 
-static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
-               struct drm_file *file_priv, unsigned flags, unsigned color,
-               struct drm_clip_rect *clips, unsigned num_clips)
-{
-       return 0;
-}
-
 static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
        .create_handle = omap_framebuffer_create_handle,
        .destroy = omap_framebuffer_destroy,
-       .dirty = omap_framebuffer_dirty,
 };
 
 static uint32_t get_linear_addr(struct plane *plane,
@@ -318,14 +312,6 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
        mutex_unlock(&omap_fb->lock);
 }
 
-struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
-{
-       struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
-       if (p >= drm_format_num_planes(fb->pixel_format))
-               return NULL;
-       return omap_fb->planes[p].bo;
-}
-
 /* iterate thru all the connectors, returning ones that are attached
  * to the same fb..
  */
index 89da41ac64d2548508f774e2819e7a986e16087a..adb10fbe918da6f0def44ce802b652155fab83e5 100644 (file)
@@ -125,9 +125,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
 
-       mode_cmd.pitches[0] = align_pitch(
-                       mode_cmd.width * ((sizes->surface_bpp + 7) / 8),
-                       mode_cmd.width, sizes->surface_bpp);
+       mode_cmd.pitches[0] =
+                       DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8);
 
        fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
        if (fbdev->ywrap_enabled) {
@@ -280,9 +279,6 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
        if (ret)
                goto fini;
 
-       /* disable all the possible outputs/crtcs before entering KMS mode */
-       drm_helper_disable_unused_functions(dev);
-
        ret = drm_fb_helper_initial_config(helper, 32);
        if (ret)
                goto fini;
index b97afc281778df99853c89da9e80bda2c0759e1c..9b3f565fd8d7d223112bab6cde246a6b12068c35 100644 (file)
@@ -17,6 +17,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/seq_file.h>
 #include <linux/shmem_fs.h>
 #include <linux/spinlock.h>
 #include <linux/pfn_t.h>
@@ -382,18 +383,6 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
        return size;
 }
 
-/* get tiled size, returns -EINVAL if not tiled buffer */
-int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
-{
-       struct omap_gem_object *omap_obj = to_omap_bo(obj);
-       if (omap_obj->flags & OMAP_BO_TILED) {
-               *w = omap_obj->width;
-               *h = omap_obj->height;
-               return 0;
-       }
-       return -EINVAL;
-}
-
 /* -----------------------------------------------------------------------------
  * Fault Handling
  */
@@ -660,7 +649,8 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 {
        union omap_gem_size gsize;
 
-       args->pitch = align_pitch(0, args->width, args->bpp);
+       args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
        args->size = PAGE_ALIGN(args->pitch * args->height);
 
        gsize = (union omap_gem_size){
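
Both pitch computations above collapse to bytes-per-line rounded up to whole bytes, DIV_ROUND_UP(width * bpp, 8), now that the PVR-driven 8-pixel alignment is gone. A quick standalone check of the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
	/* width x bpp -> pitch in bytes */
	printf("%d\n", DIV_ROUND_UP(1920 * 32, 8)); /* 7680 */
	printf("%d\n", DIV_ROUND_UP(1024 * 16, 8)); /* 2048 */
	printf("%d\n", DIV_ROUND_UP(801 * 1, 8));   /* 101: rounds up */
	return 0;
}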
index 3a7bdf1c842b8f2b3ee9215a5a542dc7f76e3ab5..85143d1b9b3168062aef554e7041956cf4f62ec0 100644 (file)
@@ -168,6 +168,7 @@ static int panel_simple_disable(struct drm_panel *panel)
 
        if (p->backlight) {
                p->backlight->props.power = FB_BLANK_POWERDOWN;
+               p->backlight->props.state |= BL_CORE_FBBLANK;
                backlight_update_status(p->backlight);
        }
 
@@ -235,6 +236,7 @@ static int panel_simple_enable(struct drm_panel *panel)
                msleep(p->desc->delay.enable);
 
        if (p->backlight) {
+               p->backlight->props.state &= ~BL_CORE_FBBLANK;
                p->backlight->props.power = FB_BLANK_UNBLANK;
                backlight_update_status(p->backlight);
        }
@@ -964,8 +966,8 @@ static const struct panel_desc innolux_zj070na_01p = {
        .num_modes = 1,
        .bpc = 6,
        .size = {
-               .width = 1024,
-               .height = 600,
+               .width = 154,
+               .height = 90,
        },
 };
 
@@ -1017,6 +1019,51 @@ static const struct panel_desc lg_lb070wv8 = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
 };
 
+static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
+       .clock = 200000,
+       .hdisplay = 1536,
+       .hsync_start = 1536 + 12,
+       .hsync_end = 1536 + 12 + 16,
+       .htotal = 1536 + 12 + 16 + 48,
+       .vdisplay = 2048,
+       .vsync_start = 2048 + 8,
+       .vsync_end = 2048 + 8 + 4,
+       .vtotal = 2048 + 8 + 4 + 8,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc lg_lp079qx1_sp0v = {
+       .modes = &lg_lp079qx1_sp0v_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 129,
+               .height = 171,
+       },
+};
+
+static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
+       .clock = 205210,
+       .hdisplay = 2048,
+       .hsync_start = 2048 + 150,
+       .hsync_end = 2048 + 150 + 5,
+       .htotal = 2048 + 150 + 5 + 5,
+       .vdisplay = 1536,
+       .vsync_start = 1536 + 3,
+       .vsync_end = 1536 + 3 + 1,
+       .vtotal = 1536 + 3 + 1 + 9,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc lg_lp097qx1_spa1 = {
+       .modes = &lg_lp097qx1_spa1_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 208,
+               .height = 147,
+       },
+};
+
 static const struct drm_display_mode lg_lp120up1_mode = {
        .clock = 162300,
        .hdisplay = 1920,
@@ -1224,6 +1271,28 @@ static const struct panel_desc qd43003c0_40 = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
 };
 
+static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
+       .clock = 271560,
+       .hdisplay = 2560,
+       .hsync_start = 2560 + 48,
+       .hsync_end = 2560 + 48 + 32,
+       .htotal = 2560 + 48 + 32 + 80,
+       .vdisplay = 1600,
+       .vsync_start = 1600 + 2,
+       .vsync_end = 1600 + 2 + 5,
+       .vtotal = 1600 + 2 + 5 + 57,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc samsung_lsn122dl01_c01 = {
+       .modes = &samsung_lsn122dl01_c01_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 263,
+               .height = 164,
+       },
+};
+
 static const struct drm_display_mode samsung_ltn101nt05_mode = {
        .clock = 54030,
        .hdisplay = 1024,
@@ -1242,8 +1311,8 @@ static const struct panel_desc samsung_ltn101nt05 = {
        .num_modes = 1,
        .bpc = 6,
        .size = {
-               .width = 1024,
-               .height = 600,
+               .width = 223,
+               .height = 125,
        },
 };
 
@@ -1270,6 +1339,53 @@ static const struct panel_desc samsung_ltn140at29_301 = {
        },
 };
 
+static const struct display_timing sharp_lq101k1ly04_timing = {
+       .pixelclock = { 60000000, 65000000, 80000000 },
+       .hactive = { 1280, 1280, 1280 },
+       .hfront_porch = { 20, 20, 20 },
+       .hback_porch = { 20, 20, 20 },
+       .hsync_len = { 10, 10, 10 },
+       .vactive = { 800, 800, 800 },
+       .vfront_porch = { 4, 4, 4 },
+       .vback_porch = { 4, 4, 4 },
+       .vsync_len = { 4, 4, 4 },
+       .flags = DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static const struct panel_desc sharp_lq101k1ly04 = {
+       .timings = &sharp_lq101k1ly04_timing,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 217,
+               .height = 136,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
+};
+
+static const struct drm_display_mode sharp_lq123p1jx31_mode = {
+       .clock = 252750,
+       .hdisplay = 2400,
+       .hsync_start = 2400 + 48,
+       .hsync_end = 2400 + 48 + 32,
+       .htotal = 2400 + 48 + 32 + 80,
+       .vdisplay = 1600,
+       .vsync_start = 1600 + 3,
+       .vsync_end = 1600 + 3 + 10,
+       .vtotal = 1600 + 3 + 10 + 33,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc sharp_lq123p1jx31 = {
+       .modes = &sharp_lq123p1jx31_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 259,
+               .height = 173,
+       },
+};
+
 static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
        .clock = 33300,
        .hdisplay = 800,
@@ -1293,6 +1409,29 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
        .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
 };
 
+static const struct drm_display_mode starry_kr122ea0sra_mode = {
+       .clock = 147000,
+       .hdisplay = 1920,
+       .hsync_start = 1920 + 16,
+       .hsync_end = 1920 + 16 + 16,
+       .htotal = 1920 + 16 + 16 + 32,
+       .vdisplay = 1200,
+       .vsync_start = 1200 + 15,
+       .vsync_end = 1200 + 15 + 2,
+       .vtotal = 1200 + 15 + 2 + 18,
+       .vrefresh = 60,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc starry_kr122ea0sra = {
+       .modes = &starry_kr122ea0sra_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 263,
+               .height = 164,
+       },
+};
+
 static const struct drm_display_mode tpk_f07a_0102_mode = {
        .clock = 33260,
        .hdisplay = 800,
@@ -1456,6 +1595,12 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "lg,lb070wv8",
                .data = &lg_lb070wv8,
+       }, {
+               .compatible = "lg,lp079qx1-sp0v",
+               .data = &lg_lp079qx1_sp0v,
+       }, {
+               .compatible = "lg,lp097qx1-spa1",
+               .data = &lg_lp097qx1_spa1,
        }, {
                .compatible = "lg,lp120up1",
                .data = &lg_lp120up1,
@@ -1480,15 +1625,27 @@ static const struct of_device_id platform_of_match[] = {
        }, {
                .compatible = "qiaodian,qd43003c0-40",
                .data = &qd43003c0_40,
+       }, {
+               .compatible = "samsung,lsn122dl01-c01",
+               .data = &samsung_lsn122dl01_c01,
        }, {
                .compatible = "samsung,ltn101nt05",
                .data = &samsung_ltn101nt05,
        }, {
                .compatible = "samsung,ltn140at29-301",
                .data = &samsung_ltn140at29_301,
+       }, {
+               .compatible = "sharp,lq101k1ly04",
+               .data = &sharp_lq101k1ly04,
+       }, {
+               .compatible = "sharp,lq123p1jx31",
+               .data = &sharp_lq123p1jx31,
        }, {
                .compatible = "shelly,sca07010-bfn-lnn",
                .data = &shelly_sca07010_bfn_lnn,
+       }, {
+               .compatible = "starry,kr122ea0sra",
+               .data = &starry_kr122ea0sra,
        }, {
                .compatible = "tpk,f07a-0102",
                .data = &tpk_f07a_0102,
@@ -1701,7 +1858,6 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
        .lanes = 4,
 };
 
-
 static const struct of_device_id dsi_of_match[] = {
        {
                .compatible = "auo,b080uan01",
index 38c2bb72e4560f72aac8edf0095acf99b2b171b3..da45b11b66b8b075fb3e9e34fb64d92588383470 100644 (file)
@@ -1,12 +1,7 @@
 config DRM_QXL
        tristate "QXL virtual GPU"
        depends on DRM && PCI
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_DEFERRED_IO
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_TTM
        select CRC32
        help
index b5d4b41361bdf3d78575e60d9c95d0b9da23a0d6..04270f5d110cd3b6d85e5a758cfc8fb00359be8d 100644 (file)
@@ -203,7 +203,7 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
 bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
 {
        if (!qxl_check_idle(qdev->release_ring)) {
-               queue_work(qdev->gc_queue, &qdev->gc_work);
+               schedule_work(&qdev->gc_work);
                if (flush)
                        flush_work(&qdev->gc_work);
                return true;
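
With the dedicated gc_queue gone, the garbage-collection work item rides the shared system workqueue. The pattern, reduced to a kernel-style sketch (the names here are illustrative, not the qxl symbols):

#include <linux/workqueue.h>

static void demo_gc_work(struct work_struct *work)
{
	/* release-ring garbage collection would run here */
}

static DECLARE_WORK(demo_work, demo_gc_work);

static void demo_queue_gc(bool flush)
{
	schedule_work(&demo_work);	/* queue on the shared system workqueue */
	if (flush)
		flush_work(&demo_work);	/* synchronous variant */
}

Teardown then only needs flush_work() instead of flush_workqueue() plus destroy_workqueue(), as the qxl_kms.c hunk further down shows.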
index 8b5d543858927d9718a34fc5c2027f36d27325ed..ad429683fef771f2f05da51518b9f0383279d7c2 100644 (file)
@@ -221,7 +221,6 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
 {
        struct drm_device *dev = crtc->dev;
        struct qxl_device *qdev = dev->dev_private;
-       struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
        struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb);
        struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb);
        struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj);
@@ -252,14 +251,14 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
        qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0,
                          &norect, one_clip_rect, inc);
 
-       drm_vblank_get(dev, qcrtc->index);
+       drm_crtc_vblank_get(crtc);
 
        if (event) {
                spin_lock_irqsave(&dev->event_lock, flags);
-               drm_send_vblank_event(dev, qcrtc->index, event);
+               drm_crtc_send_vblank_event(crtc, event);
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }
-       drm_vblank_put(dev, qcrtc->index);
+       drm_crtc_vblank_put(crtc);
 
        ret = qxl_bo_reserve(bo, false);
        if (!ret) {
@@ -730,7 +729,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
 
        drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
        qxl_crtc->index = crtc_id;
-       drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
        drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
        return 0;
 }
index 56e1d633875e2772b6a3f767e3af44e013f3519d..ffe8853951452fa4e087e65820d21df407c7f605 100644 (file)
@@ -37,7 +37,6 @@ static int alloc_clips(struct qxl_device *qdev,
  * the qxl_clip_rects. This is *not* the same as the memory allocated
  * on the device, it is offset to qxl_clip_rects.chunk.data */
 static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
-                                             struct qxl_drawable *drawable,
                                              unsigned num_clips,
                                              struct qxl_bo *clips_bo)
 {
@@ -136,6 +135,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
                                 * correctly globally, since that would require
                                 * tracking all of our palettes. */
        ret = qxl_bo_kmap(palette_bo, (void **)&pal);
+       if (ret)
+               return ret;
        pal->num_ents = 2;
        pal->unique = unique++;
        if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -349,7 +350,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
        if (ret)
                goto out_release_backoff;
 
-       rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+       rects = drawable_set_clipping(qdev, num_clips, clips_bo);
        if (!rects)
                goto out_release_backoff;
 
index dc9df5fe50baae2929d53a6ba0d5dc271618d218..460bbceae297af4d3ee13f24a2b3ea764aa4c23f 100644 (file)
@@ -256,7 +256,7 @@ static struct drm_driver qxl_driver = {
        .gem_prime_vmap = qxl_gem_prime_vmap,
        .gem_prime_vunmap = qxl_gem_prime_vunmap,
        .gem_prime_mmap = qxl_gem_prime_mmap,
-       .gem_free_object = qxl_gem_object_free,
+       .gem_free_object_unlocked = qxl_gem_object_free,
        .gem_open_object = qxl_gem_object_open,
        .gem_close_object = qxl_gem_object_close,
        .fops = &qxl_fops,
index 3ad6604b34ce76972b52fc67c593b323eee2897f..8e633caa40787d48192292c8620272dd843946cd 100644 (file)
@@ -321,7 +321,6 @@ struct qxl_device {
        struct qxl_bo *current_release_bo[3];
        int current_release_bo_offset[3];
 
-       struct workqueue_struct *gc_queue;
        struct work_struct gc_work;
 
        struct drm_property *hotplug_mode_update_property;
index 5ea57f6320b880e78b6183965751198350134e3e..df2657051afd7c7eaa6de39683b99f6190f214db 100644 (file)
@@ -131,10 +131,6 @@ static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
        int ret;
        int aligned_size, size;
        int height = mode_cmd->height;
-       int bpp;
-       int depth;
-
-       drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
 
        size = mode_cmd->pitches[0] * height;
        aligned_size = ALIGN(size, PAGE_SIZE);
index 2319800b7addc5af25cda7d33a5f186d7ea4c69a..e642242728c0b236caa0b066786f6b26cd6c8cb3 100644 (file)
@@ -258,7 +258,6 @@ static int qxl_device_init(struct qxl_device *qdev,
                 (unsigned long)qdev->surfaceram_size);
 
 
-       qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
        INIT_WORK(&qdev->gc_work, qxl_gc_work);
 
        return 0;
@@ -270,10 +269,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
                qxl_bo_unref(&qdev->current_release_bo[0]);
        if (qdev->current_release_bo[1])
                qxl_bo_unref(&qdev->current_release_bo[1]);
-       flush_workqueue(qdev->gc_queue);
-       destroy_workqueue(qdev->gc_queue);
-       qdev->gc_queue = NULL;
-
+       flush_work(&qdev->gc_work);
        qxl_ring_free(qdev->command_ring);
        qxl_ring_free(qdev->cursor_ring);
        qxl_ring_free(qdev->release_ring);
@@ -310,10 +306,6 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags)
        struct qxl_device *qdev;
        int r;
 
-       /* require kms */
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               return -ENODEV;
-
        qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
        if (qdev == NULL)
                return -ENOMEM;
index 4efa8e261baf59546ca24eb39920bc4159358ab7..f599cd073b722cd4c9db4c2634582dbe901120d3 100644 (file)
@@ -96,7 +96,7 @@ retry:
                        return 0;
 
                if (have_drawable_releases && sc > 300) {
-                       FENCE_WARN(fence, "failed to wait on release %d "
+                       FENCE_WARN(fence, "failed to wait on release %llu "
                                          "after spincount %d\n",
                                          fence->context & ~0xf0000000, sc);
                        goto signaled;
index 0738d74c8d049faf18f0153378de60a1978a49e7..d50c9679e631a8e8fc226c382cdb9f88fee64fc9 100644 (file)
@@ -350,11 +350,19 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
                       struct ttm_mem_reg *new_mem)
 {
        struct ttm_mem_reg *old_mem = &bo->mem;
+       int ret;
+
+       ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+       if (ret)
+               return ret;
+
+
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                qxl_move_null(bo, new_mem);
                return 0;
        }
-       return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+       return ttm_bo_move_memcpy(bo, evict, interruptible,
+                                 no_wait_gpu, new_mem);
 }
 
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
index 2e216e2ea78c3535a07b761327c04c345a6667ff..a97abc8af657e879dc0f7ee3b29334b1352cffb0 100644 (file)
@@ -276,14 +276,14 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
                        atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
                atombios_blank_crtc(crtc, ATOM_DISABLE);
                if (dev->num_crtcs > radeon_crtc->crtc_id)
-                       drm_vblank_on(dev, radeon_crtc->crtc_id);
+                       drm_crtc_vblank_on(crtc);
                radeon_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
                if (dev->num_crtcs > radeon_crtc->crtc_id)
-                       drm_vblank_off(dev, radeon_crtc->crtc_id);
+                       drm_crtc_vblank_off(crtc);
                if (radeon_crtc->enabled)
                        atombios_blank_crtc(crtc, ATOM_ENABLE);
                if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                /* use frac fb div on RS780/RS880 */
-               if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+               if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+                   && !radeon_crtc->ss_enabled)
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
@@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                        if (radeon_crtc->ss.refdiv) {
                                radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
                                radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
-                               if (ASIC_IS_AVIVO(rdev))
+                               if (rdev->family >= CHIP_RV770)
                                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                        }
                }
index 587cae4e73c9abeeb65d2187b9e700bb8f0fa442..56bb758f4e332dbdf45d321306ae7875bb7a00d8 100644 (file)
@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
                case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
                case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+               case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
                        if (dig->backlight_level == 0)
                                atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
                        else {
index ba192a35c60771cbcd9461b6d127379ca17295b2..0c1b9ff433af706c15347467a756a7f71e36746a 100644 (file)
@@ -53,6 +53,7 @@ MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
 MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
 MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
 MODULE_FIRMWARE("radeon/HAWAII_me.bin");
@@ -72,6 +73,7 @@ MODULE_FIRMWARE("radeon/hawaii_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
 MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
 MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
 MODULE_FIRMWARE("radeon/KAVERI_me.bin");
@@ -1990,12 +1992,17 @@ static int cik_init_microcode(struct radeon_device *rdev)
        int new_fw = 0;
        int err;
        int num_fw;
+       bool new_smc = false;
 
        DRM_DEBUG("\n");
 
        switch (rdev->family) {
        case CHIP_BONAIRE:
                chip_name = "BONAIRE";
+               if ((rdev->pdev->revision == 0x80) ||
+                   (rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->device == 0x665f))
+                       new_smc = true;
                new_chip_name = "bonaire";
                pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
                me_req_size = CIK_ME_UCODE_SIZE * 4;
@@ -2010,6 +2017,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_HAWAII:
                chip_name = "HAWAII";
+               if (rdev->pdev->revision == 0x80)
+                       new_smc = true;
                new_chip_name = "hawaii";
                pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
                me_req_size = CIK_ME_UCODE_SIZE * 4;
@@ -2259,7 +2268,10 @@ static int cik_init_microcode(struct radeon_device *rdev)
                        }
                }
 
-               snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
+               if (new_smc)
+                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
+               else
+                       snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
                err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
                if (err) {
                        snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
@@ -8354,7 +8366,8 @@ static int cik_startup(struct radeon_device *rdev)
                }
        }
        rdev->rlc.cs_data = ci_cs_data;
-       rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+       rdev->rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
+       rdev->rlc.cp_table_size += 64 * 1024; /* GDS */
        r = sumo_rlc_init(rdev);
        if (r) {
                DRM_ERROR("Failed to init rlc BOs!\n");
index 0d3f744de35aa4c7fb5a8ad3389d896a719b2a60..d960d3915408963569e6acbfe6612b70af631209 100644 (file)
@@ -2209,6 +2209,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                }
                break;
        }
+       case PACKET3_PFP_SYNC_ME:
+               if (pkt->count) {
+                       DRM_ERROR("bad PFP_SYNC_ME\n");
+                       return -EINVAL;
+               }
+               break;
        case PACKET3_SURFACE_SYNC:
                if (pkt->count != 3) {
                        DRM_ERROR("bad SURFACE_SYNC\n");
@@ -3381,6 +3387,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
        case PACKET3_MPEG_INDEX:
        case PACKET3_WAIT_REG_MEM:
        case PACKET3_MEM_WRITE:
+       case PACKET3_PFP_SYNC_ME:
        case PACKET3_SURFACE_SYNC:
        case PACKET3_EVENT_WRITE:
        case PACKET3_EVENT_WRITE_EOP:
index 0b174e14e9a688a84da9fbf575253fb72a868a99..c8e3d394cde70033798b696305da88c38a9c0c9c 100644 (file)
                 */
 #              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)
 #              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29)
+#define        PACKET3_PFP_SYNC_ME                             0x42
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
index 80b24a495d6c5805e7e12ed309b7008a77f536d5..5633ee3eb46e7d3d9a7197b2c5c909db326ba03b 100644 (file)
@@ -2386,7 +2386,7 @@ struct radeon_device {
        struct radeon_mman              mman;
        struct radeon_fence_driver      fence_drv[RADEON_NUM_RINGS];
        wait_queue_head_t               fence_queue;
-       unsigned                        fence_context;
+       u64                             fence_context;
        struct mutex                    ring_lock;
        struct radeon_ring              ring[RADEON_NUM_RINGS];
        bool                            ib_pool_ready;
index 95f4fea893021cb8f233244404fed6878742dd3b..86dcdf38b732176ec94865d213648953e344cf04 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <linux/pci.h>
+#include <linux/delay.h>
 
 #include "radeon_acpi.h"
 
@@ -27,6 +28,7 @@ struct radeon_atpx_functions {
 struct radeon_atpx {
        acpi_handle handle;
        struct radeon_atpx_functions functions;
+       bool is_hybrid;
 };
 
 static struct radeon_atpx_priv {
@@ -62,6 +64,14 @@ bool radeon_has_atpx(void) {
        return radeon_atpx_priv.atpx_detected;
 }
 
+bool radeon_has_atpx_dgpu_power_cntl(void) {
+       return radeon_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool radeon_is_atpx_hybrid(void) {
+       return radeon_atpx_priv.atpx.is_hybrid;
+}
+
 /**
  * radeon_atpx_call - call an ATPX method
  *
@@ -141,18 +151,12 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
  */
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
-       /* make sure required functions are enabled */
-       /* dGPU power control is required */
-       if (atpx->functions.power_cntl == false) {
-               printk("ATPX dGPU power cntl not present, forcing\n");
-               atpx->functions.power_cntl = true;
-       }
+       u32 valid_bits = 0;
 
        if (atpx->functions.px_params) {
                union acpi_object *info;
                struct atpx_px_params output;
                size_t size;
-               u32 valid_bits;
 
                info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
                if (!info)
@@ -171,19 +175,42 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
                memcpy(&output, info->buffer.pointer, size);
 
                valid_bits = output.flags & output.valid_flags;
-               /* if separate mux flag is set, mux controls are required */
-               if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
-                       atpx->functions.i2c_mux_cntl = true;
-                       atpx->functions.disp_mux_cntl = true;
-               }
-               /* if any outputs are muxed, mux controls are required */
-               if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
-                                 ATPX_TV_SIGNAL_MUXED |
-                                 ATPX_DFP_SIGNAL_MUXED))
-                       atpx->functions.disp_mux_cntl = true;
 
                kfree(info);
        }
+
+       /* if separate mux flag is set, mux controls are required */
+       if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+               atpx->functions.i2c_mux_cntl = true;
+               atpx->functions.disp_mux_cntl = true;
+       }
+       /* if any outputs are muxed, mux controls are required */
+       if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+                         ATPX_TV_SIGNAL_MUXED |
+                         ATPX_DFP_SIGNAL_MUXED))
+               atpx->functions.disp_mux_cntl = true;
+
+       /* some bioses set these bits rather than flagging power_cntl as supported */
+       if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+                         ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+               atpx->functions.power_cntl = true;
+
+       atpx->is_hybrid = false;
+       if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+               printk("ATPX Hybrid Graphics\n");
+#if 1
+               /* This is a temporary hack until the D3 cold support
+                * makes it upstream.  The ATPX power_control method seems
+                * to still work even if the system should be using
+                * the new standardized hybrid D3 cold ACPI interface.
+                */
+               atpx->functions.power_cntl = true;
+#else
+               atpx->functions.power_cntl = false;
+#endif
+               atpx->is_hybrid = true;
+       }
+
        return 0;
 }
 
@@ -258,6 +285,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
                if (!info)
                        return -EIO;
                kfree(info);
+
+               /* 200ms delay is required after off */
+               if (state == 0)
+                       msleep(200);
        }
        return 0;
 }
index 81a63d7f5cd9df6e41ef7748d4635a2cddf5b508..b79f3b002471a5c45b9ecbb109bafb075c973d50 100644 (file)
@@ -2064,7 +2064,6 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                           RADEON_OUTPUT_CSC_BYPASS);
                        /* no HPD on analog connectors */
                        radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
                        connector->interlace_allowed = true;
                        connector->doublescan_allowed = true;
                        break;
@@ -2314,8 +2313,10 @@ radeon_add_atom_connector(struct drm_device *dev,
        }
 
        if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
-               if (i2c_bus->valid)
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+               if (i2c_bus->valid) {
+                       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                                           DRM_CONNECTOR_POLL_DISCONNECT;
+               }
        } else
                connector->polled = DRM_CONNECTOR_POLL_HPD;
 
@@ -2391,7 +2392,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
                                              1);
                /* no HPD on analog connectors */
                radeon_connector->hpd.hpd = RADEON_HPD_NONE;
-               connector->polled = DRM_CONNECTOR_POLL_CONNECT;
                connector->interlace_allowed = true;
                connector->doublescan_allowed = true;
                break;
@@ -2476,10 +2476,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
        }
 
        if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
-               if (i2c_bus->valid)
-                       connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+               if (i2c_bus->valid) {
+                       connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+                                           DRM_CONNECTOR_POLL_DISCONNECT;
+               }
        } else
                connector->polled = DRM_CONNECTOR_POLL_HPD;
+
        connector->display_info.subpixel_order = subpixel_order;
        drm_connector_register(connector);
 }
index e721e6b2766e4973ca3e0647e9e75c6b10f85622..a00dd2f745271ec760bff14d5455571388313ebd 100644 (file)
@@ -30,6 +30,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/radeon_drm.h>
+#include <linux/pm_runtime.h>
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
@@ -630,6 +631,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 /*
  * GPU helpers function.
  */
+
+/**
+ * radeon_device_is_virtual - check if we are running in a virtual environment
+ *
+ * Check if the asic has been passed through to a VM (all asics).
+ * Used at driver startup.
+ * Returns true if virtual or false if not.
+ */
+static bool radeon_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+       return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+       return false;
+#endif
+}
+
 /**
  * radeon_card_posted - check if the hw has already been initialized
  *
@@ -643,6 +661,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
 {
        uint32_t reg;
 
+       /* for pass through, always force asic_init */
+       if (radeon_device_is_virtual())
+               return false;
+
        /* required for EFI mode on macbook2,1 which uses an r5xx asic */
        if (efi_enabled(EFI_BOOT) &&
            (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
@@ -1505,6 +1527,9 @@ int radeon_device_init(struct radeon_device *rdev,
        return 0;
 
 failed:
+       /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
+       if (radeon_is_px(ddev))
+               pm_runtime_put_noidle(ddev->dev);
        if (runtime)
                vga_switcheroo_fini_domain_pm_ops(rdev->dev);
        return r;
@@ -1631,7 +1656,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
        radeon_agp_suspend(rdev);
 
        pci_save_state(dev->pdev);
-       if (freeze && rdev->family >= CHIP_R600) {
+       if (freeze && rdev->family >= CHIP_CEDAR) {
                rdev->asic->asic_reset(rdev, true);
                pci_restore_state(dev->pdev);
        } else if (suspend) {
index 6a41b49826470f8cffe99fb888c4f6a4529ab787..5f1cd695c9656e5d558fa4ed2c90203c39e7bfc0 100644 (file)
@@ -231,19 +231,21 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
        *blue = radeon_crtc->lut_b[regno] << 6;
 }
 
-static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-                                 u16 *blue, uint32_t start, uint32_t size)
+static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                u16 *blue, uint32_t size)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       int end = (start + size > 256) ? 256 : start + size, i;
+       int i;
 
        /* userspace palettes are always correct as is */
-       for (i = start; i < end; i++) {
+       for (i = 0; i < size; i++) {
                radeon_crtc->lut_r[i] = red[i] >> 6;
                radeon_crtc->lut_g[i] = green[i] >> 6;
                radeon_crtc->lut_b[i] = blue[i] >> 6;
        }
        radeon_crtc_load_lut(crtc);
+
+       return 0;
 }
 
 static void radeon_crtc_destroy(struct drm_crtc *crtc)
@@ -381,7 +383,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 
        spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 
-       drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+       drm_crtc_vblank_put(&radeon_crtc->base);
        radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
        queue_work(radeon_crtc->flip_queue, &work->unpin_work);
 }
@@ -598,7 +600,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        }
        work->base = base;
 
-       r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
+       r = drm_crtc_vblank_get(crtc);
        if (r) {
                DRM_ERROR("failed to get vblank before flip\n");
                goto pflip_cleanup;
@@ -625,7 +627,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        return 0;
 
 vblank_cleanup:
-       drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
+       drm_crtc_vblank_put(crtc);
 
 pflip_cleanup:
        if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
@@ -688,6 +690,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
        pm_runtime_put_autosuspend(dev->dev);
        return ret;
 }
+
 static const struct drm_crtc_funcs radeon_crtc_funcs = {
        .cursor_set2 = radeon_crtc_cursor_set2,
        .cursor_move = radeon_crtc_cursor_move,
@@ -1708,6 +1711,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
                radeon_afmt_fini(rdev);
                drm_kms_helper_poll_fini(rdev->ddev);
                radeon_hpd_fini(rdev);
+               drm_crtc_force_disable_all(rdev->ddev);
                drm_mode_config_cleanup(rdev->ddev);
                rdev->mode_info.mode_config_initialized = false;
        }
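
The reworked radeon_crtc_gamma_set() in this file keeps the >> 6 narrowing: userspace supplies 16-bit components while the legacy radeon LUT stores 10 bits per channel. A standalone sketch of that conversion:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t red16 = 0xffff;           /* full-scale 16-bit component */
	uint16_t lut_r = red16 >> 6;       /* 10-bit hardware LUT entry */

	printf("0x%04x -> 0x%03x (%u)\n", red16, lut_r, lut_r); /* 0x3ff, 1023 */
	return 0;
}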
index b55aa740171f1d1d9cecdbf55e97d51098af0f22..c01a7c6abb491c4227ad02fd6525813a7a9d3d55 100644 (file)
 #include "radeon_drv.h"
 
 #include <drm/drm_pciids.h>
-#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
-#include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 #include <drm/drm_gem.h>
 
  *   2.43.0 - RADEON_INFO_GPU_RESET_COUNTER
  *   2.44.0 - SET_APPEND_CNT packet3 support
  *   2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
+ *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       45
+#define KMS_DRIVER_MINOR       46
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -164,9 +163,13 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
 #if defined(CONFIG_VGA_SWITCHEROO)
 void radeon_register_atpx_handler(void);
 void radeon_unregister_atpx_handler(void);
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
 #else
 static inline void radeon_register_atpx_handler(void) {}
 static inline void radeon_unregister_atpx_handler(void) {}
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
 #endif
 
 int radeon_no_wb;
@@ -340,13 +343,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
        if (ret == -EPROBE_DEFER)
                return ret;
 
-       /*
-        * apple-gmux is needed on dual GPU MacBook Pro
-        * to probe the panel if we're the inactive GPU.
-        */
-       if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
-           apple_gmux_present() && pdev != vga_default_device() &&
-           !vga_switcheroo_handler_flags())
+       if (vga_switcheroo_client_probe_defer(pdev))
                return -EPROBE_DEFER;
 
        /* Get rid of things like offb */
@@ -412,7 +409,10 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_ignore_hotplug(pdev);
-       pci_set_power_state(pdev, PCI_D3cold);
+       if (radeon_is_atpx_hybrid())
+               pci_set_power_state(pdev, PCI_D3cold);
+       else if (!radeon_has_atpx_dgpu_power_cntl())
+               pci_set_power_state(pdev, PCI_D3hot);
        drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 
        return 0;
@@ -429,7 +429,9 @@ static int radeon_pmops_runtime_resume(struct device *dev)
 
        drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-       pci_set_power_state(pdev, PCI_D0);
+       if (radeon_is_atpx_hybrid() ||
+           !radeon_has_atpx_dgpu_power_cntl())
+               pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        ret = pci_enable_device(pdev);
        if (ret)
index 414953c46a38fad5327b279883e8ba43fbc193f3..835563c1f0edf65e5fbf5545aa039738046d48f4 100644 (file)
@@ -63,7 +63,10 @@ int radeon_driver_unload_kms(struct drm_device *dev)
        if (rdev->rmmio == NULL)
                goto done_free;
 
-       pm_runtime_get_sync(dev->dev);
+       if (radeon_is_px(dev)) {
+               pm_runtime_get_sync(dev->dev);
+               pm_runtime_forbid(dev->dev);
+       }
 
        radeon_kfd_device_fini(rdev);
 
index 478d4099b0d0e2b2d995d47bb74322ab46adf73b..d0de4022fff97ece38ca92011372efd83ed20308 100644 (file)
@@ -332,14 +332,14 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
                        WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
                }
                if (dev->num_crtcs > radeon_crtc->crtc_id)
-                       drm_vblank_on(dev, radeon_crtc->crtc_id);
+                       drm_crtc_vblank_on(crtc);
                radeon_crtc_load_lut(crtc);
                break;
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
                if (dev->num_crtcs > radeon_crtc->crtc_id)
-                       drm_vblank_off(dev, radeon_crtc->crtc_id);
+                       drm_crtc_vblank_off(crtc);
                if (radeon_crtc->crtc_id)
                        WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
                else {
index 38226d925a5b1e0b034a7288384df7cdf1778c32..4b6542538ff91581272deadb6e971c50f2429033 100644 (file)
@@ -246,6 +246,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
 
 static void radeon_pm_set_clocks(struct radeon_device *rdev)
 {
+       struct drm_crtc *crtc;
        int i, r;
 
        /* no need to take locks, etc. if nothing's going to change */
@@ -274,26 +275,30 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
        radeon_unmap_vram_bos(rdev);
 
        if (rdev->irq.installed) {
-               for (i = 0; i < rdev->num_crtc; i++) {
+               i = 0;
+               drm_for_each_crtc(crtc, rdev->ddev) {
                        if (rdev->pm.active_crtcs & (1 << i)) {
                                /* This can fail if a modeset is in progress */
-                               if (drm_vblank_get(rdev->ddev, i) == 0)
+                               if (drm_crtc_vblank_get(crtc) == 0)
                                        rdev->pm.req_vblank |= (1 << i);
                                else
                                        DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
                                                         i);
                        }
+                       i++;
                }
        }
 
        radeon_set_power_state(rdev);
 
        if (rdev->irq.installed) {
-               for (i = 0; i < rdev->num_crtc; i++) {
+               i = 0;
+               drm_for_each_crtc(crtc, rdev->ddev) {
                        if (rdev->pm.req_vblank & (1 << i)) {
                                rdev->pm.req_vblank &= ~(1 << i);
-                               drm_vblank_put(rdev->ddev, i);
+                               drm_crtc_vblank_put(crtc);
                        }
+                       i++;
                }
        }
 
index 590b0377fbe267f859b7cc7649b309b9c34d4436..ffdad81ef9647cbc96612e3d0dc4d41b85009dff 100644 (file)
@@ -300,8 +300,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
        if (IS_ERR(fence))
                return PTR_ERR(fence);
 
-       r = ttm_bo_move_accel_cleanup(bo, &fence->base,
-                                     evict, no_wait_gpu, new_mem);
+       r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
        radeon_fence_unref(&fence);
        return r;
 }
@@ -403,6 +402,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;
 
+       r = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+       if (r)
+               return r;
+
        /* Can't move a pinned BO */
        rbo = container_of(bo, struct radeon_bo, tbo);
        if (WARN_ON_ONCE(rbo->pin_count > 0))
@@ -441,7 +444,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 
        if (r) {
 memcpy:
-               r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+               r = ttm_bo_move_memcpy(bo, evict, interruptible,
+                                      no_wait_gpu, new_mem);
                if (r) {
                        return r;
                }
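
The TTM change above hoists the GPU-idle wait to the top of the move callback, so ttm_bo_move_accel_cleanup() loses its no_wait_gpu argument while ttm_bo_move_memcpy() gains interruptible. A sketch of the reordered flow, mirroring the signatures used in the hunk (example_bo_move is illustrative):

    static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
    			   bool interruptible, bool no_wait_gpu,
    			   struct ttm_mem_reg *new_mem)
    {
    	int r;

    	/* wait once, up front, instead of inside each move helper */
    	r = ttm_bo_wait(bo, interruptible, no_wait_gpu);
    	if (r)
    		return r;

    	/* ... try the accelerated copy here ... */

    	return ttm_bo_move_memcpy(bo, evict, interruptible,
    				  no_wait_gpu, new_mem);
    }
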
index b30e719dd56d87f1a195c443de85b8fc48a462b7..2523ca96c6c7d5e36fd9633cab723f2656d241fb 100644 (file)
@@ -50,6 +50,7 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
 MODULE_FIRMWARE("radeon/tahiti_mc.bin");
 MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
 MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
 MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -65,6 +66,7 @@ MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
 MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
 MODULE_FIRMWARE("radeon/VERDE_me.bin");
@@ -80,6 +82,7 @@ MODULE_FIRMWARE("radeon/verde_ce.bin");
 MODULE_FIRMWARE("radeon/verde_mc.bin");
 MODULE_FIRMWARE("radeon/verde_rlc.bin");
 MODULE_FIRMWARE("radeon/verde_smc.bin");
+MODULE_FIRMWARE("radeon/verde_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
 MODULE_FIRMWARE("radeon/OLAND_me.bin");
@@ -95,6 +98,7 @@ MODULE_FIRMWARE("radeon/oland_ce.bin");
 MODULE_FIRMWARE("radeon/oland_mc.bin");
 MODULE_FIRMWARE("radeon/oland_rlc.bin");
 MODULE_FIRMWARE("radeon/oland_smc.bin");
+MODULE_FIRMWARE("radeon/oland_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
 MODULE_FIRMWARE("radeon/HAINAN_me.bin");
@@ -110,6 +114,7 @@ MODULE_FIRMWARE("radeon/hainan_ce.bin");
 MODULE_FIRMWARE("radeon/hainan_mc.bin");
 MODULE_FIRMWARE("radeon/hainan_rlc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
 
 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
 static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1653,12 +1658,16 @@ static int si_init_microcode(struct radeon_device *rdev)
        char fw_name[30];
        int err;
        int new_fw = 0;
+       bool new_smc = false;
 
        DRM_DEBUG("\n");
 
        switch (rdev->family) {
        case CHIP_TAHITI:
                chip_name = "TAHITI";
+               /* XXX: figure out which Tahitis need the new ucode */
+               if (0)
+                       new_smc = true;
                new_chip_name = "tahiti";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1670,6 +1679,13 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_PITCAIRN:
                chip_name = "PITCAIRN";
+               if ((rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->device == 0x6810) ||
+                   (rdev->pdev->device == 0x6811) ||
+                   (rdev->pdev->device == 0x6816) ||
+                   (rdev->pdev->device == 0x6817) ||
+                   (rdev->pdev->device == 0x6806))
+                       new_smc = true;
                new_chip_name = "pitcairn";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1681,6 +1697,16 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_VERDE:
                chip_name = "VERDE";
+               if ((rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->revision == 0x87) ||
+                   (rdev->pdev->device == 0x6820) ||
+                   (rdev->pdev->device == 0x6821) ||
+                   (rdev->pdev->device == 0x6822) ||
+                   (rdev->pdev->device == 0x6823) ||
+                   (rdev->pdev->device == 0x682A) ||
+                   (rdev->pdev->device == 0x682B))
+                       new_smc = true;
                new_chip_name = "verde";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1692,6 +1718,13 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_OLAND:
                chip_name = "OLAND";
+               if ((rdev->pdev->revision == 0xC7) ||
+                   (rdev->pdev->revision == 0x80) ||
+                   (rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->device == 0x6604) ||
+                   (rdev->pdev->device == 0x6605))
+                       new_smc = true;
                new_chip_name = "oland";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1702,6 +1735,13 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_HAINAN:
                chip_name = "HAINAN";
+               if ((rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->revision == 0xC3) ||
+                   (rdev->pdev->device == 0x6664) ||
+                   (rdev->pdev->device == 0x6665) ||
+                   (rdev->pdev->device == 0x6667))
+                       new_smc = true;
                new_chip_name = "hainan";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1847,7 +1887,10 @@ static int si_init_microcode(struct radeon_device *rdev)
                }
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
+       if (new_smc)
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
        err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
        if (err) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
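
The SMC microcode selection above condenses to: boards flagged new_smc request the "<chip>_k_smc.bin" image, everything else keeps the plain lower-case name, and the legacy upper-case name remains the final fallback so older linux-firmware trees keep working. A sketch of the first step (example_load_smc is illustrative, not a function in the patch):

    static int example_load_smc(struct radeon_device *rdev, bool new_smc,
    			    const char *new_chip_name)
    {
    	char fw_name[30];

    	if (new_smc)
    		snprintf(fw_name, sizeof(fw_name),
    			 "radeon/%s_k_smc.bin", new_chip_name);
    	else
    		snprintf(fw_name, sizeof(fw_name),
    			 "radeon/%s_smc.bin", new_chip_name);

    	return request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
    }
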
index 7fc3ca5ce6c7365065b1c563ac05137bba1640ea..4c2fd056dd6da6c207e5ac944788cbc3ac2b3028 100644 (file)
@@ -6,7 +6,6 @@ config DRM_RCAR_DU
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
-       select DRM_KMS_FB_HELPER
        select VIDEOMODE_HELPERS
        help
          Choose this option if you have an R-Car chipset.
index 827711e282269ea2656e6b16d8fafee43e20c3a0..d3b44651061aa1799988d3d741986c8ed8bc644d 100644 (file)
@@ -7,8 +7,8 @@ rcar-du-drm-y := rcar_du_crtc.o \
                 rcar_du_plane.o \
                 rcar_du_vgacon.o
 
-rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI)    += rcar_du_hdmicon.o \
-                                          rcar_du_hdmienc.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI)    += rcar_du_hdmienc.o
+
 rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS)    += rcar_du_lvdsenc.o
 
 rcar-du-drm-$(CONFIG_DRM_RCAR_VSP)     += rcar_du_vsp.o
index 0d8bdda736f9e49d3fe173a219b93642e3e9c68c..e39fcef2e033c71dd969294f79420a981944b245 100644 (file)
@@ -552,7 +552,7 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
        rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
 
        if (status & DSSR_FRM) {
-               drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+               drm_crtc_handle_vblank(&rcrtc->crtc);
                rcar_du_crtc_finish_page_flip(rcrtc);
                ret = IRQ_HANDLED;
        }
index fb9242d27883d09dde905891ed0118fe75a95c4e..899ef7a2a7b4eaff0748c41e7a8e03cee201a2ee 100644 (file)
@@ -217,7 +217,7 @@ static struct drm_driver rcar_du_driver = {
        .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = rcar_du_enable_vblank,
        .disable_vblank         = rcar_du_disable_vblank,
-       .gem_free_object        = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
@@ -278,7 +278,6 @@ static int rcar_du_remove(struct platform_device *pdev)
        struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
        struct drm_device *ddev = rcdu->ddev;
 
-       drm_connector_unregister_all(ddev);
        drm_dev_unregister(ddev);
 
        if (rcdu->fbdev)
@@ -320,8 +319,6 @@ static int rcar_du_probe(struct platform_device *pdev)
        if (!ddev)
                return -ENOMEM;
 
-       drm_dev_set_unique(ddev, dev_name(&pdev->dev));
-
        rcdu->ddev = ddev;
        ddev->dev_private = rcdu;
 
@@ -339,15 +336,15 @@ static int rcar_du_probe(struct platform_device *pdev)
         * disabled for all CRTCs.
         */
        ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to initialize vblank\n");
+       if (ret < 0)
                goto error;
-       }
 
        /* DRM/KMS objects */
        ret = rcar_du_modeset_init(rcdu);
        if (ret < 0) {
-               dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to initialize DRM/KMS (%d)\n", ret);
                goto error;
        }
 
@@ -360,10 +357,6 @@ static int rcar_du_probe(struct platform_device *pdev)
        if (ret)
                goto error;
 
-       ret = drm_connector_register_all(ddev);
-       if (ret < 0)
-               goto error;
-
        DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
 
        return 0;
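
Two deletions in this file lean on core changes from this merge window: drm_dev_register() now registers connectors itself, and the unique name defaults to the parent device name, which makes the drm_dev_set_unique() call unnecessary. A hedged sketch of the resulting minimal probe (example_driver and example_probe are stand-ins, not names from the patch):

    static struct drm_driver example_driver;	/* stand-in for the real driver */

    static int example_probe(struct platform_device *pdev)
    {
    	struct drm_device *ddev;
    	int ret;

    	ddev = drm_dev_alloc(&example_driver, &pdev->dev);
    	if (!ddev)
    		return -ENOMEM;

    	/* ... modeset and vblank init would go here ... */

    	ret = drm_dev_register(ddev, 0);	/* also registers connectors now */
    	if (ret)
    		drm_dev_unref(ddev);
    	return ret;
    }
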
index 4e939e41f030caeb82dcd705d98f7cbce7ff9f3d..ab8645c57e2d5f94a0a36b41910ef1db6861d638 100644 (file)
 
 #include "rcar_du_drv.h"
 #include "rcar_du_encoder.h"
-#include "rcar_du_hdmicon.h"
 #include "rcar_du_hdmienc.h"
 #include "rcar_du_kms.h"
 #include "rcar_du_lvdscon.h"
 #include "rcar_du_lvdsenc.h"
 #include "rcar_du_vgacon.h"
 
-/* -----------------------------------------------------------------------------
- * Common connector functions
- */
-
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector)
-{
-       struct rcar_du_connector *rcon = to_rcar_connector(connector);
-
-       return rcar_encoder_to_drm_encoder(rcon->encoder);
-}
-
 /* -----------------------------------------------------------------------------
  * Encoder
  */
@@ -186,7 +173,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                break;
 
        case DRM_MODE_ENCODER_TMDS:
-               ret = rcar_du_hdmi_connector_init(rcdu, renc);
+               /* connector managed by the bridge driver */
                break;
 
        default:
index 719b6f2a031ccf0756ebd718307196b23d3f22b6..7fc10a9c34c3c0ee803d3ad95fb39c1f1f0a080f 100644 (file)
@@ -15,7 +15,6 @@
 #define __RCAR_DU_ENCODER_H__
 
 #include <drm/drm_crtc.h>
-#include <drm/drm_encoder_slave.h>
 
 struct rcar_du_device;
 struct rcar_du_hdmienc;
@@ -30,16 +29,16 @@ enum rcar_du_encoder_type {
 };
 
 struct rcar_du_encoder {
-       struct drm_encoder_slave slave;
+       struct drm_encoder base;
        enum rcar_du_output output;
        struct rcar_du_hdmienc *hdmi;
        struct rcar_du_lvdsenc *lvds;
 };
 
 #define to_rcar_encoder(e) \
-       container_of(e, struct rcar_du_encoder, slave.base)
+       container_of(e, struct rcar_du_encoder, base)
 
-#define rcar_encoder_to_drm_encoder(e) (&(e)->slave.base)
+#define rcar_encoder_to_drm_encoder(e) (&(e)->base)
 
 struct rcar_du_connector {
        struct drm_connector connector;
@@ -49,9 +48,6 @@ struct rcar_du_connector {
 #define to_rcar_connector(c) \
        container_of(c, struct rcar_du_connector, connector)
 
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector);
-
 int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                         enum rcar_du_encoder_type type,
                         enum rcar_du_output output,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
deleted file mode 100644 (file)
index 6c92714..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * R-Car Display Unit HDMI Connector
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_encoder_slave.h>
-
-#include "rcar_du_drv.h"
-#include "rcar_du_encoder.h"
-#include "rcar_du_hdmicon.h"
-#include "rcar_du_kms.h"
-
-#define to_slave_funcs(e)      (to_rcar_encoder(e)->slave.slave_funcs)
-
-static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector)
-{
-       struct rcar_du_connector *con = to_rcar_connector(connector);
-       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
-       const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
-       if (sfuncs->get_modes == NULL)
-               return 0;
-
-       return sfuncs->get_modes(encoder, connector);
-}
-
-static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
-                                            struct drm_display_mode *mode)
-{
-       struct rcar_du_connector *con = to_rcar_connector(connector);
-       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
-       const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
-       if (sfuncs->mode_valid == NULL)
-               return MODE_OK;
-
-       return sfuncs->mode_valid(encoder, mode);
-}
-
-static const struct drm_connector_helper_funcs connector_helper_funcs = {
-       .get_modes = rcar_du_hdmi_connector_get_modes,
-       .mode_valid = rcar_du_hdmi_connector_mode_valid,
-       .best_encoder = rcar_du_connector_best_encoder,
-};
-
-static enum drm_connector_status
-rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
-{
-       struct rcar_du_connector *con = to_rcar_connector(connector);
-       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
-       const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
-       if (sfuncs->detect == NULL)
-               return connector_status_unknown;
-
-       return sfuncs->detect(encoder, connector);
-}
-
-static const struct drm_connector_funcs connector_funcs = {
-       .dpms = drm_atomic_helper_connector_dpms,
-       .reset = drm_atomic_helper_connector_reset,
-       .detect = rcar_du_hdmi_connector_detect,
-       .fill_modes = drm_helper_probe_single_connector_modes,
-       .destroy = drm_connector_cleanup,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
-                               struct rcar_du_encoder *renc)
-{
-       struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
-       struct rcar_du_connector *rcon;
-       struct drm_connector *connector;
-       int ret;
-
-       rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
-       if (rcon == NULL)
-               return -ENOMEM;
-
-       connector = &rcon->connector;
-       connector->display_info.width_mm = 0;
-       connector->display_info.height_mm = 0;
-       connector->interlace_allowed = true;
-       connector->polled = DRM_CONNECTOR_POLL_HPD;
-
-       ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
-                                DRM_MODE_CONNECTOR_HDMIA);
-       if (ret < 0)
-               return ret;
-
-       drm_connector_helper_add(connector, &connector_helper_funcs);
-
-       connector->dpms = DRM_MODE_DPMS_OFF;
-       drm_object_property_set_value(&connector->base,
-               rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
-
-       ret = drm_mode_connector_attach_encoder(connector, encoder);
-       if (ret < 0)
-               return ret;
-
-       rcon->encoder = renc;
-
-       return 0;
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
deleted file mode 100644 (file)
index 87daa94..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * R-Car Display Unit HDMI Connector
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_HDMICON_H__
-#define __RCAR_DU_HDMICON_H__
-
-struct rcar_du_device;
-struct rcar_du_encoder;
-
-#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
-int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
-                               struct rcar_du_encoder *renc);
-#else
-static inline int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
-                                             struct rcar_du_encoder *renc)
-{
-       return -ENOSYS;
-}
-#endif
-
-#endif /* __RCAR_DU_HDMICON_H__ */
index 461662d231e2724b782de2c73f5537dbe2fef052..4de3ff0dbebd8cba5bf4a761ae9d97bb37f86c00 100644 (file)
@@ -16,7 +16,6 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_encoder_slave.h>
 
 #include "rcar_du_drv.h"
 #include "rcar_du_encoder.h"
 
 struct rcar_du_hdmienc {
        struct rcar_du_encoder *renc;
-       struct device *dev;
        bool enabled;
 };
 
 #define to_rcar_hdmienc(e)     (to_rcar_encoder(e)->hdmi)
-#define to_slave_funcs(e)      (to_rcar_encoder(e)->slave.slave_funcs)
 
 static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
 {
        struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-       const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
-       if (sfuncs->dpms)
-               sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
 
        if (hdmienc->renc->lvds)
                rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
@@ -50,15 +43,11 @@ static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
 static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
 {
        struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-       const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
 
        if (hdmienc->renc->lvds)
                rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
                                       true);
 
-       if (sfuncs->dpms)
-               sfuncs->dpms(encoder, DRM_MODE_DPMS_ON);
-
        hdmienc->enabled = true;
 }
 
@@ -67,29 +56,21 @@ static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
                                        struct drm_connector_state *conn_state)
 {
        struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-       const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
        struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
-       const struct drm_display_mode *mode = &crtc_state->mode;
 
        if (hdmienc->renc->lvds)
                rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds,
                                             adjusted_mode);
 
-       if (sfuncs->mode_fixup == NULL)
-               return 0;
-
-       return sfuncs->mode_fixup(encoder, mode, adjusted_mode) ? 0 : -EINVAL;
+       return 0;
 }
 
+
 static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
                                     struct drm_display_mode *mode,
                                     struct drm_display_mode *adjusted_mode)
 {
        struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-       const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
-
-       if (sfuncs->mode_set)
-               sfuncs->mode_set(encoder, mode, adjusted_mode);
 
        rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
 }
@@ -109,7 +90,6 @@ static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
                rcar_du_hdmienc_disable(encoder);
 
        drm_encoder_cleanup(encoder);
-       put_device(hdmienc->dev);
 }
 
 static const struct drm_encoder_funcs encoder_funcs = {
@@ -120,8 +100,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
                         struct rcar_du_encoder *renc, struct device_node *np)
 {
        struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
-       struct drm_i2c_encoder_driver *driver;
-       struct i2c_client *i2c_slave;
+       struct drm_bridge *bridge;
        struct rcar_du_hdmienc *hdmienc;
        int ret;
 
@@ -129,44 +108,29 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
        if (hdmienc == NULL)
                return -ENOMEM;
 
-       /* Locate the slave I2C device and driver. */
-       i2c_slave = of_find_i2c_device_by_node(np);
-       if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
-               dev_dbg(rcdu->dev,
-                       "can't get I2C slave for %s, deferring probe\n",
-                       of_node_full_name(np));
+       /* Locate the DRM bridge from the HDMI encoder DT node */
+       bridge = of_drm_find_bridge(np);
+       if (!bridge)
                return -EPROBE_DEFER;
-       }
-
-       hdmienc->dev = &i2c_slave->dev;
-
-       if (hdmienc->dev->driver == NULL) {
-               dev_dbg(rcdu->dev,
-                       "I2C slave %s not probed yet, deferring probe\n",
-                       dev_name(hdmienc->dev));
-               ret = -EPROBE_DEFER;
-               goto error;
-       }
-
-       /* Initialize the slave encoder. */
-       driver = to_drm_i2c_encoder_driver(to_i2c_driver(hdmienc->dev->driver));
-       ret = driver->encoder_init(i2c_slave, rcdu->ddev, &renc->slave);
-       if (ret < 0)
-               goto error;
 
        ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
                               DRM_MODE_ENCODER_TMDS, NULL);
        if (ret < 0)
-               goto error;
+               return ret;
 
        drm_encoder_helper_add(encoder, &encoder_helper_funcs);
 
        renc->hdmi = hdmienc;
        hdmienc->renc = renc;
 
-       return 0;
+       /* Link drm_bridge to encoder */
+       bridge->encoder = encoder;
+
+       ret = drm_bridge_attach(rcdu->ddev, bridge);
+       if (ret) {
+               drm_encoder_cleanup(encoder);
+               return ret;
+       }
 
-error:
-       put_device(hdmienc->dev);
-       return ret;
+       return 0;
 }
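
The encoder-slave plumbing above is replaced by the drm_bridge API: the bridge is looked up from the DT node and attached to the encoder, with probe deferral when the bridge driver has not bound yet. A sketch of that lookup-and-attach sequence, using the drm_bridge_attach(dev, bridge) signature from the hunk (example_attach_bridge is illustrative):

    static int example_attach_bridge(struct drm_device *ddev,
    				 struct drm_encoder *encoder,
    				 struct device_node *np)
    {
    	struct drm_bridge *bridge;

    	bridge = of_drm_find_bridge(np);
    	if (!bridge)
    		return -EPROBE_DEFER;	/* bridge driver not bound yet */

    	bridge->encoder = encoder;	/* link set by hand in this era */
    	return drm_bridge_attach(ddev, bridge);
    }
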
index e70a4f33d970883f2dae665dd4d82ab233f26074..6bb032d8ac6bbd28dcce4df4c4f56374678ebab7 100644 (file)
@@ -288,6 +288,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
 {
        struct rcar_du_device *rcdu = dev->dev_private;
        struct rcar_du_commit *commit;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
        unsigned int i;
        int ret;
 
@@ -309,10 +311,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
        /* Wait until all affected CRTCs have completed previous commits and
         * mark them as pending.
         */
-       for (i = 0; i < dev->mode_config.num_crtc; ++i) {
-               if (state->crtcs[i])
-                       commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
-       }
+       for_each_crtc_in_state(state, crtc, crtc_state, i)
+               commit->crtcs |= drm_crtc_mask(crtc);
 
        spin_lock(&rcdu->commit.wait.lock);
        ret = wait_event_interruptible_locked(rcdu->commit.wait,
@@ -327,7 +327,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
        }
 
        /* Swap the state, this is the point of no return. */
-       drm_atomic_helper_swap_state(dev, state);
+       drm_atomic_helper_swap_state(state, true);
 
        if (nonblock)
                schedule_work(&commit->work);
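
Building the affected-CRTC mask now iterates only the CRTCs actually carried in the atomic state, and drm_crtc_mask() replaces the open-coded 1 << index. A sketch of the pattern, assuming nothing beyond the helpers used in the hunk (example_affected_crtcs is illustrative):

    static u32 example_affected_crtcs(struct drm_atomic_state *state)
    {
    	struct drm_crtc *crtc;
    	struct drm_crtc_state *crtc_state;
    	unsigned int i;
    	u32 mask = 0;

    	/* visits only CRTCs present in this commit's state */
    	for_each_crtc_in_state(state, crtc, crtc_state, i)
    		mask |= drm_crtc_mask(crtc);

    	return mask;
    }
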
index e905f5da7aaac0b9fb85b9c2eba6b70e17a63c4a..6afd0af312baa3d7cf59576e6fd8f4c4f4f041f4 100644 (file)
@@ -59,7 +59,6 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
 
 static const struct drm_connector_helper_funcs connector_helper_funcs = {
        .get_modes = rcar_du_lvds_connector_get_modes,
-       .best_encoder = rcar_du_connector_best_encoder,
 };
 
 static enum drm_connector_status
index d445e67f78e1a8378164deb11c5f2aa1f36a61d0..bfe31ca870cc72173456271020472f7e4936fca0 100644 (file)
@@ -140,18 +140,17 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
        bool needs_realloc = false;
        unsigned int groups = 0;
        unsigned int i;
+       struct drm_plane *drm_plane;
+       struct drm_plane_state *drm_plane_state;
 
        /* Check if hardware planes need to be reallocated. */
-       for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+       for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
                struct rcar_du_plane_state *plane_state;
                struct rcar_du_plane *plane;
                unsigned int index;
 
-               if (!state->planes[i])
-                       continue;
-
-               plane = to_rcar_plane(state->planes[i]);
-               plane_state = to_rcar_plane_state(state->plane_states[i]);
+               plane = to_rcar_plane(drm_plane);
+               plane_state = to_rcar_plane_state(drm_plane_state);
 
                dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
                        plane->group->index, plane - plane->group->planes);
@@ -247,18 +246,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
        }
 
        /* Reallocate hardware planes for each plane that needs it. */
-       for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
+       for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
                struct rcar_du_plane_state *plane_state;
                struct rcar_du_plane *plane;
                unsigned int crtc_planes;
                unsigned int free;
                int idx;
 
-               if (!state->planes[i])
-                       continue;
-
-               plane = to_rcar_plane(state->planes[i]);
-               plane_state = to_rcar_plane_state(state->plane_states[i]);
+               plane = to_rcar_plane(drm_plane);
+               plane_state = to_rcar_plane_state(drm_plane_state);
 
                dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
                        plane->group->index, plane - plane->group->planes);
index d2f66068e52c01c49d798a29c42c752163f15b18..fedb0161e2344d17c3f6f743a016bf853d09c281 100644 (file)
 #define DEFR6_ODPM12_DISP      (2 << 8)
 #define DEFR6_ODPM12_CDE       (3 << 8)
 #define DEFR6_ODPM12_MASK      (3 << 8)
-#define DEFR6_TCNE2            (1 << 6)
+#define DEFR6_TCNE1            (1 << 6)
+#define DEFR6_TCNE0            (1 << 4)
 #define DEFR6_MLOS1            (1 << 2)
-#define DEFR6_DEFAULT          (DEFR6_CODE | DEFR6_TCNE2)
+#define DEFR6_DEFAULT          (DEFR6_CODE | DEFR6_TCNE1)
 
 /* -----------------------------------------------------------------------------
  * R8A7790-only Control Registers
index 9d7e5c99caf691ddf0e97167aaf7165ef9f4f9e0..8d6125c1c0f9352fd9f5449b8877f4c3e89be05b 100644 (file)
@@ -28,7 +28,6 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
 
 static const struct drm_connector_helper_funcs connector_helper_funcs = {
        .get_modes = rcar_du_vga_connector_get_modes,
-       .best_encoder = rcar_du_connector_best_encoder,
 };
 
 static enum drm_connector_status
@@ -79,7 +78,5 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
        if (ret < 0)
                return ret;
 
-       rcon->encoder = renc;
-
        return 0;
 }
index d30bdc38a760a71e5da6c41c988a204147c657b9..3c58669a06cea1c7d582798d9eb6249e1d3db816 100644 (file)
@@ -2,12 +2,9 @@ config DRM_ROCKCHIP
        tristate "DRM Support for Rockchip"
        depends on DRM && ROCKCHIP_IOMMU
        depends on RESET_CONTROLLER
+       select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_PANEL
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
        select VIDEOMODE_HELPERS
        help
          Choose this option if you have a Rockchip soc chipset.
index 7f6a55cae27a7527a4c36ee80efeef5199a4a85a..89aadbf465f8563cdefb8174ff5cfca608876d09 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/component.h>
 #include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_vop.h"
 
+#define RK3288_GRF_SOC_CON6            0x25c
+#define RK3288_EDP_LCDC_SEL            BIT(5)
+#define RK3399_GRF_SOC_CON20           0x6250
+#define RK3399_EDP_LCDC_SEL            BIT(5)
+
+#define HIWORD_UPDATE(val, mask)       (val | (mask) << 16)
+
 #define to_dp(nm)      container_of(nm, struct rockchip_dp_device, nm)
 
-/* dp grf register offset */
-#define GRF_SOC_CON6                            0x025c
-#define GRF_EDP_LCD_SEL_MASK                    BIT(5)
-#define GRF_EDP_SEL_VOP_LIT                     BIT(5)
-#define GRF_EDP_SEL_VOP_BIG                     0
+/**
+ * struct rockchip_dp_chip_data - per-chip GRF settings for the supported SoCs
+ * @lcdsel_grf_reg: GRF register offset of the lcdc select field
+ * @lcdsel_big: register value selecting VOP big for eDP
+ * @lcdsel_lit: register value selecting VOP little for eDP
+ * @chip_type: specific chip type
+ */
+struct rockchip_dp_chip_data {
+       u32     lcdsel_grf_reg;
+       u32     lcdsel_big;
+       u32     lcdsel_lit;
+       u32     chip_type;
+};
 
 struct rockchip_dp_device {
        struct drm_device        *drm_dev;
@@ -48,9 +64,12 @@ struct rockchip_dp_device {
        struct drm_display_mode  mode;
 
        struct clk               *pclk;
+       struct clk               *grfclk;
        struct regmap            *grf;
        struct reset_control     *rst;
 
+       const struct rockchip_dp_chip_data *data;
+
        struct analogix_dp_plat_data plat_data;
 };
 
@@ -77,6 +96,7 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
        ret = rockchip_dp_pre_init(dp);
        if (ret < 0) {
                dev_err(dp->dev, "failed to dp pre init %d\n", ret);
+               clk_disable_unprepare(dp->pclk);
                return ret;
        }
 
@@ -92,6 +112,23 @@ static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
        return 0;
 }
 
+static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data,
+                                struct drm_connector *connector)
+{
+       struct drm_display_info *di = &connector->display_info;
+       /* The VOP cannot output YUV video formats to eDP correctly */
+       u32 mask = DRM_COLOR_FORMAT_YCRCB444 | DRM_COLOR_FORMAT_YCRCB422;
+
+       if ((di->color_formats & mask)) {
+               DRM_DEBUG_KMS("Swapping display color format from YUV to RGB\n");
+               di->color_formats &= ~mask;
+               di->color_formats |= DRM_COLOR_FORMAT_RGB444;
+               di->bpc = 8;
+       }
+
+       return 0;
+}
+
 static bool
 rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
                                   const struct drm_display_mode *mode,
@@ -119,17 +156,23 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
                return;
 
        if (ret)
-               val = GRF_EDP_SEL_VOP_LIT | (GRF_EDP_LCD_SEL_MASK << 16);
+               val = dp->data->lcdsel_lit;
        else
-               val = GRF_EDP_SEL_VOP_BIG | (GRF_EDP_LCD_SEL_MASK << 16);
+               val = dp->data->lcdsel_big;
 
        dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
 
-       ret = regmap_write(dp->grf, GRF_SOC_CON6, val);
-       if (ret != 0) {
-               dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+       ret = clk_prepare_enable(dp->grfclk);
+       if (ret < 0) {
+               dev_err(dp->dev, "failed to enable grfclk %d\n", ret);
                return;
        }
+
+       ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
+       if (ret != 0)
+               dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+
+       clk_disable_unprepare(dp->grfclk);
 }
 
 static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
@@ -143,22 +186,29 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
                                      struct drm_connector_state *conn_state)
 {
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+       struct rockchip_dp_device *dp = to_dp(encoder);
+       int ret;
 
        /*
-        * FIXME(Yakir): driver should configure the CRTC output video
-        * mode with the display information which indicated the monitor
-        * support colorimetry.
-        *
-        * But don't know why the CRTC driver seems could only output the
-        * RGBaaa rightly. For example, if connect the "innolux,n116bge"
-        * eDP screen, EDID would indicated that screen only accepted the
-        * 6bpc mode. But if I configure CRTC to RGB666 output, then eDP
-        * screen would show a blue picture (RGB888 show a green picture).
-        * But if I configure CTRC to RGBaaa, and eDP driver still keep
-        * RGB666 input video mode, then screen would works prefect.
+        * The hardware is designed so that the VOP must always output the
+        * RGB10 video format to the eDP controller; if the eDP panel only
+        * supports RGB8, the eDP controller (not the VOP) truncates the
+        * video data. That is why we need to hardcode the VOP output mode
+        * to RGB10 here.
         */
+
        s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
        s->output_type = DRM_MODE_CONNECTOR_eDP;
+       if (dp->data->chip_type == RK3399_EDP) {
+               /*
+                * For RK3399, VOP Little must set the output mode to
+                * RGB888, while VOP Big must set it to RGB10.
+                */
+               ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
+                                                       encoder);
+               if (ret > 0)
+                       s->output_mode = ROCKCHIP_OUT_MODE_P888;
+       }
 
        return 0;
 }
@@ -192,6 +242,16 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
                return PTR_ERR(dp->grf);
        }
 
+       dp->grfclk = devm_clk_get(dev, "grf");
+       if (PTR_ERR(dp->grfclk) == -ENOENT) {
+               dp->grfclk = NULL;
+       } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
+               return -EPROBE_DEFER;
+       } else if (IS_ERR(dp->grfclk)) {
+               dev_err(dev, "failed to get grf clock\n");
+               return PTR_ERR(dp->grfclk);
+       }
+
        dp->pclk = devm_clk_get(dev, "pclk");
        if (IS_ERR(dp->pclk)) {
                dev_err(dev, "failed to get pclk property\n");
@@ -213,6 +273,7 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
        ret = rockchip_dp_pre_init(dp);
        if (ret < 0) {
                dev_err(dp->dev, "failed to pre init %d\n", ret);
+               clk_disable_unprepare(dp->pclk);
                return ret;
        }
 
@@ -246,6 +307,7 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
                            void *data)
 {
        struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+       const struct rockchip_dp_chip_data *dp_data;
        struct drm_device *drm_dev = data;
        int ret;
 
@@ -256,10 +318,15 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
         */
        dev_set_drvdata(dev, NULL);
 
+       dp_data = of_device_get_match_data(dev);
+       if (!dp_data)
+               return -ENODEV;
+
        ret = rockchip_dp_init(dp);
        if (ret < 0)
                return ret;
 
+       dp->data = dp_data;
        dp->drm_dev = drm_dev;
 
        ret = rockchip_dp_drm_create_encoder(dp);
@@ -270,9 +337,10 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
 
        dp->plat_data.encoder = &dp->encoder;
 
-       dp->plat_data.dev_type = RK3288_DP;
+       dp->plat_data.dev_type = dp->data->chip_type;
        dp->plat_data.power_on = rockchip_dp_poweron;
        dp->plat_data.power_off = rockchip_dp_powerdown;
+       dp->plat_data.get_modes = rockchip_dp_get_modes;
 
        return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
 }
@@ -292,38 +360,33 @@ static int rockchip_dp_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct device_node *panel_node, *port, *endpoint;
+       struct drm_panel *panel = NULL;
        struct rockchip_dp_device *dp;
-       struct drm_panel *panel;
 
        port = of_graph_get_port_by_id(dev->of_node, 1);
-       if (!port) {
-               dev_err(dev, "can't find output port\n");
-               return -EINVAL;
-       }
-
-       endpoint = of_get_child_by_name(port, "endpoint");
-       of_node_put(port);
-       if (!endpoint) {
-               dev_err(dev, "no output endpoint found\n");
-               return -EINVAL;
-       }
-
-       panel_node = of_graph_get_remote_port_parent(endpoint);
-       of_node_put(endpoint);
-       if (!panel_node) {
-               dev_err(dev, "no output node found\n");
-               return -EINVAL;
-       }
-
-       panel = of_drm_find_panel(panel_node);
-       if (!panel) {
-               DRM_ERROR("failed to find panel\n");
+       if (port) {
+               endpoint = of_get_child_by_name(port, "endpoint");
+               of_node_put(port);
+               if (!endpoint) {
+                       dev_err(dev, "no output endpoint found\n");
+                       return -EINVAL;
+               }
+
+               panel_node = of_graph_get_remote_port_parent(endpoint);
+               of_node_put(endpoint);
+               if (!panel_node) {
+                       dev_err(dev, "no output node found\n");
+                       return -EINVAL;
+               }
+
+               panel = of_drm_find_panel(panel_node);
                of_node_put(panel_node);
-               return -EPROBE_DEFER;
+               if (!panel) {
+                       DRM_ERROR("failed to find panel\n");
+                       return -EPROBE_DEFER;
+               }
        }
 
-       of_node_put(panel_node);
-
        dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
        if (!dp)
                return -ENOMEM;
@@ -349,24 +412,30 @@ static int rockchip_dp_remove(struct platform_device *pdev)
        return 0;
 }
 
+static const struct dev_pm_ops rockchip_dp_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
-static int rockchip_dp_suspend(struct device *dev)
-{
-       return analogix_dp_suspend(dev);
-}
-
-static int rockchip_dp_resume(struct device *dev)
-{
-       return analogix_dp_resume(dev);
-}
+       .suspend = analogix_dp_suspend,
+       .resume_early = analogix_dp_resume,
 #endif
+};
 
-static const struct dev_pm_ops rockchip_dp_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume)
+static const struct rockchip_dp_chip_data rk3399_edp = {
+       .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+       .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL),
+       .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL),
+       .chip_type = RK3399_EDP,
+};
+
+static const struct rockchip_dp_chip_data rk3288_dp = {
+       .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+       .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL),
+       .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL),
+       .chip_type = RK3288_DP,
 };
 
 static const struct of_device_id rockchip_dp_dt_ids[] = {
-       {.compatible = "rockchip,rk3288-dp",},
+       {.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
+       {.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
        {}
 };
 MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
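
Two pieces worth calling out in this file: Rockchip GRF registers take a write-enable mask in the upper 16 bits, which is what HIWORD_UPDATE() encodes, and of_device_get_match_data() picks the per-SoC register layout from the OF match table. A condensed sketch of the VOP-select write (the macro variant below adds parentheses around val for hygiene; example_select_vop is illustrative):

    /* parenthesized variant of the macro added in the hunk */
    #define EXAMPLE_HIWORD_UPDATE(val, mask)	((val) | (mask) << 16)

    static int example_select_vop(struct rockchip_dp_device *dp, bool little)
    {
    	const struct rockchip_dp_chip_data *d = dp->data;

    	/* upper 16 bits act as a write-enable mask for the lower 16 */
    	return regmap_write(dp->grf, d->lcdsel_grf_reg,
    			    little ? d->lcdsel_lit : d->lcdsel_big);
    }
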
index dedc65b40f36c5abd16115dc23cce208d730152d..ca22e5ee89ca00d110e2f7a9679c073ff88d0122 100644 (file)
@@ -964,18 +964,9 @@ static enum drm_mode_status dw_mipi_dsi_mode_valid(
        return mode_status;
 }
 
-static struct drm_encoder *dw_mipi_dsi_connector_best_encoder(
-                                       struct drm_connector *connector)
-{
-       struct dw_mipi_dsi *dsi = con_to_dsi(connector);
-
-       return &dsi->encoder;
-}
-
 static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
        .get_modes = dw_mipi_dsi_connector_get_modes,
        .mode_valid = dw_mipi_dsi_mode_valid,
-       .best_encoder = dw_mipi_dsi_connector_best_encoder,
 };
 
 static enum drm_connector_status
index 801110f65a63896004aea13587b39edec4ceaab4..0665fb915579a6e2bdeece638bfe747e38743842 100644 (file)
@@ -15,7 +15,6 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
 #include <drm/bridge/dw_hdmi.h>
 
 #include "rockchip_drm_drv.h"
index f8b4feb60b256f6e21d8f36b8050c22c7fa02f3e..006260de9dbd22a0be6fda85e1c17ec8a04f1b44 100644 (file)
@@ -579,14 +579,6 @@ inno_hdmi_connector_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static struct drm_encoder *
-inno_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
-       struct inno_hdmi *hdmi = to_inno_hdmi(connector);
-
-       return &hdmi->encoder;
-}
-
 static int
 inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
                                       uint32_t maxX, uint32_t maxY)
@@ -613,7 +605,6 @@ static struct drm_connector_funcs inno_hdmi_connector_funcs = {
 static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
        .get_modes = inno_hdmi_connector_get_modes,
        .mode_valid = inno_hdmi_connector_mode_valid,
-       .best_encoder = inno_hdmi_connector_best_encoder,
 };
 
 static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
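
The .best_encoder removals in this and the neighbouring connectors rely on a helper-core default from this merge window: for atomic drivers a NULL .best_encoder falls back to drm_atomic_helper_best_encoder(), which picks the connector's first (here: only) encoder. Roughly, and hedged as a sketch of what the default does:

    static struct drm_encoder *
    example_best_encoder(struct drm_connector *connector)
    {
    	/* what the helper-core default effectively returns */
    	return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
    }
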
index a409d1f703cb2b0dd7ab3767244872c44b9076e8..a822d49a255ad898e57cafdd8a7d9be5aae68c5e 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
 #include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <linux/module.h>
 #include <linux/of_graph.h>
 #include <linux/component.h>
+#include <linux/console.h>
 
 #include "rockchip_drm_drv.h"
 #include "rockchip_drm_fb.h"
@@ -37,6 +39,7 @@
 #define DRIVER_MINOR   0
 
 static bool is_support_iommu = true;
+static struct drm_driver rockchip_drm_driver;
 
 /*
  * Attach a (component) device to the shared drm dma mapping from master drm
@@ -76,7 +79,7 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
        int pipe = drm_crtc_index(crtc);
        struct rockchip_drm_private *priv = crtc->dev->dev_private;
 
-       if (pipe > ROCKCHIP_MAX_CRTC)
+       if (pipe >= ROCKCHIP_MAX_CRTC)
                return -EINVAL;
 
        priv->crtc_funcs[pipe] = crtc_funcs;
@@ -89,7 +92,7 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
        int pipe = drm_crtc_index(crtc);
        struct rockchip_drm_private *priv = crtc->dev->dev_private;
 
-       if (pipe > ROCKCHIP_MAX_CRTC)
+       if (pipe >= ROCKCHIP_MAX_CRTC)
                return;
 
        priv->crtc_funcs[pipe] = NULL;
@@ -132,20 +135,24 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
                priv->crtc_funcs[pipe]->disable_vblank(crtc);
 }
 
-static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
+static int rockchip_drm_bind(struct device *dev)
 {
+       struct drm_device *drm_dev;
        struct rockchip_drm_private *private;
        struct dma_iommu_mapping *mapping = NULL;
-       struct device *dev = drm_dev->dev;
-       struct drm_connector *connector;
        int ret;
 
-       private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
-       if (!private)
+       drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
+       if (!drm_dev)
                return -ENOMEM;
 
-       mutex_init(&private->commit.lock);
-       INIT_WORK(&private->commit.work, rockchip_drm_atomic_work);
+       dev_set_drvdata(dev, drm_dev);
+
+       private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
+       if (!private) {
+               ret = -ENOMEM;
+               goto err_free;
+       }
 
        drm_dev->dev_private = private;
 
@@ -186,23 +193,6 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
        if (ret)
                goto err_detach_device;
 
-       /*
-        * All components are now added, we can publish the connector sysfs
-        * entries to userspace.  This will generate hotplug events and so
-        * userspace will expect to be able to access DRM at this point.
-        */
-       list_for_each_entry(connector, &drm_dev->mode_config.connector_list,
-                       head) {
-               ret = drm_connector_register(connector);
-               if (ret) {
-                       dev_err(drm_dev->dev,
-                               "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
-                               connector->base.id,
-                               connector->name, ret);
-                       goto err_unbind;
-               }
-       }
-
        /* init kms poll for handling hpd */
        drm_kms_helper_poll_init(drm_dev);
 
@@ -222,14 +212,19 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
        if (ret)
                goto err_vblank_cleanup;
 
+       ret = drm_dev_register(drm_dev, 0);
+       if (ret)
+               goto err_fbdev_fini;
+
        if (is_support_iommu)
                arm_iommu_release_mapping(mapping);
        return 0;
+err_fbdev_fini:
+       rockchip_drm_fbdev_fini(drm_dev);
 err_vblank_cleanup:
        drm_vblank_cleanup(drm_dev);
 err_kms_helper_poll_fini:
        drm_kms_helper_poll_fini(drm_dev);
-err_unbind:
        component_unbind_all(dev, drm_dev);
 err_detach_device:
        if (is_support_iommu)
@@ -240,12 +235,14 @@ err_release_mapping:
 err_config_cleanup:
        drm_mode_config_cleanup(drm_dev);
        drm_dev->dev_private = NULL;
+err_free:
+       drm_dev_unref(drm_dev);
        return ret;
 }
 
-static int rockchip_drm_unload(struct drm_device *drm_dev)
+static void rockchip_drm_unbind(struct device *dev)
 {
-       struct device *dev = drm_dev->dev;
+       struct drm_device *drm_dev = dev_get_drvdata(dev);
 
        rockchip_drm_fbdev_fini(drm_dev);
        drm_vblank_cleanup(drm_dev);
@@ -255,32 +252,12 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
                arm_iommu_detach_device(dev);
        drm_mode_config_cleanup(drm_dev);
        drm_dev->dev_private = NULL;
-
-       return 0;
-}
-
-static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
-                                                   struct drm_file *file_priv)
-{
-       struct rockchip_drm_private *priv = crtc->dev->dev_private;
-       int pipe = drm_crtc_index(crtc);
-
-       if (pipe < ROCKCHIP_MAX_CRTC &&
-           priv->crtc_funcs[pipe] &&
-           priv->crtc_funcs[pipe]->cancel_pending_vblank)
-               priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
-}
-
-static void rockchip_drm_preclose(struct drm_device *dev,
-                                 struct drm_file *file_priv)
-{
-       struct drm_crtc *crtc;
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-               rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
+       drm_dev_unregister(drm_dev);
+       drm_dev_unref(drm_dev);
+       dev_set_drvdata(dev, NULL);
 }
 
-void rockchip_drm_lastclose(struct drm_device *dev)
+static void rockchip_drm_lastclose(struct drm_device *dev)
 {
        struct rockchip_drm_private *priv = dev->dev_private;
 
@@ -300,23 +277,15 @@ static const struct file_operations rockchip_drm_driver_fops = {
        .release = drm_release,
 };
 
-const struct vm_operations_struct rockchip_drm_vm_ops = {
-       .open = drm_gem_vm_open,
-       .close = drm_gem_vm_close,
-};
-
 static struct drm_driver rockchip_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM |
                                  DRIVER_PRIME | DRIVER_ATOMIC,
-       .load                   = rockchip_drm_load,
-       .unload                 = rockchip_drm_unload,
-       .preclose               = rockchip_drm_preclose,
        .lastclose              = rockchip_drm_lastclose,
        .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = rockchip_drm_crtc_enable_vblank,
        .disable_vblank         = rockchip_drm_crtc_disable_vblank,
-       .gem_vm_ops             = &rockchip_drm_vm_ops,
-       .gem_free_object        = rockchip_gem_free_object,
+       .gem_vm_ops             = &drm_gem_cma_vm_ops,
+       .gem_free_object_unlocked = rockchip_gem_free_object,
        .dumb_create            = rockchip_gem_dumb_create,
        .dumb_map_offset        = rockchip_gem_dumb_map_offset,
        .dumb_destroy           = drm_gem_dumb_destroy,
@@ -337,25 +306,38 @@ static struct drm_driver rockchip_drm_driver = {
 };
 
 #ifdef CONFIG_PM_SLEEP
-static int rockchip_drm_sys_suspend(struct device *dev)
+void rockchip_drm_fb_suspend(struct drm_device *drm)
 {
-       struct drm_device *drm = dev_get_drvdata(dev);
-       struct drm_connector *connector;
+       struct rockchip_drm_private *priv = drm->dev_private;
 
-       if (!drm)
-               return 0;
+       console_lock();
+       drm_fb_helper_set_suspend(&priv->fbdev_helper, 1);
+       console_unlock();
+}
 
-       drm_modeset_lock_all(drm);
-       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-               int old_dpms = connector->dpms;
+void rockchip_drm_fb_resume(struct drm_device *drm)
+{
+       struct rockchip_drm_private *priv = drm->dev_private;
 
-               if (connector->funcs->dpms)
-                       connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+       console_lock();
+       drm_fb_helper_set_suspend(&priv->fbdev_helper, 0);
+       console_unlock();
+}
 
-               /* Set the old mode back to the connector for resume */
-               connector->dpms = old_dpms;
+static int rockchip_drm_sys_suspend(struct device *dev)
+{
+       struct drm_device *drm = dev_get_drvdata(dev);
+       struct rockchip_drm_private *priv = drm->dev_private;
+
+       drm_kms_helper_poll_disable(drm);
+       rockchip_drm_fb_suspend(drm);
+
+       priv->state = drm_atomic_helper_suspend(drm);
+       if (IS_ERR(priv->state)) {
+               rockchip_drm_fb_resume(drm);
+               drm_kms_helper_poll_enable(drm);
+               return PTR_ERR(priv->state);
        }
-       drm_modeset_unlock_all(drm);
 
        return 0;
 }
@@ -363,47 +345,11 @@ static int rockchip_drm_sys_suspend(struct device *dev)
 static int rockchip_drm_sys_resume(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
-       struct drm_connector *connector;
-       enum drm_connector_status status;
-       bool changed = false;
-
-       if (!drm)
-               return 0;
+       struct rockchip_drm_private *priv = drm->dev_private;
 
-       drm_modeset_lock_all(drm);
-       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-               int desired_mode = connector->dpms;
-
-               /*
-                * at suspend time, we save dpms to connector->dpms,
-                * restore the old_dpms, and at current time, the connector
-                * dpms status must be DRM_MODE_DPMS_OFF.
-                */
-               connector->dpms = DRM_MODE_DPMS_OFF;
-
-               /*
-                * If the connector has been disconnected during suspend,
-                * disconnect it from the encoder and leave it off. We'll notify
-                * userspace at the end.
-                */
-               if (desired_mode == DRM_MODE_DPMS_ON) {
-                       status = connector->funcs->detect(connector, true);
-                       if (status == connector_status_disconnected) {
-                               connector->encoder = NULL;
-                               connector->status = status;
-                               changed = true;
-                               continue;
-                       }
-               }
-               if (connector->funcs->dpms)
-                       connector->funcs->dpms(connector, desired_mode);
-       }
-       drm_modeset_unlock_all(drm);
-
-       drm_helper_resume_force_mode(drm);
-
-       if (changed)
-               drm_kms_helper_hotplug_event(drm);
+       drm_atomic_helper_resume(drm, priv->state);
+       rockchip_drm_fb_resume(drm);
+       drm_kms_helper_poll_enable(drm);
 
        return 0;
 }
@@ -444,37 +390,6 @@ static void rockchip_add_endpoints(struct device *dev,
        }
 }
 
-static int rockchip_drm_bind(struct device *dev)
-{
-       struct drm_device *drm;
-       int ret;
-
-       drm = drm_dev_alloc(&rockchip_drm_driver, dev);
-       if (!drm)
-               return -ENOMEM;
-
-       ret = drm_dev_register(drm, 0);
-       if (ret)
-               goto err_free;
-
-       dev_set_drvdata(dev, drm);
-
-       return 0;
-
-err_free:
-       drm_dev_unref(drm);
-       return ret;
-}
-
-static void rockchip_drm_unbind(struct device *dev)
-{
-       struct drm_device *drm = dev_get_drvdata(dev);
-
-       drm_dev_unregister(drm);
-       drm_dev_unref(drm);
-       dev_set_drvdata(dev, NULL);
-}
-
 static const struct component_master_ops rockchip_drm_ops = {
        .bind = rockchip_drm_bind,
        .unbind = rockchip_drm_unbind,
@@ -518,6 +433,7 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
                        is_support_iommu = false;
                }
 
+               of_node_put(iommu);
                component_match_add(dev, &match, compare_of, port->parent);
                of_node_put(port);
        }
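
The hand-rolled DPMS save/restore over the connector list is replaced above by the atomic suspend/resume helpers, which snapshot the full atomic state and replay it later. A sketch of the suspend half, with the fbdev and console handling omitted (example_suspend is illustrative):

    static int example_suspend(struct drm_device *drm,
    			   struct drm_atomic_state **saved)
    {
    	drm_kms_helper_poll_disable(drm);	/* stop HPD polling first */

    	*saved = drm_atomic_helper_suspend(drm);	/* disables all outputs */
    	if (IS_ERR(*saved)) {
    		drm_kms_helper_poll_enable(drm);
    		return PTR_ERR(*saved);
    	}
    	return 0;	/* resume replays *saved via drm_atomic_helper_resume() */
    }
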
index 56f43a364c7fce08d72136de7764c0762c95b46e..ea393294006151f6ffb6aa816a5b802879168e21 100644 (file)
@@ -40,14 +40,6 @@ struct rockchip_crtc_funcs {
        int (*enable_vblank)(struct drm_crtc *crtc);
        void (*disable_vblank)(struct drm_crtc *crtc);
        void (*wait_for_update)(struct drm_crtc *crtc);
-       void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
-};
-
-struct rockchip_atomic_commit {
-       struct work_struct      work;
-       struct drm_atomic_state *state;
-       struct drm_device *dev;
-       struct mutex lock;
 };
 
 struct rockchip_crtc_state {
@@ -68,11 +60,9 @@ struct rockchip_drm_private {
        struct drm_fb_helper fbdev_helper;
        struct drm_gem_object *fbdev_bo;
        const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
-
-       struct rockchip_atomic_commit commit;
+       struct drm_atomic_state *state;
 };
 
-void rockchip_drm_atomic_work(struct work_struct *work);
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
                                 const struct rockchip_crtc_funcs *crtc_funcs);
 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
index 755cfdba61cd21f0f5b7f93743f1bb843c1cbf5f..55c52734c52d3f63895f1cac04c639df0a41f503 100644 (file)
@@ -20,6 +20,7 @@
 #include <drm/drm_crtc_helper.h>
 
 #include "rockchip_drm_drv.h"
+#include "rockchip_drm_fb.h"
 #include "rockchip_drm_gem.h"
 
 #define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
@@ -43,14 +44,10 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
 static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
 {
        struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
-       struct drm_gem_object *obj;
        int i;
 
-       for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) {
-               obj = rockchip_fb->obj[i];
-               if (obj)
-                       drm_gem_object_unreference_unlocked(obj);
-       }
+       for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
+               drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]);
 
        drm_framebuffer_cleanup(fb);
        kfree(rockchip_fb);
@@ -228,87 +225,32 @@ rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_stat
 }
 
 static void
-rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit)
+rockchip_atomic_commit_tail(struct drm_atomic_state *state)
 {
-       struct drm_atomic_state *state = commit->state;
-       struct drm_device *dev = commit->dev;
+       struct drm_device *dev = state->dev;
 
-       /*
-        * TODO: do fence wait here.
-        */
-
-       /*
-        * Rockchip crtc support runtime PM, can't update display planes
-        * when crtc is disabled.
-        *
-        * drm_atomic_helper_commit comments detail that:
-        *     For drivers supporting runtime PM the recommended sequence is
-        *
-        *     drm_atomic_helper_commit_modeset_disables(dev, state);
-        *
-        *     drm_atomic_helper_commit_modeset_enables(dev, state);
-        *
-        *     drm_atomic_helper_commit_planes(dev, state, true);
-        *
-        * See the kerneldoc entries for these three functions for more details.
-        */
        drm_atomic_helper_commit_modeset_disables(dev, state);
 
        drm_atomic_helper_commit_modeset_enables(dev, state);
 
        drm_atomic_helper_commit_planes(dev, state, true);
 
+       drm_atomic_helper_commit_hw_done(state);
+
        rockchip_atomic_wait_for_complete(dev, state);
 
        drm_atomic_helper_cleanup_planes(dev, state);
-
-       drm_atomic_state_free(state);
-}
-
-void rockchip_drm_atomic_work(struct work_struct *work)
-{
-       struct rockchip_atomic_commit *commit = container_of(work,
-                                       struct rockchip_atomic_commit, work);
-
-       rockchip_atomic_commit_complete(commit);
 }
 
-int rockchip_drm_atomic_commit(struct drm_device *dev,
-                              struct drm_atomic_state *state,
-                              bool nonblock)
-{
-       struct rockchip_drm_private *private = dev->dev_private;
-       struct rockchip_atomic_commit *commit = &private->commit;
-       int ret;
-
-       ret = drm_atomic_helper_prepare_planes(dev, state);
-       if (ret)
-               return ret;
-
-       /* serialize outstanding nonblocking commits */
-       mutex_lock(&commit->lock);
-       flush_work(&commit->work);
-
-       drm_atomic_helper_swap_state(dev, state);
-
-       commit->dev = dev;
-       commit->state = state;
-
-       if (nonblock)
-               schedule_work(&commit->work);
-       else
-               rockchip_atomic_commit_complete(commit);
-
-       mutex_unlock(&commit->lock);
-
-       return 0;
-}
+static struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
+       .atomic_commit_tail = rockchip_atomic_commit_tail,
+};
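+/*
+ * With .atomic_commit set to drm_atomic_helper_commit below, the core
+ * helper prepares planes, swaps state and handles the (non)blocking
+ * worker itself, then calls the atomic_commit_tail hook above, so the
+ * driver-private commit work queue and mutex are no longer needed.
+ */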
 
 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
        .fb_create = rockchip_user_fb_create,
        .output_poll_changed = rockchip_drm_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
-       .atomic_commit = rockchip_drm_atomic_commit,
+       .atomic_commit = drm_atomic_helper_commit,
 };
 
 struct drm_framebuffer *
@@ -339,4 +281,5 @@ void rockchip_drm_mode_config_init(struct drm_device *dev)
        dev->mode_config.max_height = 4096;
 
        dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
+       dev->mode_config.helper_private = &rockchip_mode_config_helpers;
 }
index f261512bb4a03539f55e40b0e545c3ee8469c57e..207e01de6e3214f47c1bcf509977b8d307ca64c0 100644 (file)
@@ -108,7 +108,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
        fbi->screen_size = rk_obj->base.size;
        fbi->fix.smem_len = rk_obj->base.size;
 
-       DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
+       DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n",
                      fb->width, fb->height, fb->depth, rk_obj->kvaddr,
                      offset, size);
 
@@ -156,9 +156,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
                goto err_drm_fb_helper_fini;
        }
 
-       /* disable all the possible outputs/crtcs before entering KMS mode */
-       drm_helper_disable_unused_functions(dev);
-
        ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
        if (ret < 0) {
                dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
index 9c2d8a894093876f027fb44bb9f73d1cbb23b86e..059e902f872d72f2f1585e0edee3ba40888c2ed0 100644 (file)
@@ -38,7 +38,7 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
                                         &rk_obj->dma_addr, GFP_KERNEL,
                                         &rk_obj->dma_attrs);
        if (!rk_obj->kvaddr) {
-               DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size);
+               DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
                return -ENOMEM;
        }
 
index 1c4d5b5a70a2b8b2b61f22581ee9428f47913c40..91305eb7d312ade85f05e3ce303c4136c48c3ccc 100644 (file)
@@ -98,7 +98,9 @@ struct vop_win {
        const struct vop_win_data *data;
        struct vop *vop;
 
-       struct vop_plane_state state;
+       /* protected by dev->event_lock */
+       bool enable;
+       dma_addr_t yrgb_mst;
 };
 
 struct vop {
@@ -112,6 +114,8 @@ struct vop {
        bool vsync_work_pending;
        struct completion dsp_hold_completion;
        struct completion wait_update_complete;
+
+       /* protected by dev->event_lock */
        struct drm_pending_vblank_event *event;
 
        const struct vop_data *data;
@@ -324,9 +328,9 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
                            scl_cal_scale2(src_h, dst_h));
                if (is_yuv) {
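+                       /* chroma planes are subsampled; scale from the CbCr source size */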
                        VOP_SCL_SET(vop, win, scale_cbcr_x,
-                                   scl_cal_scale2(src_w, dst_w));
+                                   scl_cal_scale2(cbcr_src_w, dst_w));
                        VOP_SCL_SET(vop, win, scale_cbcr_y,
-                                   scl_cal_scale2(src_h, dst_h));
+                                   scl_cal_scale2(cbcr_src_h, dst_h));
                }
                return;
        }
@@ -431,9 +435,6 @@ static void vop_enable(struct drm_crtc *crtc)
        struct vop *vop = to_vop(crtc);
        int ret;
 
-       if (vop->is_enabled)
-               return;
-
        ret = pm_runtime_get_sync(vop->dev);
        if (ret < 0) {
                dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
@@ -501,8 +502,7 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
        struct vop *vop = to_vop(crtc);
        int i;
 
-       if (!vop->is_enabled)
-               return;
+       WARN_ON(vop->event);
 
        /*
         * We need to make sure that all windows are disabled before we
@@ -553,6 +553,14 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
        clk_disable(vop->aclk);
        clk_disable(vop->hclk);
        pm_runtime_put(vop->dev);
+
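+       /*
+        * A commit that turns the CRTC off can still carry a flip event, but
+        * no further vblank will fire to complete it; send it here instead.
+        */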
+       if (crtc->state->event && !crtc->state->active) {
+               spin_lock_irq(&crtc->dev->event_lock);
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+
+               crtc->state->event = NULL;
+       }
 }
 
 static void vop_plane_destroy(struct drm_plane *plane)
@@ -618,6 +626,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 
        ret = drm_plane_helper_check_update(plane, crtc, state->fb,
                                            src, dest, &clip,
+                                           state->rotation,
                                            min_scale,
                                            max_scale,
                                            true, true, &visible);
@@ -658,6 +667,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
        if (!old_state->crtc)
                return;
 
+       spin_lock_irq(&plane->dev->event_lock);
+       vop_win->enable = false;
+       vop_win->yrgb_mst = 0;
+       spin_unlock_irq(&plane->dev->event_lock);
+
        spin_lock(&vop->reg_lock);
 
        VOP_WIN_SET(vop, win, enable, 0);
@@ -692,7 +706,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        /*
         * can't update plane when vop is disabled.
         */
-       if (!crtc)
+       if (WARN_ON(!crtc))
                return;
 
        if (WARN_ON(!vop->is_enabled))
@@ -721,6 +735,11 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
        offset += (src->y1 >> 16) * fb->pitches[0];
        vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
 
+       spin_lock_irq(&plane->dev->event_lock);
+       vop_win->enable = true;
+       vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
+       spin_unlock_irq(&plane->dev->event_lock);
+
        spin_lock(&vop->reg_lock);
 
        VOP_WIN_SET(vop, win, format, vop_plane_state->format);
@@ -779,7 +798,7 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = {
        .atomic_disable = vop_plane_atomic_disable,
 };
 
-void vop_atomic_plane_reset(struct drm_plane *plane)
+static void vop_atomic_plane_reset(struct drm_plane *plane)
 {
        struct vop_plane_state *vop_plane_state =
                                        to_vop_plane_state(plane->state);
@@ -796,7 +815,7 @@ void vop_atomic_plane_reset(struct drm_plane *plane)
        plane->state->plane = plane;
 }
 
-struct drm_plane_state *
+static struct drm_plane_state *
 vop_atomic_plane_duplicate_state(struct drm_plane *plane)
 {
        struct vop_plane_state *old_vop_plane_state;
@@ -876,30 +895,10 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
        WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
 }
 
-static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
-                                          struct drm_file *file_priv)
-{
-       struct drm_device *drm = crtc->dev;
-       struct vop *vop = to_vop(crtc);
-       struct drm_pending_vblank_event *e;
-       unsigned long flags;
-
-       spin_lock_irqsave(&drm->event_lock, flags);
-       e = vop->event;
-       if (e && e->base.file_priv == file_priv) {
-               vop->event = NULL;
-
-               e->base.destroy(&e->base);
-               file_priv->event_space += sizeof(e->event);
-       }
-       spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
 static const struct rockchip_crtc_funcs private_crtc_funcs = {
        .enable_vblank = vop_crtc_enable_vblank,
        .disable_vblank = vop_crtc_disable_vblank,
        .wait_for_update = vop_crtc_wait_for_update,
-       .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
 };
 
 static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -931,6 +930,8 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
        u16 vact_end = vact_st + vdisplay;
        uint32_t val;
 
+       WARN_ON(vop->event);
+
        vop_enable(crtc);
        /*
         * If dclk rate is zero, it means that scanout is stopped,
@@ -1027,12 +1028,15 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
 {
        struct vop *vop = to_vop(crtc);
 
+       spin_lock_irq(&crtc->dev->event_lock);
        if (crtc->state->event) {
                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+               WARN_ON(vop->event);
 
                vop->event = crtc->state->event;
                crtc->state->event = NULL;
        }
+       spin_unlock_irq(&crtc->dev->event_lock);
 }
 
 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
@@ -1048,6 +1052,17 @@ static void vop_crtc_destroy(struct drm_crtc *crtc)
        drm_crtc_cleanup(crtc);
 }
 
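+/*
+ * The VOP subclasses the CRTC state (struct rockchip_crtc_state), so the
+ * generic drm_atomic_helper_crtc_reset() would allocate a state that is
+ * too small; reset by hand with the subclass size instead.
+ */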
+static void vop_crtc_reset(struct drm_crtc *crtc)
+{
+       if (crtc->state)
+               __drm_atomic_helper_crtc_destroy_state(crtc->state);
+       kfree(crtc->state);
+
+       crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
+       if (crtc->state)
+               crtc->state->crtc = crtc;
+}
+
 static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
 {
        struct rockchip_crtc_state *rockchip_state;
@@ -1073,23 +1088,21 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .destroy = vop_crtc_destroy,
-       .reset = drm_atomic_helper_crtc_reset,
+       .reset = vop_crtc_reset,
        .atomic_duplicate_state = vop_crtc_duplicate_state,
        .atomic_destroy_state = vop_crtc_destroy_state,
 };
 
 static bool vop_win_pending_is_complete(struct vop_win *vop_win)
 {
-       struct drm_plane *plane = &vop_win->base;
-       struct vop_plane_state *state = to_vop_plane_state(plane->state);
        dma_addr_t yrgb_mst;
 
-       if (!state->enable)
+       if (!vop_win->enable)
                return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
 
        yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
 
-       return yrgb_mst == state->yrgb_mst;
+       return yrgb_mst == vop_win->yrgb_mst;
 }
 
 static void vop_handle_vblank(struct vop *vop)
@@ -1104,15 +1117,16 @@ static void vop_handle_vblank(struct vop *vop)
                        return;
        }
 
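+       /* vop->event is protected by event_lock: test and consume it under the lock */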
+       spin_lock_irqsave(&drm->event_lock, flags);
        if (vop->event) {
-               spin_lock_irqsave(&drm->event_lock, flags);
 
                drm_crtc_send_vblank_event(crtc, vop->event);
                drm_crtc_vblank_put(crtc);
                vop->event = NULL;
 
-               spin_unlock_irqrestore(&drm->event_lock, flags);
        }
+       spin_unlock_irqrestore(&drm->event_lock, flags);
+
        if (!completion_done(&vop->wait_update_complete))
                complete(&vop->wait_update_complete);
 }
index 3166b46a5893fbe39a3c81fd2b928e41244066f2..919992cdc97e32f190b285f874c8c3944dd93ea3 100644 (file)
@@ -190,7 +190,7 @@ static const struct vop_data rk3288_vop = {
        .win_size = ARRAY_SIZE(rk3288_vop_win_data),
 };
 
-static const struct vop_scl_regs rk3066_win_scl = {
+static const struct vop_scl_regs rk3036_win_scl = {
        .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
        .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
        .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
@@ -198,7 +198,7 @@ static const struct vop_scl_regs rk3066_win_scl = {
 };
 
 static const struct vop_win_phy rk3036_win0_data = {
-       .scl = &rk3066_win_scl,
+       .scl = &rk3036_win_scl,
        .data_formats = formats_win_full,
        .nformats = ARRAY_SIZE(formats_win_full),
        .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
@@ -210,6 +210,7 @@ static const struct vop_win_phy rk3036_win0_data = {
        .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
        .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
        .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
+       .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
 };
 
 static const struct vop_win_phy rk3036_win1_data = {
@@ -299,7 +300,7 @@ static int vop_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct platform_driver vop_platform_driver = {
+static struct platform_driver vop_platform_driver = {
        .probe = vop_probe,
        .remove = vop_remove,
        .driver = {
index 8d17d00ddb4b75aea7456fd1ef3dcb4f1fbe5b0e..c987c826daa30fcc066ef01cffd5e5dafc86317b 100644 (file)
@@ -6,7 +6,6 @@ config DRM_SHMOBILE
        select BACKLIGHT_CLASS_DEVICE
        select BACKLIGHT_LCD_SUPPORT
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
        help
index 1e154fc779d55aa7db6d993068abd36cc30de06d..6547b1db460a23abf45d79a6df797c854deb5dc6 100644 (file)
@@ -441,7 +441,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
        scrtc->event = NULL;
        if (event) {
                drm_crtc_send_vblank_event(&scrtc->crtc, event);
-               drm_vblank_put(dev, 0);
+               drm_crtc_vblank_put(&scrtc->crtc);
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
@@ -467,7 +467,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
 
        if (event) {
                event->pipe = 0;
-               drm_vblank_get(dev, 0);
+               drm_crtc_vblank_get(&scrtc->crtc);
                spin_lock_irqsave(&dev->event_lock, flags);
                scrtc->event = event;
                spin_unlock_irqrestore(&dev->event_lock, flags);
index 7700ff1720792108e9847bebb4dca522ac45ff45..f0492603ea885d0767a375d782913c9da391cb62 100644 (file)
@@ -259,12 +259,11 @@ static struct drm_driver shmob_drm_driver = {
                                | DRIVER_PRIME,
        .load                   = shmob_drm_load,
        .unload                 = shmob_drm_unload,
-       .set_busid              = drm_platform_set_busid,
        .irq_handler            = shmob_drm_irq,
        .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = shmob_drm_enable_vblank,
        .disable_vblank         = shmob_drm_disable_vblank,
-       .gem_free_object        = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
index 93ad8a5704d19233c173c91d235b26a1f605e535..03defda77766a9984063f8300a46415d14d32a53 100644 (file)
@@ -316,7 +316,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
        struct sis_file_private *file_priv = file->driver_priv;
        struct sis_memblock *entry, *next;
 
-       if (!(file->minor->master && file->master->lock.hw_lock))
+       if (!(dev->master && file->master->lock.hw_lock))
                return;
 
        drm_legacy_idlelock_take(&file->master->lock);
index 5ad43a1bb260a828649dacb3f095c3619b2c9e0d..494ab257f77c3917ebdeb8cd9aeaa940aac4876b 100644 (file)
@@ -7,5 +7,6 @@ config DRM_STI
        select DRM_KMS_CMA_HELPER
        select DRM_PANEL
        select FW_LOADER
+       select SND_SOC_HDMI_CODEC if SND_SOC
        help
          Choose this option to enable DRM on STM stiH41x chipset
index a516eb869f6fe81c71b8b77106a9b6a13777105b..2da7d6866d5de3d566bf8b40211730d3f3fc8f19 100644 (file)
@@ -6,6 +6,8 @@
 
 #include "sti_awg_utils.h"
 
+#define AWG_DELAY (-5)
+
 #define AWG_OPCODE_OFFSET 10
 #define AWG_MAX_ARG       0x3ff
 
@@ -125,7 +127,7 @@ static int awg_generate_line_signal(
                val = timing->blanking_level;
                ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
 
-               val = timing->trailing_pixels - 1;
+               val = timing->trailing_pixels - 1 + AWG_DELAY;
                ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
        }
 
index 3d2fa3ab33dffece98b73d1c827b4512c4d07a36..bd74732ea09b0a0b97923138955913e5463bf91f 100644 (file)
@@ -55,6 +55,26 @@ struct sti_compositor_data stih416_compositor_data = {
        },
 };
 
+int sti_compositor_debugfs_init(struct sti_compositor *compo,
+                               struct drm_minor *minor)
+{
+       int ret = 0, i;
+
+       for (i = 0; compo->vid[i]; i++) {
+               ret = vid_debugfs_init(compo->vid[i], minor);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; compo->mixer[i]; i++) {
+               ret = sti_mixer_debugfs_init(compo->mixer[i], minor);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static int sti_compositor_bind(struct device *dev,
                               struct device *master,
                               void *data)
@@ -247,10 +267,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
        vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
        if (vtg_np)
                compo->vtg_main = of_vtg_find(vtg_np);
+       of_node_put(vtg_np);
 
        vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
        if (vtg_np)
                compo->vtg_aux = of_vtg_find(vtg_np);
+       of_node_put(vtg_np);
 
        platform_set_drvdata(pdev, compo);
 
index 1a4a73dab11e94f8451ec8f4805894b2d99a6a57..24444ef42a98258910fcc72254bb5aae46671dce 100644 (file)
@@ -81,4 +81,7 @@ struct sti_compositor {
        struct notifier_block vtg_vblank_nb;
 };
 
+int sti_compositor_debugfs_init(struct sti_compositor *compo,
+                               struct drm_minor *minor);
+
 #endif
index 505620c7c2c8fbde2afd79295ecb36231496e64e..c7d734dc3cf405a1799accc0216efd54e1dac3d9 100644 (file)
 static void sti_crtc_enable(struct drm_crtc *crtc)
 {
        struct sti_mixer *mixer = to_sti_mixer(crtc);
-       struct device *dev = mixer->dev;
-       struct sti_compositor *compo = dev_get_drvdata(dev);
 
        DRM_DEBUG_DRIVER("\n");
 
        mixer->status = STI_MIXER_READY;
 
-       /* Prepare and enable the compo IP clock */
-       if (mixer->id == STI_MIXER_MAIN) {
-               if (clk_prepare_enable(compo->clk_compo_main))
-                       DRM_INFO("Failed to prepare/enable compo_main clk\n");
-       } else {
-               if (clk_prepare_enable(compo->clk_compo_aux))
-                       DRM_INFO("Failed to prepare/enable compo_aux clk\n");
-       }
-
        drm_crtc_vblank_on(crtc);
 }
 
@@ -51,24 +40,14 @@ static void sti_crtc_disabling(struct drm_crtc *crtc)
        mixer->status = STI_MIXER_DISABLING;
 }
 
-static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
-                               const struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode)
-{
-       /* accept the provided drm_display_mode, do not fix it up */
-       drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
-       return true;
-}
-
 static int
 sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
 {
        struct sti_mixer *mixer = to_sti_mixer(crtc);
        struct device *dev = mixer->dev;
        struct sti_compositor *compo = dev_get_drvdata(dev);
-       struct clk *clk;
+       struct clk *compo_clk, *pix_clk;
        int rate = mode->clock * 1000;
-       int res;
 
        DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
                      crtc->base.id, sti_mixer_to_str(mixer),
@@ -83,32 +62,46 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
                      mode->vsync_start, mode->vsync_end,
                      mode->vtotal, mode->type, mode->flags);
 
-       /* Set rate and prepare/enable pixel clock */
-       if (mixer->id == STI_MIXER_MAIN)
-               clk = compo->clk_pix_main;
-       else
-               clk = compo->clk_pix_aux;
+       if (mixer->id == STI_MIXER_MAIN) {
+               compo_clk = compo->clk_compo_main;
+               pix_clk = compo->clk_pix_main;
+       } else {
+               compo_clk = compo->clk_compo_aux;
+               pix_clk = compo->clk_pix_aux;
+       }
+
+       /* Prepare and enable the compo IP clock */
+       if (clk_prepare_enable(compo_clk)) {
+               DRM_INFO("Failed to prepare/enable compositor clk\n");
+               goto compo_error;
+       }
 
-       res = clk_set_rate(clk, rate);
-       if (res < 0) {
+       /* Set rate and prepare/enable pixel clock */
+       if (clk_set_rate(pix_clk, rate) < 0) {
                DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
-               return -EINVAL;
+               goto pix_error;
        }
-       if (clk_prepare_enable(clk)) {
+       if (clk_prepare_enable(pix_clk)) {
                DRM_ERROR("Failed to prepare/enable pix clk\n");
-               return -EINVAL;
+               goto pix_error;
        }
 
        sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
                        compo->vtg_main : compo->vtg_aux, &crtc->mode);
 
-       res = sti_mixer_active_video_area(mixer, &crtc->mode);
-       if (res) {
+       if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
                DRM_ERROR("Can't set active video area\n");
-               return -EINVAL;
+               goto mixer_error;
        }
 
-       return res;
+       return 0;
+
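+       /* error path: release the clocks in reverse order of enabling */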
+mixer_error:
+       clk_disable_unprepare(pix_clk);
+pix_error:
+       clk_disable_unprepare(compo_clk);
+compo_error:
+       return -EINVAL;
 }
 
 static void sti_crtc_disable(struct drm_crtc *crtc)
@@ -139,7 +132,6 @@ static void sti_crtc_disable(struct drm_crtc *crtc)
 static void
 sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
-       sti_crtc_enable(crtc);
        sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
 }
 
@@ -230,10 +222,7 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
 static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
        .enable = sti_crtc_enable,
        .disable = sti_crtc_disabling,
-       .mode_fixup = sti_crtc_mode_fixup,
-       .mode_set = drm_helper_crtc_mode_set,
        .mode_set_nofb = sti_crtc_mode_set_nofb,
-       .mode_set_base = drm_helper_crtc_mode_set_base,
        .atomic_begin = sti_crtc_atomic_begin,
        .atomic_flush = sti_crtc_atomic_flush,
 };
@@ -341,6 +330,17 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
        }
 }
 
+static int sti_crtc_late_register(struct drm_crtc *crtc)
+{
+       struct sti_mixer *mixer = to_sti_mixer(crtc);
+       struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
+
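+       /* the debugfs entries are compositor-wide; register them only once */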
+       if (drm_crtc_index(crtc) == 0)
+               return sti_compositor_debugfs_init(compo, crtc->dev->primary);
+
+       return 0;
+}
+
 static const struct drm_crtc_funcs sti_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
@@ -349,6 +349,7 @@ static const struct drm_crtc_funcs sti_crtc_funcs = {
        .reset = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+       .late_register = sti_crtc_late_register,
 };
 
 bool sti_crtc_is_main(struct drm_crtc *crtc)
index 4e990299735c0fa1de0b049ee949f8843a416e23..a263bbba4119ad151278cc9a364a77f20258f691 100644 (file)
@@ -105,12 +105,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
 {
        struct drm_info_node *node = s->private;
        struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        seq_printf(s, "%s: (vaddr = 0x%p)",
                   sti_plane_to_str(&cursor->plane), cursor->regs);
@@ -129,7 +123,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
        DBGFS_DUMP(CUR_AWE);
        seq_puts(s, "\n");
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -336,6 +329,33 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
        .atomic_disable = sti_cursor_atomic_disable,
 };
 
+static void sti_cursor_destroy(struct drm_plane *drm_plane)
+{
+       DRM_DEBUG_DRIVER("\n");
+
+       drm_plane_helper_disable(drm_plane);
+       drm_plane_cleanup(drm_plane);
+}
+
+static int sti_cursor_late_register(struct drm_plane *drm_plane)
+{
+       struct sti_plane *plane = to_sti_plane(drm_plane);
+       struct sti_cursor *cursor = to_sti_cursor(plane);
+
+       return cursor_debugfs_init(cursor, drm_plane->dev->primary);
+}
+
+static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = sti_cursor_destroy,
+       .set_property = sti_plane_set_property,
+       .reset = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+       .late_register = sti_cursor_late_register,
+};
+
 struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
                                    struct device *dev, int desc,
                                    void __iomem *baseaddr,
@@ -370,7 +390,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
 
        res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
                                       possible_crtcs,
-                                      &sti_plane_helpers_funcs,
+                                      &sti_cursor_plane_helpers_funcs,
                                       cursor_supported_formats,
                                       ARRAY_SIZE(cursor_supported_formats),
                                       DRM_PLANE_TYPE_CURSOR, NULL);
@@ -384,9 +404,6 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
 
        sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
 
-       if (cursor_debugfs_init(cursor, drm_dev->primary))
-               DRM_ERROR("CURSOR debugfs setup failed\n");
-
        return &cursor->plane.drm_plane;
 
 err_plane:
index 872495e72294107878e6c213200d11a50686a911..96bd3d08b2d424d5dd2ec24d543bfe343c97c2ad 100644 (file)
@@ -72,11 +72,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
        struct drm_info_node *node = s->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_plane *p;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        list_for_each_entry(p, &dev->mode_config.plane_list, head) {
                struct sti_plane *plane = to_sti_plane(p);
@@ -86,7 +81,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
                           plane->fps_info.fips_str);
        }
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -221,7 +215,7 @@ static int sti_atomic_commit(struct drm_device *drm,
         * the software side now.
         */
 
-       drm_atomic_helper_swap_state(drm, state);
+       drm_atomic_helper_swap_state(state, true);
 
        if (nonblock)
                sti_atomic_schedule(private, state);
@@ -232,8 +226,28 @@ static int sti_atomic_commit(struct drm_device *drm,
        return 0;
 }
 
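+/*
+ * fbdev is set up lazily from the first hotplug event: with the .load()
+ * hook gone there is no earlier point where a connector is guaranteed to
+ * exist, so create the emulation on demand and forward later events to it.
+ */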
+static void sti_output_poll_changed(struct drm_device *ddev)
+{
+       struct sti_private *private = ddev->dev_private;
+
+       if (!ddev->mode_config.num_connector)
+               return;
+
+       if (private->fbdev) {
+               drm_fbdev_cma_hotplug_event(private->fbdev);
+               return;
+       }
+
+       private->fbdev = drm_fbdev_cma_init(ddev, 32,
+                                           ddev->mode_config.num_crtc,
+                                           ddev->mode_config.num_connector);
+       if (IS_ERR(private->fbdev))
+               private->fbdev = NULL;
+}
+
 static const struct drm_mode_config_funcs sti_mode_config_funcs = {
        .fb_create = drm_fb_cma_create,
+       .output_poll_changed = sti_output_poll_changed,
        .atomic_check = drm_atomic_helper_check,
        .atomic_commit = sti_atomic_commit,
 };
@@ -254,45 +268,6 @@ static void sti_mode_config_init(struct drm_device *dev)
        dev->mode_config.funcs = &sti_mode_config_funcs;
 }
 
-static int sti_load(struct drm_device *dev, unsigned long flags)
-{
-       struct sti_private *private;
-       int ret;
-
-       private = kzalloc(sizeof(*private), GFP_KERNEL);
-       if (!private) {
-               DRM_ERROR("Failed to allocate private\n");
-               return -ENOMEM;
-       }
-       dev->dev_private = (void *)private;
-       private->drm_dev = dev;
-
-       mutex_init(&private->commit.lock);
-       INIT_WORK(&private->commit.work, sti_atomic_work);
-
-       drm_mode_config_init(dev);
-       drm_kms_helper_poll_init(dev);
-
-       sti_mode_config_init(dev);
-
-       ret = component_bind_all(dev->dev, dev);
-       if (ret) {
-               drm_kms_helper_poll_fini(dev);
-               drm_mode_config_cleanup(dev);
-               kfree(private);
-               return ret;
-       }
-
-       drm_mode_config_reset(dev);
-
-       drm_helper_disable_unused_functions(dev);
-       drm_fbdev_cma_init(dev, 32,
-                          dev->mode_config.num_crtc,
-                          dev->mode_config.num_connector);
-
-       return 0;
-}
-
 static const struct file_operations sti_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
@@ -309,8 +284,7 @@ static const struct file_operations sti_driver_fops = {
 static struct drm_driver sti_driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
            DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
-       .load = sti_load,
-       .gem_free_object = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops = &drm_gem_cma_vm_ops,
        .dumb_create = drm_gem_cma_dumb_create,
        .dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -346,14 +320,88 @@ static int compare_of(struct device *dev, void *data)
        return dev->of_node == data;
 }
 
+static int sti_init(struct drm_device *ddev)
+{
+       struct sti_private *private;
+
+       private = kzalloc(sizeof(*private), GFP_KERNEL);
+       if (!private)
+               return -ENOMEM;
+
+       ddev->dev_private = (void *)private;
+       dev_set_drvdata(ddev->dev, ddev);
+       private->drm_dev = ddev;
+
+       mutex_init(&private->commit.lock);
+       INIT_WORK(&private->commit.work, sti_atomic_work);
+
+       drm_mode_config_init(ddev);
+
+       sti_mode_config_init(ddev);
+
+       drm_kms_helper_poll_init(ddev);
+
+       return 0;
+}
+
+static void sti_cleanup(struct drm_device *ddev)
+{
+       struct sti_private *private = ddev->dev_private;
+
+       if (private->fbdev) {
+               drm_fbdev_cma_fini(private->fbdev);
+               private->fbdev = NULL;
+       }
+
+       drm_kms_helper_poll_fini(ddev);
+       drm_vblank_cleanup(ddev);
+       kfree(private);
+       ddev->dev_private = NULL;
+}
+
 static int sti_bind(struct device *dev)
 {
-       return drm_platform_init(&sti_driver, to_platform_device(dev));
+       struct drm_device *ddev;
+       int ret;
+
+       ddev = drm_dev_alloc(&sti_driver, dev);
+       if (!ddev)
+               return -ENOMEM;
+
+       ddev->platformdev = to_platform_device(dev);
+
+       ret = sti_init(ddev);
+       if (ret)
+               goto err_drm_dev_unref;
+
+       ret = component_bind_all(ddev->dev, ddev);
+       if (ret)
+               goto err_cleanup;
+
+       ret = drm_dev_register(ddev, 0);
+       if (ret)
+               goto err_register;
+
+       drm_mode_config_reset(ddev);
+
+       return 0;
+
+err_register:
+       drm_mode_config_cleanup(ddev);
+err_cleanup:
+       sti_cleanup(ddev);
+err_drm_dev_unref:
+       drm_dev_unref(ddev);
+       return ret;
 }
 
 static void sti_unbind(struct device *dev)
 {
-       drm_put_dev(dev_get_drvdata(dev));
+       struct drm_device *ddev = dev_get_drvdata(dev);
+
+       drm_dev_unregister(ddev);
+       sti_cleanup(ddev);
+       drm_dev_unref(ddev);
 }
 
 static const struct component_master_ops sti_ops = {
index 30ddc20841c343a6b1c904864107e6b4a4d732a0..78ebe5e30f53ca782c6928266f08989a9fcdac76 100644 (file)
@@ -24,6 +24,7 @@ struct sti_private {
        struct sti_compositor *compo;
        struct drm_property *plane_zorder_property;
        struct drm_device *drm_dev;
+       struct drm_fbdev_cma *fbdev;
 
        struct {
                struct drm_atomic_state *state;
index 25f76632002ccb212873c435ee56fa584aa375fc..00881eb4536e85f3cc196c0389f34cfff98716a6 100644 (file)
@@ -177,12 +177,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
 {
        struct drm_info_node *node = s->private;
        struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
        DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
@@ -193,7 +187,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
        dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
        seq_puts(s, "\n");
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -384,20 +377,10 @@ static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector)
-{
-       struct sti_dvo_connector *dvo_connector
-               = to_sti_dvo_connector(connector);
-
-       /* Best encoder is the one associated during connector creation */
-       return dvo_connector->encoder;
-}
-
 static const
 struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = {
        .get_modes = sti_dvo_connector_get_modes,
        .mode_valid = sti_dvo_connector_mode_valid,
-       .best_encoder = sti_dvo_best_encoder,
 };
 
 static enum drm_connector_status
@@ -421,24 +404,29 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
        return connector_status_disconnected;
 }
 
-static void sti_dvo_connector_destroy(struct drm_connector *connector)
+static int sti_dvo_late_register(struct drm_connector *connector)
 {
        struct sti_dvo_connector *dvo_connector
                = to_sti_dvo_connector(connector);
+       struct sti_dvo *dvo = dvo_connector->dvo;
 
-       drm_connector_unregister(connector);
-       drm_connector_cleanup(connector);
-       kfree(dvo_connector);
+       if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) {
+               DRM_ERROR("DVO debugfs setup failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 static const struct drm_connector_funcs sti_dvo_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = sti_dvo_connector_detect,
-       .destroy = sti_dvo_connector_destroy,
+       .destroy = drm_connector_cleanup,
        .reset = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+       .late_register = sti_dvo_late_register,
 };
 
 static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
@@ -509,26 +497,16 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
        drm_connector_helper_add(drm_connector,
                                 &sti_dvo_connector_helper_funcs);
 
-       err = drm_connector_register(drm_connector);
-       if (err)
-               goto err_connector;
-
        err = drm_mode_connector_attach_encoder(drm_connector, encoder);
        if (err) {
                DRM_ERROR("Failed to attach a connector to a encoder\n");
                goto err_sysfs;
        }
 
-       if (dvo_debugfs_init(dvo, drm_dev->primary))
-               DRM_ERROR("DVO debugfs setup failed\n");
-
        return 0;
 
 err_sysfs:
-       drm_connector_unregister(drm_connector);
-err_connector:
        drm_bridge_remove(bridge);
-       drm_connector_cleanup(drm_connector);
        return -EINVAL;
 }
 
@@ -602,6 +580,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
        dvo->panel_node = of_parse_phandle(np, "sti,panel", 0);
        if (!dvo->panel_node)
                DRM_ERROR("No panel associated to the dvo output\n");
+       of_node_put(dvo->panel_node);
 
        platform_set_drvdata(pdev, dvo);
 
index ff33c38da197be7a0ac63e0691119dbb2f6cd509..bf63086a3dc8c68308a35712239a38cbd151491d 100644 (file)
@@ -208,14 +208,8 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
 {
        struct drm_info_node *node = s->private;
        struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
        struct drm_plane *drm_plane = &gdp->plane.drm_plane;
        struct drm_crtc *crtc = drm_plane->crtc;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        seq_printf(s, "%s: (vaddr = 0x%p)",
                   sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -248,7 +242,6 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
                seq_printf(s, "  Connected to DRM CRTC #%d (%s)\n",
                           crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -279,13 +272,7 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
 {
        struct drm_info_node *node = s->private;
        struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
        unsigned int b;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        for (b = 0; b < GDP_NODE_NB_BANK; b++) {
                seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
@@ -294,7 +281,6 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
                gdp_node_dump_node(s, gdp->node_list[b].btm_field);
        }
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -880,6 +866,33 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
        .atomic_disable = sti_gdp_atomic_disable,
 };
 
+static void sti_gdp_destroy(struct drm_plane *drm_plane)
+{
+       DRM_DEBUG_DRIVER("\n");
+
+       drm_plane_helper_disable(drm_plane);
+       drm_plane_cleanup(drm_plane);
+}
+
+static int sti_gdp_late_register(struct drm_plane *drm_plane)
+{
+       struct sti_plane *plane = to_sti_plane(drm_plane);
+       struct sti_gdp *gdp = to_sti_gdp(plane);
+
+       return gdp_debugfs_init(gdp, drm_plane->dev->primary);
+}
+
+static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = sti_gdp_destroy,
+       .set_property = sti_plane_set_property,
+       .reset = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+       .late_register = sti_gdp_late_register,
+};
+
 struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
                                 struct device *dev, int desc,
                                 void __iomem *baseaddr,
@@ -906,7 +919,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
 
        res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
                                       possible_crtcs,
-                                      &sti_plane_helpers_funcs,
+                                      &sti_gdp_plane_helpers_funcs,
                                       gdp_supported_formats,
                                       ARRAY_SIZE(gdp_supported_formats),
                                       type, NULL);
@@ -919,9 +932,6 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
 
        sti_plane_init_property(&gdp->plane, type);
 
-       if (gdp_debugfs_init(gdp, drm_dev->primary))
-               DRM_ERROR("GDP debugfs setup failed\n");
-
        return &gdp->plane.drm_plane;
 
 err:
index f7d3464cdf09af71c984dfa1858f4fe8b5979ab8..8505569f75deec1489fec0f3a74b7ccf8f3067b5 100644 (file)
@@ -376,12 +376,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
 {
        struct drm_info_node *node = s->private;
        struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
        DBGFS_DUMP(HDA_ANA_CFG);
@@ -397,7 +391,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
                hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
        seq_puts(s, "\n");
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -676,20 +669,10 @@ static int sti_hda_connector_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector)
-{
-       struct sti_hda_connector *hda_connector
-               = to_sti_hda_connector(connector);
-
-       /* Best encoder is the one associated during connector creation */
-       return hda_connector->encoder;
-}
-
 static const
 struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
        .get_modes = sti_hda_connector_get_modes,
        .mode_valid = sti_hda_connector_mode_valid,
-       .best_encoder = sti_hda_best_encoder,
 };
 
 static enum drm_connector_status
@@ -698,24 +681,29 @@ sti_hda_connector_detect(struct drm_connector *connector, bool force)
        return connector_status_connected;
 }
 
-static void sti_hda_connector_destroy(struct drm_connector *connector)
+static int sti_hda_late_register(struct drm_connector *connector)
 {
        struct sti_hda_connector *hda_connector
                = to_sti_hda_connector(connector);
+       struct sti_hda *hda = hda_connector->hda;
+
+       if (hda_debugfs_init(hda, hda->drm_dev->primary)) {
+               DRM_ERROR("HDA debugfs setup failed\n");
+               return -EINVAL;
+       }
 
-       drm_connector_unregister(connector);
-       drm_connector_cleanup(connector);
-       kfree(hda_connector);
+       return 0;
 }
 
 static const struct drm_connector_funcs sti_hda_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = sti_hda_connector_detect,
-       .destroy = sti_hda_connector_destroy,
+       .destroy = drm_connector_cleanup,
        .reset = drm_atomic_helper_connector_reset,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+       .late_register = sti_hda_late_register,
 };
 
 static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
@@ -773,10 +761,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
        drm_connector_helper_add(drm_connector,
                        &sti_hda_connector_helper_funcs);
 
-       err = drm_connector_register(drm_connector);
-       if (err)
-               goto err_connector;
-
        err = drm_mode_connector_attach_encoder(drm_connector, encoder);
        if (err) {
                DRM_ERROR("Failed to attach a connector to a encoder\n");
@@ -786,15 +770,10 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
        /* force to disable hd dacs at startup */
        hda_enable_hd_dacs(hda, false);
 
-       if (hda_debugfs_init(hda, drm_dev->primary))
-               DRM_ERROR("HDA debugfs setup failed\n");
-
        return 0;
 
 err_sysfs:
-       drm_connector_unregister(drm_connector);
-err_connector:
-       drm_connector_cleanup(drm_connector);
+       drm_bridge_remove(bridge);
        return -EINVAL;
 }
 
index 6ef0715bd5b9fdc59a262f4f5f79e39af2a26727..fedc17f98d9b029c835a7791e023e127c30e72a7 100644 (file)
@@ -18,6 +18,8 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
+#include <sound/hdmi-codec.h>
+
 #include "sti_hdmi.h"
 #include "sti_hdmi_tx3g4c28phy.h"
 #include "sti_hdmi_tx3g0c55phy.h"
@@ -35,6 +37,8 @@
 #define HDMI_DFLT_CHL0_DAT              0x0110
 #define HDMI_DFLT_CHL1_DAT              0x0114
 #define HDMI_DFLT_CHL2_DAT              0x0118
+#define HDMI_AUDIO_CFG                  0x0200
+#define HDMI_SPDIF_FIFO_STATUS          0x0204
 #define HDMI_SW_DI_1_HEAD_WORD          0x0210
 #define HDMI_SW_DI_1_PKT_WORD0          0x0214
 #define HDMI_SW_DI_1_PKT_WORD1          0x0218
@@ -44,6 +48,9 @@
 #define HDMI_SW_DI_1_PKT_WORD5          0x0228
 #define HDMI_SW_DI_1_PKT_WORD6          0x022C
 #define HDMI_SW_DI_CFG                  0x0230
+#define HDMI_SAMPLE_FLAT_MASK           0x0244
+#define HDMI_AUDN                       0x0400
+#define HDMI_AUD_CTS                    0x0404
 #define HDMI_SW_DI_2_HEAD_WORD          0x0600
 #define HDMI_SW_DI_2_PKT_WORD0          0x0604
 #define HDMI_SW_DI_2_PKT_WORD1          0x0608
 #define HDMI_INT_DLL_LCK                BIT(5)
 #define HDMI_INT_NEW_FRAME              BIT(6)
 #define HDMI_INT_GENCTRL_PKT            BIT(7)
+#define HDMI_INT_AUDIO_FIFO_XRUN        BIT(8)
 #define HDMI_INT_SINK_TERM_PRESENT      BIT(11)
 
 #define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
                        | HDMI_INT_GLOBAL)
 
 #define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
+                       | HDMI_INT_AUDIO_FIFO_XRUN \
                        | HDMI_INT_GENCTRL_PKT \
                        | HDMI_INT_NEW_FRAME \
                        | HDMI_INT_DLL_LCK \
 
 #define HDMI_STA_SW_RST                 BIT(1)
 
+#define HDMI_AUD_CFG_8CH               BIT(0)
+#define HDMI_AUD_CFG_SPDIF_DIV_2       BIT(1)
+#define HDMI_AUD_CFG_SPDIF_DIV_3       BIT(2)
+#define HDMI_AUD_CFG_SPDIF_CLK_DIV_4   (BIT(1) | BIT(2))
+#define HDMI_AUD_CFG_CTS_CLK_256FS     BIT(12)
+#define HDMI_AUD_CFG_DTS_INVALID       BIT(16)
+#define HDMI_AUD_CFG_ONE_BIT_INVALID   (BIT(18) | BIT(19) | BIT(20) | BIT(21))
+#define HDMI_AUD_CFG_CH12_VALID        BIT(28)
+#define HDMI_AUD_CFG_CH34_VALID        BIT(29)
+#define HDMI_AUD_CFG_CH56_VALID        BIT(30)
+#define HDMI_AUD_CFG_CH78_VALID        BIT(31)
+
+/* sample flat mask */
+#define HDMI_SAMPLE_FLAT_NO     0
+#define HDMI_SAMPLE_FLAT_SP0 BIT(0)
+#define HDMI_SAMPLE_FLAT_SP1 BIT(1)
+#define HDMI_SAMPLE_FLAT_SP2 BIT(2)
+#define HDMI_SAMPLE_FLAT_SP3 BIT(3)
+#define HDMI_SAMPLE_FLAT_ALL (HDMI_SAMPLE_FLAT_SP0 | HDMI_SAMPLE_FLAT_SP1 |\
+                             HDMI_SAMPLE_FLAT_SP2 | HDMI_SAMPLE_FLAT_SP3)
+
 #define HDMI_INFOFRAME_HEADER_TYPE(x)    (((x) & 0xff) <<  0)
 #define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) <<  8)
 #define HDMI_INFOFRAME_HEADER_LEN(x)     (((x) & 0x0f) << 16)
@@ -171,6 +201,10 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
                wake_up_interruptible(&hdmi->wait_event);
        }
 
+       /* Audio FIFO underrun IRQ */
+       if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
+               DRM_INFO("Warning: audio FIFO underrun occurred!\n");
+
        return IRQ_HANDLED;
 }
 
@@ -441,26 +475,29 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
  */
 static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
 {
-       struct hdmi_audio_infoframe infofame;
+       struct hdmi_audio_params *audio = &hdmi->audio;
        u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
-       int ret;
-
-       ret = hdmi_audio_infoframe_init(&infofame);
-       if (ret < 0) {
-               DRM_ERROR("failed to setup audio infoframe: %d\n", ret);
-               return ret;
-       }
-
-       infofame.channels = 2;
-
-       ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer));
-       if (ret < 0) {
-               DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
-               return ret;
+       int ret, val;
+
+       DRM_DEBUG_DRIVER("enter %s, AIF %s\n", __func__,
+                        audio->enabled ? "enabled" : "disabled");
+       if (audio->enabled) {
+               /* transmit the stored audio parameters */
+               ret = hdmi_audio_infoframe_pack(&audio->cea, buffer,
+                                               sizeof(buffer));
+               if (ret < 0) {
+                       DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
+                       return ret;
+               }
+               hdmi_infoframe_write_infopack(hdmi, buffer, ret);
+       } else {
+               /* disable audio infoframe transmission */
+               val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+               val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK,
+                                            HDMI_IFRAME_SLOT_AUDIO);
+               hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
        }
 
-       hdmi_infoframe_write_infopack(hdmi, buffer, ret);
-
        return 0;
 }
 
@@ -628,12 +665,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
 {
        struct drm_info_node *node = s->private;
        struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
        DBGFS_DUMP("\n", HDMI_CFG);
@@ -656,6 +687,10 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
        DBGFS_DUMP("", HDMI_SW_DI_CFG);
        hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
 
+       DBGFS_DUMP("\n", HDMI_AUDIO_CFG);
+       DBGFS_DUMP("\n", HDMI_SPDIF_FIFO_STATUS);
+       DBGFS_DUMP("\n", HDMI_AUDN);
+
        seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
                   HDMI_IFRAME_SLOT_AVI);
        DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
@@ -690,7 +725,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
        DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
        seq_puts(s, "\n");
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -861,6 +895,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
 
        count = drm_add_edid_modes(connector, edid);
        drm_mode_connector_update_edid_property(connector, edid);
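+       /* cache the ELD from the EDID so HDMI audio can query sink capabilities */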
+       drm_edid_to_eld(connector, edid);
 
        kfree(edid);
        return count;
@@ -897,20 +932,10 @@ static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector)
-{
-       struct sti_hdmi_connector *hdmi_connector
-               = to_sti_hdmi_connector(connector);
-
-       /* Best encoder is the one associated during connector creation */
-       return hdmi_connector->encoder;
-}
-
 static const
 struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
        .get_modes = sti_hdmi_connector_get_modes,
        .mode_valid = sti_hdmi_connector_mode_valid,
-       .best_encoder = sti_hdmi_best_encoder,
 };
 
 /* get detection status of display device */
@@ -932,16 +957,6 @@ sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
        return connector_status_disconnected;
 }
 
-static void sti_hdmi_connector_destroy(struct drm_connector *connector)
-{
-       struct sti_hdmi_connector *hdmi_connector
-               = to_sti_hdmi_connector(connector);
-
-       drm_connector_unregister(connector);
-       drm_connector_cleanup(connector);
-       kfree(hdmi_connector);
-}
-
 static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
                                             struct drm_connector *connector)
 {
@@ -1024,17 +1039,31 @@ sti_hdmi_connector_get_property(struct drm_connector *connector,
        return -EINVAL;
 }
 
+static int sti_hdmi_late_register(struct drm_connector *connector)
+{
+       struct sti_hdmi_connector *hdmi_connector
+               = to_sti_hdmi_connector(connector);
+       struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+       if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) {
+               DRM_ERROR("HDMI debugfs setup failed\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
-       .dpms = drm_atomic_helper_connector_dpms,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .detect = sti_hdmi_connector_detect,
-       .destroy = sti_hdmi_connector_destroy,
+       .destroy = drm_connector_cleanup,
        .reset = drm_atomic_helper_connector_reset,
        .set_property = drm_atomic_helper_connector_set_property,
        .atomic_set_property = sti_hdmi_connector_set_property,
        .atomic_get_property = sti_hdmi_connector_get_property,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+       .late_register = sti_hdmi_late_register,
 };
 
 static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
@@ -1049,6 +1078,207 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
        return NULL;
 }
 
+/**
+ * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
+ * clocks. Non-coherent clocks means that the audio and TMDS clocks do not
+ * share the same source (so they may drift against each other). In this
+ * case the assumption is that CTS is calculated automatically by hardware.
+ *
+ * @audio_fs: audio frame clock frequency in Hz
+ *
+ * The values computed are based on the table described in the HDMI
+ * specification 1.4b.
+ *
+ * Returns the N value.
+ */
+static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
+{
+       unsigned int n;
+
+       switch (audio_fs) {
+       case 32000:
+               n = 4096;
+               break;
+       case 44100:
+               n = 6272;
+               break;
+       case 48000:
+               n = 6144;
+               break;
+       case 88200:
+               n = 6272 * 2;
+               break;
+       case 96000:
+               n = 6144 * 2;
+               break;
+       case 176400:
+               n = 6272 * 4;
+               break;
+       case 192000:
+               n = 6144 * 4;
+               break;
+       default:
+               /* Not pre-defined, recommended value: 128 * fs / 1000 */
+               n = (audio_fs * 128) / 1000;
+       }
+
+       return n;
+}
+
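As an aside, the pre-defined N values in the lookup below come from the HDMI 1.4b Audio Clock Regeneration table: the sink recovers the audio clock as fs = f_TMDS * N / (128 * CTS), and for the standard rates N is chosen so this works out cleanly. A minimal user-space sketch of the same lookup (illustrative only, not driver code; the function name is made up):

#include <stdio.h>

/* Mirrors the switch in sti_hdmi_audio_get_non_coherent_n(). */
static unsigned int n_for_rate(unsigned int audio_fs)
{
	switch (audio_fs) {
	case 32000:  return 4096;
	case 44100:  return 6272;
	case 48000:  return 6144;
	case 88200:  return 6272 * 2;
	case 96000:  return 6144 * 2;
	case 176400: return 6272 * 4;
	case 192000: return 6144 * 4;
	default:     return (audio_fs * 128) / 1000; /* recommended fallback */
	}
}

int main(void)
{
	const unsigned int rates[] = { 32000, 44100, 48000, 96000, 192000 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("fs = %6u Hz -> N = %u\n", rates[i], n_for_rate(rates[i]));

	return 0;
}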
+static int hdmi_audio_configure(struct sti_hdmi *hdmi,
+                               struct hdmi_audio_params *params)
+{
+       int audio_cfg, n;
+       struct hdmi_audio_infoframe *info = &params->cea;
+
+       DRM_DEBUG_DRIVER("\n");
+
+       if (!hdmi->enabled)
+               return 0;
+
+       /* update N parameter */
+       n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
+
+       DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
+                        params->sample_rate, hdmi->mode.clock * 1000, n);
+       hdmi_write(hdmi, n, HDMI_AUDN);
+
+       /* update HDMI registers according to configuration */
+       audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+                   HDMI_AUD_CFG_ONE_BIT_INVALID;
+
+       switch (info->channels) {
+       case 8:
+               audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
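+               /* fall through */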
+       case 6:
+               audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
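+               /* fall through */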
+       case 4:
+               audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
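+               /* fall through */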
+       case 2:
+               audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
+               break;
+       default:
+               DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n",
+                         info->channels);
+               return -EINVAL;
+       }
+
+       hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+       hdmi->audio = *params;
+
+       return hdmi_audio_infoframe_config(hdmi);
+}
+
+static void hdmi_audio_shutdown(struct device *dev, void *data)
+{
+       struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+       int audio_cfg;
+
+       DRM_DEBUG_DRIVER("\n");
+
+       /* disable audio */
+       audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+                   HDMI_AUD_CFG_ONE_BIT_INVALID;
+       hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+       hdmi->audio.enabled = 0;
+       hdmi_audio_infoframe_config(hdmi);
+}
+
+static int hdmi_audio_hw_params(struct device *dev,
+                               void *data,
+                               struct hdmi_codec_daifmt *daifmt,
+                               struct hdmi_codec_params *params)
+{
+       struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+       int ret;
+       struct hdmi_audio_params audio = {
+               .sample_width = params->sample_width,
+               .sample_rate = params->sample_rate,
+               .cea = params->cea,
+       };
+
+       DRM_DEBUG_DRIVER("\n");
+
+       if (!hdmi->enabled)
+               return 0;
+
+       if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
+           daifmt->frame_clk_inv || daifmt->bit_clk_master ||
+           daifmt->frame_clk_master) {
+               dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+                       daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+                       daifmt->bit_clk_master,
+                       daifmt->frame_clk_master);
+               return -EINVAL;
+       }
+
+       audio.enabled = 1;
+
+       ret = hdmi_audio_configure(hdmi, &audio);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+       struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+
+       DRM_DEBUG_DRIVER("%s\n", enable ? "enable" : "disable");
+
+       if (enable)
+               hdmi_write(hdmi, HDMI_SAMPLE_FLAT_ALL, HDMI_SAMPLE_FLAT_MASK);
+       else
+               hdmi_write(hdmi, HDMI_SAMPLE_FLAT_NO, HDMI_SAMPLE_FLAT_MASK);
+
+       return 0;
+}
+
+static int hdmi_audio_get_eld(struct device *dev, void *data,
+                             uint8_t *buf, size_t len)
+{
+       struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+       struct drm_connector *connector = hdmi->drm_connector;
+
+       DRM_DEBUG_DRIVER("\n");
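+       /* The ELD was filled in by drm_edid_to_eld() in get_modes() above */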
+       memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+
+       return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+       .hw_params = hdmi_audio_hw_params,
+       .audio_shutdown = hdmi_audio_shutdown,
+       .digital_mute = hdmi_audio_digital_mute,
+       .get_eld = hdmi_audio_get_eld,
+};
+
+static int sti_hdmi_register_audio_driver(struct device *dev,
+                                         struct sti_hdmi *hdmi)
+{
+       struct hdmi_codec_pdata codec_data = {
+               .ops = &audio_codec_ops,
+               .max_i2s_channels = 8,
+               .i2s = 1,
+       };
+
+       DRM_DEBUG_DRIVER("\n");
+
+       hdmi->audio.enabled = 0;
+
+       hdmi->audio_pdev = platform_device_register_data(
+               dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+               &codec_data, sizeof(codec_data));
+
+       if (IS_ERR(hdmi->audio_pdev))
+               return PTR_ERR(hdmi->audio_pdev);
+
+       DRM_INFO("%s Driver bound %s\n", HDMI_CODEC_DRV_NAME, dev_name(dev));
+
+       return 0;
+}
+
 static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        struct sti_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1095,9 +1325,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
        /* initialise property */
        sti_hdmi_connector_init_property(drm_dev, drm_connector);
 
-       err = drm_connector_register(drm_connector);
-       if (err)
-               goto err_connector;
+       hdmi->drm_connector = drm_connector;
 
        err = drm_mode_connector_attach_encoder(drm_connector, encoder);
        if (err) {
@@ -1105,19 +1333,27 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
                goto err_sysfs;
        }
 
+       err = sti_hdmi_register_audio_driver(dev, hdmi);
+       if (err) {
+               DRM_ERROR("Failed to attach an audio codec\n");
+               goto err_sysfs;
+       }
+
+       /* Initialize audio infoframe */
+       err = hdmi_audio_infoframe_init(&hdmi->audio.cea);
+       if (err) {
+               DRM_ERROR("Failed to init audio infoframe\n");
+               goto err_sysfs;
+       }
+
        /* Enable default interrupts */
        hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
 
-       if (hdmi_debugfs_init(hdmi, drm_dev->primary))
-               DRM_ERROR("HDMI debugfs setup failed\n");
-
        return 0;
 
 err_sysfs:
-       drm_connector_unregister(drm_connector);
-err_connector:
-       drm_connector_cleanup(drm_connector);
-
+       drm_bridge_remove(bridge);
+       hdmi->drm_connector = NULL;
        return -EINVAL;
 }
 
@@ -1267,6 +1503,8 @@ static int sti_hdmi_remove(struct platform_device *pdev)
        struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
 
        i2c_put_adapter(hdmi->ddc_adapt);
+       if (hdmi->audio_pdev)
+               platform_device_unregister(hdmi->audio_pdev);
        component_del(&pdev->dev, &sti_hdmi_ops);
 
        return 0;
index ef3a94583bbdc3857eb48b413fa961675c5cba33..119bc3582ac78fda5c9547e1855c65502557ffd5 100644 (file)
@@ -23,6 +23,13 @@ struct hdmi_phy_ops {
        void (*stop)(struct sti_hdmi *hdmi);
 };
 
+struct hdmi_audio_params {
+       bool enabled;
+       unsigned int sample_width;
+       unsigned int sample_rate;
+       struct hdmi_audio_infoframe cea;
+};
+
 /* values for the framing mode property */
 enum sti_hdmi_modes {
        HDMI_MODE_HDMI,
@@ -67,6 +74,9 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = {
  * @ddc_adapt: i2c ddc adapter
  * @colorspace: current colorspace selected
  * @hdmi_mode: select framing for HDMI or DVI
+ * @audio_pdev: ASoC hdmi-codec platform device
+ * @audio: hdmi audio parameters.
+ * @drm_connector: hdmi connector
  */
 struct sti_hdmi {
        struct device dev;
@@ -89,6 +99,9 @@ struct sti_hdmi {
        struct i2c_adapter *ddc_adapt;
        enum hdmi_colorspace colorspace;
        enum sti_hdmi_modes hdmi_mode;
+       struct platform_device *audio_pdev;
+       struct hdmi_audio_params audio;
+       struct drm_connector *drm_connector;
 };
 
 u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
index 1edec29b9e45b70f596060304526840e174b9fd4..b03232247966df15d0bb0c1e8ee1a694a9f0e5ec 100644 (file)
@@ -555,14 +555,8 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
 {
        struct drm_info_node *node = s->private;
        struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
        int cmd, cmd_offset, infoxp70;
        void *virt;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        seq_printf(s, "%s: (vaddr = 0x%p)",
                   sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
@@ -630,7 +624,6 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
 
        seq_puts(s, "\n");
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -1241,6 +1234,33 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
        .atomic_disable = sti_hqvdp_atomic_disable,
 };
 
+static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
+{
+       DRM_DEBUG_DRIVER("\n");
+
+       drm_plane_helper_disable(drm_plane);
+       drm_plane_cleanup(drm_plane);
+}
+
+static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
+{
+       struct sti_plane *plane = to_sti_plane(drm_plane);
+       struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+
+       return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+}
+
+static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .destroy = sti_hqvdp_destroy,
+       .set_property = sti_plane_set_property,
+       .reset = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+       .late_register = sti_hqvdp_late_register,
+};
+
 static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
                                          struct device *dev, int desc)
 {
@@ -1253,7 +1273,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
        sti_hqvdp_init(hqvdp);
 
        res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
-                                      &sti_plane_helpers_funcs,
+                                      &sti_hqvdp_plane_helpers_funcs,
                                       hqvdp_supported_formats,
                                       ARRAY_SIZE(hqvdp_supported_formats),
                                       DRM_PLANE_TYPE_OVERLAY, NULL);
@@ -1266,9 +1286,6 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
 
        sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
 
-       if (hqvdp_debugfs_init(hqvdp, drm_dev->primary))
-               DRM_ERROR("HQVDP debugfs setup failed\n");
-
        return &hqvdp->plane.drm_plane;
 }
 
@@ -1346,6 +1363,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
        vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
        if (vtg_np)
                hqvdp->vtg = of_vtg_find(vtg_np);
+       of_node_put(vtg_np);
 
        platform_set_drvdata(pdev, hqvdp);
 
index aed7801b51f714702bc20f2cdef10be16e2aefeb..1885c7ab5a8bd22b713f9ec46a3ad585316fe75d 100644 (file)
@@ -151,12 +151,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
 {
        struct drm_info_node *node = s->private;
        struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        seq_printf(s, "%s: (vaddr = 0x%p)",
                   sti_mixer_to_str(mixer), mixer->regs);
@@ -176,7 +170,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
        mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
        seq_puts(s, "\n");
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -188,7 +181,7 @@ static struct drm_info_list mixer1_debugfs_files[] = {
        { "mixer_aux", mixer_dbg_show, 0, NULL },
 };
 
-static int mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
 {
        unsigned int i;
        struct drm_info_list *mixer_debugfs_files;
@@ -400,8 +393,5 @@ struct sti_mixer *sti_mixer_create(struct device *dev,
        DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
                         sti_mixer_to_str(mixer), mixer->regs);
 
-       if (mixer_debugfs_init(mixer, drm_dev->primary))
-               DRM_ERROR("MIXER debugfs setup failed\n");
-
        return mixer;
 }
index 6f35fc086873dac70fc3a25d91be77b5fb2fc10d..830a3c42d88646dfcc670788f1db7c584dd64e74 100644 (file)
@@ -55,6 +55,8 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
 
 void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
 
+int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
+
 /* depth in Cross-bar control = z order */
 #define GAM_MIXER_NB_DEPTH_LEVEL 6
 
index f10c98d3f0121094e95fc99fc25bbcc7bc66aa37..0cf3335ef37c98bae8dac14be1156e284bf10f32 100644 (file)
@@ -45,25 +45,15 @@ const char *sti_plane_to_str(struct sti_plane *plane)
 
 #define STI_FPS_INTERVAL_MS     3000
 
-static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs)
-{
-       struct timespec tmp_ts = timespec_sub(lhs, rhs);
-       u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
-
-       do_div(tmp_ns, NSEC_PER_MSEC);
-
-       return (u32)tmp_ns;
-}
-
 void sti_plane_update_fps(struct sti_plane *plane,
                          bool new_frame,
                          bool new_field)
 {
-       struct timespec now;
+       ktime_t now;
        struct sti_fps_info *fps;
        int fpks, fipks, ms_since_last, num_frames, num_fields;
 
-       getrawmonotonic(&now);
+       now = ktime_get();
 
        /* Compute number of frame updates */
        fps = &plane->fps_info;
@@ -76,7 +66,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
                return;
 
        fps->curr_frame_counter++;
-       ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp);
+       ms_since_last = ktime_to_ms(ktime_sub(now, fps->last_timestamp));
        num_frames = fps->curr_frame_counter - fps->last_frame_counter;
 
        if (num_frames <= 0  || ms_since_last < STI_FPS_INTERVAL_MS)
@@ -106,17 +96,9 @@ void sti_plane_update_fps(struct sti_plane *plane,
                         plane->fps_info.fips_str);
 }
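The timespec-to-ktime conversion above drops the open-coded do_div() dance: ktime_t is a 64-bit nanosecond count, so interval math reduces to a single subtraction plus a unit conversion. A minimal kernel-context sketch of the pattern (the helper name is invented for illustration):

#include <linux/ktime.h>

/* Milliseconds elapsed since @then, via one 64-bit subtraction. */
static inline s64 example_ms_since(ktime_t then)
{
	return ktime_to_ms(ktime_sub(ktime_get(), then));
}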
 
-static void sti_plane_destroy(struct drm_plane *drm_plane)
-{
-       DRM_DEBUG_DRIVER("\n");
-
-       drm_plane_helper_disable(drm_plane);
-       drm_plane_cleanup(drm_plane);
-}
-
-static int sti_plane_set_property(struct drm_plane *drm_plane,
-                                 struct drm_property *property,
-                                 uint64_t val)
+int sti_plane_set_property(struct drm_plane *drm_plane,
+                          struct drm_property *property,
+                          uint64_t val)
 {
        struct drm_device *dev = drm_plane->dev;
        struct sti_private *private = dev->dev_private;
@@ -170,13 +152,3 @@ void sti_plane_init_property(struct sti_plane *plane,
                         plane->drm_plane.base.id,
                         sti_plane_to_str(plane), plane->zorder);
 }
-
-struct drm_plane_funcs sti_plane_helpers_funcs = {
-       .update_plane = drm_atomic_helper_update_plane,
-       .disable_plane = drm_atomic_helper_disable_plane,
-       .destroy = sti_plane_destroy,
-       .set_property = sti_plane_set_property,
-       .reset = drm_atomic_helper_plane_reset,
-       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
-       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
index c50a3b9f5d378cd42729705f534431dd482ded68..e0ea1dd3bb88984c8880eb9afb7f9e867de9398e 100644 (file)
@@ -11,8 +11,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 
-extern struct drm_plane_funcs sti_plane_helpers_funcs;
-
 #define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
 
 #define STI_PLANE_TYPE_SHIFT 8
@@ -57,7 +55,7 @@ struct sti_fps_info {
        unsigned int last_frame_counter;
        unsigned int curr_field_counter;
        unsigned int last_field_counter;
-       struct timespec last_timestamp;
+       ktime_t      last_timestamp;
        char fps_str[FPS_LENGTH];
        char fips_str[FPS_LENGTH];
 };
@@ -83,6 +81,11 @@ const char *sti_plane_to_str(struct sti_plane *plane);
 void sti_plane_update_fps(struct sti_plane *plane,
                          bool new_frame,
                          bool new_field);
+
+int sti_plane_set_property(struct drm_plane *drm_plane,
+                          struct drm_property *property,
+                          uint64_t val);
+
 void sti_plane_init_property(struct sti_plane *plane,
                             enum drm_plane_type type);
 #endif
index f983db5a59da1f33fbdc349ac896435a151cc2e1..e25995b35715f3ddf4c0ca32e42ec97cdf4d2d82 100644 (file)
@@ -112,6 +112,7 @@ struct sti_tvout {
        struct drm_encoder *hdmi;
        struct drm_encoder *hda;
        struct drm_encoder *dvo;
+       bool debugfs_registered;
 };
 
 struct sti_tvout_encoder {
@@ -515,13 +516,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
 {
        struct drm_info_node *node = s->private;
        struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
        struct drm_crtc *crtc;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
 
@@ -587,7 +582,6 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
        DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
        seq_puts(s, "\n");
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -632,8 +626,37 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
        kfree(sti_encoder);
 }
 
+static int sti_tvout_late_register(struct drm_encoder *encoder)
+{
+       struct sti_tvout *tvout = to_sti_tvout(encoder);
+       int ret;
+
+       if (tvout->debugfs_registered)
+               return 0;
+
+       ret = tvout_debugfs_init(tvout, encoder->dev->primary);
+       if (ret)
+               return ret;
+
+       tvout->debugfs_registered = true;
+       return 0;
+}
+
+static void sti_tvout_early_unregister(struct drm_encoder *encoder)
+{
+       struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+       if (!tvout->debugfs_registered)
+               return;
+
+       tvout_debugfs_exit(tvout, encoder->dev->primary);
+       tvout->debugfs_registered = false;
+}
+
 static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
        .destroy = sti_tvout_encoder_destroy,
+       .late_register = sti_tvout_late_register,
+       .early_unregister = sti_tvout_early_unregister,
 };
 
 static void sti_dvo_encoder_enable(struct drm_encoder *encoder)
@@ -820,9 +843,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
 
        sti_tvout_create_encoders(drm_dev, tvout);
 
-       if (tvout_debugfs_init(tvout, drm_dev->primary))
-               DRM_ERROR("TVOUT debugfs setup failed\n");
-
        return 0;
 }
 
@@ -830,11 +850,8 @@ static void sti_tvout_unbind(struct device *dev, struct device *master,
        void *data)
 {
        struct sti_tvout *tvout = dev_get_drvdata(dev);
-       struct drm_device *drm_dev = data;
 
        sti_tvout_destroy_encoders(tvout);
-
-       tvout_debugfs_exit(tvout, drm_dev->primary);
 }
 
 static const struct component_ops sti_tvout_ops = {
index 523ed19f5ac6351408c73acff5287da9284af5a6..47634a0251fca699916479b4a67ea7fa5f612f86 100644 (file)
@@ -92,12 +92,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
 {
        struct drm_info_node *node = s->private;
        struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
-       struct drm_device *dev = node->minor->dev;
-       int ret;
-
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
 
        seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
 
@@ -122,7 +116,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
        DBGFS_DUMP(VID_CSAT);
        seq_puts(s, "\n");
 
-       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -130,7 +123,7 @@ static struct drm_info_list vid_debugfs_files[] = {
        { "vid", vid_dbg_show, 0, NULL },
 };
 
-static int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
 {
        unsigned int i;
 
@@ -227,8 +220,5 @@ struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
 
        sti_vid_init(vid);
 
-       if (vid_debugfs_init(vid, drm_dev->primary))
-               DRM_ERROR("VID debugfs setup failed\n");
-
        return vid;
 }
index 6c842344f3d8f3db36ec0e44e115340ac442d2c3..fdc90f922a0544d630c41547058b93bd53ba30fd 100644 (file)
@@ -26,4 +26,6 @@ void sti_vid_disable(struct sti_vid *vid);
 struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
                               int id, void __iomem *baseaddr);
 
+int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
+
 #endif
index 6bf4ce466d20fc194e968098bf76cca1efa77d84..0bdc385eec178663b823df0b83c5ed2a320965e8 100644 (file)
@@ -65,7 +65,7 @@
 #define HDMI_DELAY          (5)
 
 /* Delay introduced by the DVO in nb of pixels */
-#define DVO_DELAY           (2)
+#define DVO_DELAY           (7)
 
 /* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
 #define AWG_DELAY_HD        (-9)
@@ -432,6 +432,7 @@ static int vtg_probe(struct platform_device *pdev)
        np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
        if (np) {
                vtg->slave = of_vtg_find(np);
+               of_node_put(np);
 
                if (!vtg->slave)
                        return -EPROBE_DEFER;
index 99510e64e91aeece76c5cb0abe40764f138ca911..a4b357db8856f436b843811f18d05ebcfde8911a 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_SUN4I
        tristate "DRM Support for Allwinner A10 Display Engine"
-       depends on DRM && ARM
+       depends on DRM && ARM && COMMON_CLK
        depends on ARCH_SUNXI || COMPILE_TEST
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
index f7a15c1a93bf1a8a71eadeda0082dc309ef64a99..3ab560450a82e666795eddc285214391ee763e71 100644 (file)
@@ -190,7 +190,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
        /* Get the physical address of the buffer in memory */
        gem = drm_fb_cma_get_gem_obj(fb, 0);
 
-       DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr);
+       DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
 
        /* Compute the start of the displayed memory */
        bpp = drm_format_plane_cpp(fb->pixel_format, 0);
@@ -198,7 +198,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
        paddr += (state->src_x >> 16) * bpp;
        paddr += (state->src_y >> 16) * fb->pitches[0];
 
-       DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr);
+       DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
 
        /* Write the 32 lower bits of the address (in bits) */
        lo_paddr = paddr << 3;
index 4182a21f5923dddbc06d2042bb82837f0f7ab0f5..4a192210574f5ed442d09e33babb8697eacd4bad 100644 (file)
@@ -51,10 +51,22 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
 {
        struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
        struct sun4i_drv *drv = scrtc->drv;
+       struct drm_pending_vblank_event *event = crtc->state->event;
 
        DRM_DEBUG_DRIVER("Committing plane changes\n");
 
        sun4i_backend_commit(drv->backend);
+
+       if (event) {
+               crtc->state->event = NULL;
+
+               spin_lock_irq(&crtc->dev->event_lock);
+               if (drm_crtc_vblank_get(crtc) == 0)
+                       drm_crtc_arm_vblank_event(crtc, event);
+               else
+                       drm_crtc_send_vblank_event(crtc, event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+       }
 }
 
 static void sun4i_crtc_disable(struct drm_crtc *crtc)
@@ -65,6 +77,14 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
        DRM_DEBUG_DRIVER("Disabling the CRTC\n");
 
        sun4i_tcon_disable(drv->tcon);
+
+       if (crtc->state->event && !crtc->state->active) {
+               spin_lock_irq(&crtc->dev->event_lock);
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+
+               crtc->state->event = NULL;
+       }
 }
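The two hunks above implement the standard atomic-commit contract for pageflip events: when the CRTC can deliver a vblank interrupt, the event is armed and the DRM core signals it at the next vblank; otherwise, or when the CRTC is being switched off, it is sent immediately so userspace never waits on an event that cannot fire. A condensed sketch of the pattern (illustrative only; it merges both paths into one hypothetical helper, using the same DRM core calls as the hunks):

static void example_complete_event(struct drm_crtc *crtc)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (!event)
		return;

	crtc->state->event = NULL;

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->active && drm_crtc_vblank_get(crtc) == 0)
		drm_crtc_arm_vblank_event(crtc, event);  /* fires at next vblank */
	else
		drm_crtc_send_vblank_event(crtc, event); /* complete right away */
	spin_unlock_irq(&crtc->dev->event_lock);
}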
 
 static void sun4i_crtc_enable(struct drm_crtc *crtc)
index 3ff668cb463c2af3f3c1f754d58b00e65fa08d4b..5b3463197c488ae3eac98500eeda121443406304 100644 (file)
@@ -72,14 +72,40 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
 static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long *parent_rate)
 {
-       return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate);
+       unsigned long best_parent = 0;
+       u8 best_div = 1;
+       int i;
+
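+       /*
+        * Walk the 7-bit TCON dot-clock divider range (6..126) and ask
+        * the parent clock for an exact multiple of the requested rate;
+        * failing that, remember the closest parent rate below the ideal.
+        */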
+       for (i = 6; i < 127; i++) {
+               unsigned long ideal = rate * i;
+               unsigned long rounded;
+
+               rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
+                                           ideal);
+
+               if (rounded == ideal) {
+                       best_parent = rounded;
+                       best_div = i;
+                       goto out;
+               }
+
+               if ((rounded < ideal) && (rounded > best_parent)) {
+                       best_parent = rounded;
+                       best_div = i;
+               }
+       }
+
+out:
+       *parent_rate = best_parent;
+
+       return best_parent / best_div;
 }
 
 static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long parent_rate)
 {
        struct sun4i_dclk *dclk = hw_to_dclk(hw);
-       int div = DIV_ROUND_CLOSEST(parent_rate, rate);
+       u8 div = parent_rate / rate;
 
        return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
                                  GENMASK(6, 0), div);
@@ -127,10 +153,14 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
        const char *clk_name, *parent_name;
        struct clk_init_data init;
        struct sun4i_dclk *dclk;
+       int ret;
 
        parent_name = __clk_get_name(tcon->sclk0);
-       of_property_read_string_index(dev->of_node, "clock-output-names", 0,
-                                     &clk_name);
+       ret = of_property_read_string_index(dev->of_node,
+                                           "clock-output-names", 0,
+                                           &clk_name);
+       if (ret)
+               return ret;
 
        dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
        if (!dclk)
@@ -140,6 +170,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
        init.ops = &sun4i_dclk_ops;
        init.parent_names = &parent_name;
        init.num_parents = 1;
+       init.flags = CLK_SET_RATE_PARENT;
 
        dclk->regmap = tcon->regs;
        dclk->hw.init = &init;
index 76e922bb60e5430cb440ae13a743e2031219084e..7092daaf6c432b8fa8d2bb93bda63785cf307f01 100644 (file)
 #include "sun4i_layer.h"
 #include "sun4i_tcon.h"
 
-static int sun4i_drv_connector_plug_all(struct drm_device *drm)
-{
-       struct drm_connector *connector, *failed;
-       int ret;
-
-       mutex_lock(&drm->mode_config.mutex);
-       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-               ret = drm_connector_register(connector);
-               if (ret) {
-                       failed = connector;
-                       goto err;
-               }
-       }
-       mutex_unlock(&drm->mode_config.mutex);
-       return 0;
-
-err:
-       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-               if (failed == connector)
-                       break;
-
-               drm_connector_unregister(connector);
-       }
-       mutex_unlock(&drm->mode_config.mutex);
-
-       return ret;
-}
-
 static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe)
 {
        struct sun4i_drv *drv = drm->dev_private;
@@ -103,7 +75,7 @@ static struct drm_driver sun4i_drv_driver = {
        .dumb_create            = drm_gem_cma_dumb_create,
        .dumb_destroy           = drm_gem_dumb_destroy,
        .dumb_map_offset        = drm_gem_cma_dumb_map_offset,
-       .gem_free_object        = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
 
        /* PRIME Operations */
@@ -120,11 +92,27 @@ static struct drm_driver sun4i_drv_driver = {
        /* Frame Buffer Operations */
 
        /* VBlank Operations */
-       .get_vblank_counter     = drm_vblank_count,
+       .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = sun4i_drv_enable_vblank,
        .disable_vblank         = sun4i_drv_disable_vblank,
 };
 
+static void sun4i_remove_framebuffers(void)
+{
+       struct apertures_struct *ap;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       /* The framebuffer can be located anywhere in RAM */
+       ap->ranges[0].base = 0;
+       ap->ranges[0].size = ~0;
+
+       remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
+       kfree(ap);
+}
+
 static int sun4i_drv_bind(struct device *dev)
 {
        struct drm_device *drm;
@@ -135,10 +123,6 @@ static int sun4i_drv_bind(struct device *dev)
        if (!drm)
                return -ENOMEM;
 
-       ret = drm_dev_set_unique(drm, dev_name(drm->dev));
-       if (ret)
-               goto free_drm;
-
        drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
        if (!drv) {
                ret = -ENOMEM;
@@ -172,6 +156,9 @@ static int sun4i_drv_bind(struct device *dev)
        }
        drm->irq_enabled = true;
 
+       /* Remove early framebuffers (ie. simplefb) */
+       sun4i_remove_framebuffers();
+
        /* Create our framebuffer */
        drv->fbdev = sun4i_framebuffer_init(drm);
        if (IS_ERR(drv->fbdev)) {
@@ -187,14 +174,8 @@ static int sun4i_drv_bind(struct device *dev)
        if (ret)
                goto free_drm;
 
-       ret = sun4i_drv_connector_plug_all(drm);
-       if (ret)
-               goto unregister_drm;
-
        return 0;
 
-unregister_drm:
-       drm_dev_unregister(drm);
 free_drm:
        drm_dev_unref(drm);
        return ret;
@@ -318,6 +299,7 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 
                count += sun4i_drv_add_endpoints(&pdev->dev, &match,
                                                pipeline);
+               of_node_put(pipeline);
 
                DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
                                 count, i);
index a0b30c216a5bf5a96a2077eb10282102bfa0f5a7..70688febd7ac8b08ee064ffcaabef506e63515eb 100644 (file)
@@ -20,8 +20,7 @@ static void sun4i_de_output_poll_changed(struct drm_device *drm)
 {
        struct sun4i_drv *drv = drm->dev_private;
 
-       if (drv->fbdev)
-               drm_fbdev_cma_hotplug_event(drv->fbdev);
+       drm_fbdev_cma_hotplug_event(drv->fbdev);
 }
 
 static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
index ab64948180501d9463f211a8017e4babff4a7168..f5bbac6efb4c8c01e5f7cd709de037f168a92eb6 100644 (file)
@@ -54,8 +54,13 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
 static int sun4i_rgb_mode_valid(struct drm_connector *connector,
                                struct drm_display_mode *mode)
 {
+       struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
+       struct sun4i_drv *drv = rgb->drv;
+       struct sun4i_tcon *tcon = drv->tcon;
        u32 hsync = mode->hsync_end - mode->hsync_start;
        u32 vsync = mode->vsync_end - mode->vsync_start;
+       unsigned long rate = mode->clock * 1000;
+       long rounded_rate;
 
        DRM_DEBUG_DRIVER("Validating modes...\n");
 
@@ -87,22 +92,21 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
 
        DRM_DEBUG_DRIVER("Vertical parameters OK\n");
 
-       return MODE_OK;
-}
+       rounded_rate = clk_round_rate(tcon->dclk, rate);
+       if (rounded_rate < rate)
+               return MODE_CLOCK_LOW;
 
-static struct drm_encoder *
-sun4i_rgb_best_encoder(struct drm_connector *connector)
-{
-       struct sun4i_rgb *rgb =
-               drm_connector_to_sun4i_rgb(connector);
+       if (rounded_rate > rate)
+               return MODE_CLOCK_HIGH;
 
-       return &rgb->encoder;
+       DRM_DEBUG_DRIVER("Clock rate OK\n");
+
+       return MODE_OK;
 }
 
 static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
        .get_modes      = sun4i_rgb_get_modes,
        .mode_valid     = sun4i_rgb_mode_valid,
-       .best_encoder   = sun4i_rgb_best_encoder,
 };
 
 static enum drm_connector_status
@@ -203,7 +207,7 @@ int sun4i_rgb_init(struct drm_device *drm)
        int ret;
 
        /* If we don't have a panel, there's no point in going on */
-       if (!tcon->panel)
+       if (IS_ERR(tcon->panel))
                return -ENODEV;
 
        rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
index 9f19b0e08560e681f50bfe00635ecefef18ef885..652385f09735c3f4bb11b713809cc6b67034d459 100644 (file)
@@ -425,11 +425,11 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
 
        remote = of_graph_get_remote_port_parent(end_node);
        if (!remote) {
-               DRM_DEBUG_DRIVER("Enable to parse remote node\n");
+               DRM_DEBUG_DRIVER("Unable to parse remote node\n");
                return ERR_PTR(-EINVAL);
        }
 
-       return of_drm_find_panel(remote);
+       return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
 }
 
 static int sun4i_tcon_bind(struct device *dev, struct device *master,
@@ -490,7 +490,11 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
                return 0;
        }
 
-       return sun4i_rgb_init(drm);
+       ret = sun4i_rgb_init(drm);
+       if (ret < 0)
+               goto err_free_clocks;
+
+       return 0;
 
 err_free_clocks:
        sun4i_tcon_free_clocks(tcon);
@@ -522,12 +526,13 @@ static int sun4i_tcon_probe(struct platform_device *pdev)
         * Defer the probe.
         */
        panel = sun4i_tcon_find_panel(node);
-       if (IS_ERR(panel)) {
-               /*
-                * If we don't have a panel endpoint, just go on
-                */
-               if (PTR_ERR(panel) != -ENODEV)
-                       return -EPROBE_DEFER;
+
+       /*
+        * If we don't have a panel endpoint, just go on
+        */
+       if (PTR_ERR(panel) == -EPROBE_DEFER) {
+               DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
+               return -EPROBE_DEFER;
        }
 
        return component_add(&pdev->dev, &sun4i_tcon_ops);
index bc047f92350876ec1bec330f2090287fe20e04dd..b84147896294a62df47c8ffb37c2c64fd92e7867 100644 (file)
@@ -526,18 +526,9 @@ static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static struct drm_encoder *
-sun4i_tv_comp_best_encoder(struct drm_connector *connector)
-{
-       struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector);
-
-       return &tv->encoder;
-}
-
 static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
        .get_modes      = sun4i_tv_comp_get_modes,
        .mode_valid     = sun4i_tv_comp_mode_valid,
-       .best_encoder   = sun4i_tv_comp_best_encoder,
 };
 
 static enum drm_connector_status
index 39940f5b7c91416e08cb61797798f9b2e0838ff7..8495bd01b5449e886c723b285199a37cf8e8a520 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/debugfs.h>
 #include <linux/iommu.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
 #include <soc/tegra/pmc.h>
@@ -1216,6 +1217,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
 
        tegra_dc_stats_reset(&dc->stats);
        drm_crtc_vblank_off(crtc);
+
+       pm_runtime_put_sync(dc->dev);
 }
 
 static void tegra_crtc_enable(struct drm_crtc *crtc)
@@ -1225,6 +1228,48 @@ static void tegra_crtc_enable(struct drm_crtc *crtc)
        struct tegra_dc *dc = to_tegra_dc(crtc);
        u32 value;
 
+       pm_runtime_get_sync(dc->dev);
+
+       /* initialize display controller */
+       if (dc->syncpt) {
+               u32 syncpt = host1x_syncpt_id(dc->syncpt);
+
+               value = SYNCPT_CNTRL_NO_STALL;
+               tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
+
+               value = SYNCPT_VSYNC_ENABLE | syncpt;
+               tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
+       }
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
+
+       /* initialize timer */
+       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
+               WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
+       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
+
+       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
+               WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
+       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
+
+       value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
+
+       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
+               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
+       tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
+
+       if (dc->soc->supports_border_color)
+               tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
+
+       /* apply PLL and pixel clock changes */
        tegra_dc_commit_state(dc, state);
 
        /* program display mode */
@@ -1685,7 +1730,6 @@ static int tegra_dc_init(struct host1x_client *client)
        struct tegra_drm *tegra = drm->dev_private;
        struct drm_plane *primary = NULL;
        struct drm_plane *cursor = NULL;
-       u32 value;
        int err;
 
        dc->syncpt = host1x_syncpt_request(dc->dev, flags);
@@ -1755,47 +1799,6 @@ static int tegra_dc_init(struct host1x_client *client)
                goto cleanup;
        }
 
-       /* initialize display controller */
-       if (dc->syncpt) {
-               u32 syncpt = host1x_syncpt_id(dc->syncpt);
-
-               value = SYNCPT_CNTRL_NO_STALL;
-               tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
-
-               value = SYNCPT_VSYNC_ENABLE | syncpt;
-               tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
-       }
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
-
-       /* initialize timer */
-       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
-               WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
-       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
-
-       value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
-               WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
-       tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
-
-       value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
-
-       value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
-               WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
-       tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
-
-       if (dc->soc->supports_border_color)
-               tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
-
-       tegra_dc_stats_reset(&dc->stats);
-
        return 0;
 
 cleanup:
@@ -1987,33 +1990,15 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return PTR_ERR(dc->rst);
        }
 
+       reset_control_assert(dc->rst);
+
        if (dc->soc->has_powergate) {
                if (dc->pipe == 0)
                        dc->powergate = TEGRA_POWERGATE_DIS;
                else
                        dc->powergate = TEGRA_POWERGATE_DISB;
 
-               err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
-                                                       dc->rst);
-               if (err < 0) {
-                       dev_err(&pdev->dev, "failed to power partition: %d\n",
-                               err);
-                       return err;
-               }
-       } else {
-               err = clk_prepare_enable(dc->clk);
-               if (err < 0) {
-                       dev_err(&pdev->dev, "failed to enable clock: %d\n",
-                               err);
-                       return err;
-               }
-
-               err = reset_control_deassert(dc->rst);
-               if (err < 0) {
-                       dev_err(&pdev->dev, "failed to deassert reset: %d\n",
-                               err);
-                       return err;
-               }
+               tegra_powergate_power_off(dc->powergate);
        }
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2027,16 +2012,19 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
-       INIT_LIST_HEAD(&dc->client.list);
-       dc->client.ops = &dc_client_ops;
-       dc->client.dev = &pdev->dev;
-
        err = tegra_dc_rgb_probe(dc);
        if (err < 0 && err != -ENODEV) {
                dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
                return err;
        }
 
+       platform_set_drvdata(pdev, dc);
+       pm_runtime_enable(&pdev->dev);
+
+       INIT_LIST_HEAD(&dc->client.list);
+       dc->client.ops = &dc_client_ops;
+       dc->client.dev = &pdev->dev;
+
        err = host1x_client_register(&dc->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register host1x client: %d\n",
@@ -2044,8 +2032,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return err;
        }
 
-       platform_set_drvdata(pdev, dc);
-
        return 0;
 }
 
@@ -2067,7 +2053,22 @@ static int tegra_dc_remove(struct platform_device *pdev)
                return err;
        }
 
-       reset_control_assert(dc->rst);
+       pm_runtime_disable(&pdev->dev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dc_suspend(struct device *dev)
+{
+       struct tegra_dc *dc = dev_get_drvdata(dev);
+       int err;
+
+       err = reset_control_assert(dc->rst);
+       if (err < 0) {
+               dev_err(dev, "failed to assert reset: %d\n", err);
+               return err;
+       }
 
        if (dc->soc->has_powergate)
                tegra_powergate_power_off(dc->powergate);
@@ -2077,10 +2078,45 @@ static int tegra_dc_remove(struct platform_device *pdev)
        return 0;
 }
 
+static int tegra_dc_resume(struct device *dev)
+{
+       struct tegra_dc *dc = dev_get_drvdata(dev);
+       int err;
+
+       if (dc->soc->has_powergate) {
+               err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+                                                       dc->rst);
+               if (err < 0) {
+                       dev_err(dev, "failed to power partition: %d\n", err);
+                       return err;
+               }
+       } else {
+               err = clk_prepare_enable(dc->clk);
+               if (err < 0) {
+                       dev_err(dev, "failed to enable clock: %d\n", err);
+                       return err;
+               }
+
+               err = reset_control_deassert(dc->rst);
+               if (err < 0) {
+                       dev_err(dev, "failed to deassert reset: %d\n", err);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_dc_pm_ops = {
+       SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL)
+};
+
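Taken together with the pm_runtime_get_sync()/pm_runtime_put_sync() calls added to the CRTC enable/disable paths earlier in this series, the power sequencing moved out of probe()/remove() now runs on demand. A rough sketch of the resulting flow (illustrative only; it assumes the struct tegra_dc layout shown in these hunks):

#include <linux/pm_runtime.h>

static void example_crtc_enable(struct tegra_dc *dc)
{
	/* First reference powers the head: tegra_dc_resume() runs. */
	pm_runtime_get_sync(dc->dev);
	/* ... re-program syncpoints, interrupts, thresholds ... */
}

static void example_crtc_disable(struct tegra_dc *dc)
{
	/* ... stop scanout, reset stats ... */
	/* Last reference gone: tegra_dc_suspend() asserts reset/powergates. */
	pm_runtime_put_sync(dc->dev);
}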
 struct platform_driver tegra_dc_driver = {
        .driver = {
                .name = "tegra-dc",
                .of_match_table = tegra_dc_of_match,
+               .pm = &tegra_dc_pm_ops,
        },
        .probe = tegra_dc_probe,
        .remove = tegra_dc_remove,
index b24a0f14821a53dc65d70c46440a9b1feff92824..059f409556d55dd1c617917b60ce728d7d3a67a4 100644 (file)
@@ -12,6 +12,9 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/of_gpio.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/regulator/consumer.h>
@@ -44,6 +47,11 @@ struct tegra_dpaux {
        struct completion complete;
        struct work_struct work;
        struct list_head list;
+
+#ifdef CONFIG_GENERIC_PINCONF
+       struct pinctrl_dev *pinctrl;
+       struct pinctrl_desc desc;
+#endif
 };
 
 static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
@@ -267,6 +275,148 @@ static irqreturn_t tegra_dpaux_irq(int irq, void *data)
        return ret;
 }
 
+enum tegra_dpaux_functions {
+       DPAUX_PADCTL_FUNC_AUX,
+       DPAUX_PADCTL_FUNC_I2C,
+       DPAUX_PADCTL_FUNC_OFF,
+};
+
+static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux)
+{
+       u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+       value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static void tegra_dpaux_pad_power_up(struct tegra_dpaux *dpaux)
+{
+       u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
+
+       value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
+
+       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+}
+
+static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
+{
+       u32 value;
+
+       switch (function) {
+       case DPAUX_PADCTL_FUNC_AUX:
+               value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
+                       DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
+                       DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
+                       DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
+                       DPAUX_HYBRID_PADCTL_MODE_AUX;
+               break;
+
+       case DPAUX_PADCTL_FUNC_I2C:
+               value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
+                       DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+                       DPAUX_HYBRID_PADCTL_MODE_I2C;
+               break;
+
+       case DPAUX_PADCTL_FUNC_OFF:
+               tegra_dpaux_pad_power_down(dpaux);
+               return 0;
+
+       default:
+               return -ENOTSUPP;
+       }
+
+       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+       tegra_dpaux_pad_power_up(dpaux);
+
+       return 0;
+}
+
+#ifdef CONFIG_GENERIC_PINCONF
+static const struct pinctrl_pin_desc tegra_dpaux_pins[] = {
+       PINCTRL_PIN(0, "DP_AUX_CHx_P"),
+       PINCTRL_PIN(1, "DP_AUX_CHx_N"),
+};
+
+static const unsigned tegra_dpaux_pin_numbers[] = { 0, 1 };
+
+static const char * const tegra_dpaux_groups[] = {
+       "dpaux-io",
+};
+
+static const char * const tegra_dpaux_functions[] = {
+       "aux",
+       "i2c",
+       "off",
+};
+
+static int tegra_dpaux_get_groups_count(struct pinctrl_dev *pinctrl)
+{
+       return ARRAY_SIZE(tegra_dpaux_groups);
+}
+
+static const char *tegra_dpaux_get_group_name(struct pinctrl_dev *pinctrl,
+                                             unsigned int group)
+{
+       return tegra_dpaux_groups[group];
+}
+
+static int tegra_dpaux_get_group_pins(struct pinctrl_dev *pinctrl,
+                                     unsigned group, const unsigned **pins,
+                                     unsigned *num_pins)
+{
+       *pins = tegra_dpaux_pin_numbers;
+       *num_pins = ARRAY_SIZE(tegra_dpaux_pin_numbers);
+
+       return 0;
+}
+
+static const struct pinctrl_ops tegra_dpaux_pinctrl_ops = {
+       .get_groups_count = tegra_dpaux_get_groups_count,
+       .get_group_name = tegra_dpaux_get_group_name,
+       .get_group_pins = tegra_dpaux_get_group_pins,
+       .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+       .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int tegra_dpaux_get_functions_count(struct pinctrl_dev *pinctrl)
+{
+       return ARRAY_SIZE(tegra_dpaux_functions);
+}
+
+static const char *tegra_dpaux_get_function_name(struct pinctrl_dev *pinctrl,
+                                                unsigned int function)
+{
+       return tegra_dpaux_functions[function];
+}
+
+static int tegra_dpaux_get_function_groups(struct pinctrl_dev *pinctrl,
+                                          unsigned int function,
+                                          const char * const **groups,
+                                          unsigned * const num_groups)
+{
+       *num_groups = ARRAY_SIZE(tegra_dpaux_groups);
+       *groups = tegra_dpaux_groups;
+
+       return 0;
+}
+
+static int tegra_dpaux_set_mux(struct pinctrl_dev *pinctrl,
+                              unsigned int function, unsigned int group)
+{
+       struct tegra_dpaux *dpaux = pinctrl_dev_get_drvdata(pinctrl);
+
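+       /*
+        * Function indices from the pinctrl core index the
+        * tegra_dpaux_functions[] table, which is declared in the same
+        * order as enum tegra_dpaux_functions, so the index maps
+        * directly onto a pad configuration.
+        */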
+       return tegra_dpaux_pad_config(dpaux, function);
+}
+
+static const struct pinmux_ops tegra_dpaux_pinmux_ops = {
+       .get_functions_count = tegra_dpaux_get_functions_count,
+       .get_function_name = tegra_dpaux_get_function_name,
+       .get_function_groups = tegra_dpaux_get_function_groups,
+       .set_mux = tegra_dpaux_set_mux,
+};
+#endif
+
 static int tegra_dpaux_probe(struct platform_device *pdev)
 {
        struct tegra_dpaux *dpaux;
@@ -294,11 +444,14 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
-       dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
-       if (IS_ERR(dpaux->rst)) {
-               dev_err(&pdev->dev, "failed to get reset control: %ld\n",
-                       PTR_ERR(dpaux->rst));
-               return PTR_ERR(dpaux->rst);
+       if (!pdev->dev.pm_domain) {
+               dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
+               if (IS_ERR(dpaux->rst)) {
+                       dev_err(&pdev->dev,
+                               "failed to get reset control: %ld\n",
+                               PTR_ERR(dpaux->rst));
+                       return PTR_ERR(dpaux->rst);
+               }
        }
 
        dpaux->clk = devm_clk_get(&pdev->dev, NULL);
@@ -315,34 +468,37 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
                return err;
        }
 
-       reset_control_deassert(dpaux->rst);
+       if (dpaux->rst)
+               reset_control_deassert(dpaux->rst);
 
        dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
        if (IS_ERR(dpaux->clk_parent)) {
                dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
                        PTR_ERR(dpaux->clk_parent));
-               return PTR_ERR(dpaux->clk_parent);
+               err = PTR_ERR(dpaux->clk_parent);
+               goto assert_reset;
        }
 
        err = clk_prepare_enable(dpaux->clk_parent);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
                        err);
-               return err;
+               goto assert_reset;
        }
 
        err = clk_set_rate(dpaux->clk_parent, 270000000);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
                        err);
-               return err;
+               goto disable_parent_clk;
        }
 
        dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
        if (IS_ERR(dpaux->vdd)) {
                dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
                        PTR_ERR(dpaux->vdd));
-               return PTR_ERR(dpaux->vdd);
+               err = PTR_ERR(dpaux->vdd);
+               goto disable_parent_clk;
        }
 
        err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
@@ -350,7 +506,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
        if (err < 0) {
                dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
                        dpaux->irq, err);
-               return err;
+               goto disable_parent_clk;
        }
 
        disable_irq(dpaux->irq);
@@ -360,7 +516,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
 
        err = drm_dp_aux_register(&dpaux->aux);
        if (err < 0)
-               return err;
+               goto disable_parent_clk;
 
        /*
         * Assume that by default the DPAUX/I2C pads will be used for HDMI,
@@ -370,16 +526,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
         * is no possibility to perform the I2C mode configuration in the
         * HDMI path.
         */
-       value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
-       value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
-
-       value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
-       value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
-               DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
-               DPAUX_HYBRID_PADCTL_MODE_I2C;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
+       err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
+       if (err < 0)
+               return err;
 
+#ifdef CONFIG_GENERIC_PINCONF
+       dpaux->desc.name = dev_name(&pdev->dev);
+       dpaux->desc.pins = tegra_dpaux_pins;
+       dpaux->desc.npins = ARRAY_SIZE(tegra_dpaux_pins);
+       dpaux->desc.pctlops = &tegra_dpaux_pinctrl_ops;
+       dpaux->desc.pmxops = &tegra_dpaux_pinmux_ops;
+       dpaux->desc.owner = THIS_MODULE;
+
+       dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
+       if (IS_ERR(dpaux->pinctrl)) {
+               dev_err(&pdev->dev, "failed to register pin controller\n");
+               return PTR_ERR(dpaux->pinctrl);
+       }
+#endif
        /* enable and clear all interrupts */
        value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
                DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -393,17 +557,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, dpaux);
 
        return 0;
+
+disable_parent_clk:
+       clk_disable_unprepare(dpaux->clk_parent);
+assert_reset:
+       if (dpaux->rst)
+               reset_control_assert(dpaux->rst);
+
+       clk_disable_unprepare(dpaux->clk);
+
+       return err;
 }
 
 static int tegra_dpaux_remove(struct platform_device *pdev)
 {
        struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
-       u32 value;
 
        /* make sure pads are powered down when not in use */
-       value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
-       value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+       tegra_dpaux_pad_power_down(dpaux);
 
        drm_dp_aux_unregister(&dpaux->aux);
 
@@ -414,7 +585,10 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
        cancel_work_sync(&dpaux->work);
 
        clk_disable_unprepare(dpaux->clk_parent);
-       reset_control_assert(dpaux->rst);
+
+       if (dpaux->rst)
+               reset_control_assert(dpaux->rst);
+
        clk_disable_unprepare(dpaux->clk);
 
        return 0;
@@ -528,30 +702,15 @@ enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
 int drm_dp_aux_enable(struct drm_dp_aux *aux)
 {
        struct tegra_dpaux *dpaux = to_dpaux(aux);
-       u32 value;
-
-       value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
-               DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
-               DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
-               DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
-               DPAUX_HYBRID_PADCTL_MODE_AUX;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
-
-       value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
-       value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
 
-       return 0;
+       return tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_AUX);
 }
 
 int drm_dp_aux_disable(struct drm_dp_aux *aux)
 {
        struct tegra_dpaux *dpaux = to_dpaux(aux);
-       u32 value;
 
-       value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
-       value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
-       tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
+       tegra_dpaux_pad_power_down(dpaux);
 
        return 0;
 }
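
The pad-management helpers called in the hunks above, tegra_dpaux_pad_config() and
tegra_dpaux_pad_power_down(), are introduced earlier in this patch and not quoted here.
A rough sketch of their likely shape, reconstructed from the register accesses they
replace; the DPAUX_PADCTL_FUNC_* selector names and the switch structure are assumptions
(note that the probe path passes DPAUX_HYBRID_PADCTL_MODE_I2C, which suggests the I2C
selector aliases that mode value):

    static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux)
    {
            u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);

            value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
            tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
    }

    static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux,
                                      unsigned long function)
    {
            u32 value;

            switch (function) {
            case DPAUX_PADCTL_FUNC_AUX:
                    /* drive strengths as in the removed drm_dp_aux_enable() body */
                    value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
                            DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
                            DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
                            DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
                            DPAUX_HYBRID_PADCTL_MODE_AUX;
                    break;

            case DPAUX_PADCTL_FUNC_I2C:
                    /* input receivers as in the removed probe-time I2C setup */
                    value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
                            DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
                            DPAUX_HYBRID_PADCTL_MODE_I2C;
                    break;

            default:
                    return -ENOTSUPP;
            }

            tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);

            /* power the pads up again, the inverse of pad_power_down() above */
            value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
            value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
            tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);

            return 0;
    }
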
index b59c3bf0df4443420a236f032d8af0c6345482d6..755264d9db229be535a05654250cd69e6876a722 100644 (file)
@@ -56,8 +56,8 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
         */
 
        drm_atomic_helper_commit_modeset_disables(drm, state);
-       drm_atomic_helper_commit_planes(drm, state, false);
        drm_atomic_helper_commit_modeset_enables(drm, state);
+       drm_atomic_helper_commit_planes(drm, state, true);
 
        drm_atomic_helper_wait_for_vblanks(drm, state);
 
@@ -93,7 +93,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
         * the software side now.
         */
 
-       drm_atomic_helper_swap_state(drm, state);
+       drm_atomic_helper_swap_state(state, true);
 
        if (nonblock)
                tegra_atomic_schedule(tegra, state);
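
A note on the two changes above: committing planes after the modeset enables, with
active_only set to true, makes the helper skip plane updates on CRTCs that are not
running, so plane programming no longer touches a display controller that is still
disabled. The drm_atomic_helper_swap_state() change simply follows the updated core
API, which now takes the atomic state plus a stall flag instead of the DRM device.
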
index f52d6cb24ff524ce771dd9aeabaf121c19824202..0ddcce1b420df0166318cb5ec6491b186bc82926 100644 (file)
@@ -239,8 +239,6 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
 void tegra_output_exit(struct tegra_output *output);
 
 int tegra_output_connector_get_modes(struct drm_connector *connector);
-struct drm_encoder *
-tegra_output_connector_best_encoder(struct drm_connector *connector);
 enum drm_connector_status
 tegra_output_connector_detect(struct drm_connector *connector, bool force);
 void tegra_output_connector_destroy(struct drm_connector *connector);
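
Background for this removal: the atomic helpers fall back to
drm_atomic_helper_best_encoder() when a connector supplies no .best_encoder hook,
and that fallback returns the first (here: only) encoder attached to the connector,
which is exactly what tegra_output_connector_best_encoder() did. The helper body and
the matching .best_encoder entries in each connector_helper_funcs table are dropped
in the hunks further down.
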
index d1239ebc190f4f2a33b8eb2c419bc18b171132cb..3d228ad90e0f14a9c8ad5656738afd0c8cc33cca 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/reset.h>
 
 #include <linux/regulator/consumer.h>
@@ -677,6 +678,45 @@ static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
        tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
 }
 
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+       u32 value;
+
+       value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
+
+       return 0;
+}
+
+static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
+{
+       u32 value;
+
+       /*
+        * XXX Is this still needed? The module reset is deasserted right
+        * before this function is called.
+        */
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+       /* start calibration */
+       tegra_dsi_pad_enable(dsi);
+
+       value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+               DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+               DSI_PAD_OUT_CLK(0x0);
+       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+       value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
+               DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
+       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
+
+       return tegra_mipi_calibrate(dsi->mipi);
+}
+
 static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
                                  unsigned int vrefresh)
 {
@@ -794,7 +834,6 @@ tegra_dsi_connector_mode_valid(struct drm_connector *connector,
 static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = {
        .get_modes = tegra_output_connector_get_modes,
        .mode_valid = tegra_dsi_connector_mode_valid,
-       .best_encoder = tegra_output_connector_best_encoder,
 };
 
 static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
@@ -837,7 +876,7 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
 
        tegra_dsi_disable(dsi);
 
-       return;
+       pm_runtime_put(dsi->dev);
 }
 
 static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -848,6 +887,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
        struct tegra_dsi *dsi = to_dsi(output);
        struct tegra_dsi_state *state;
        u32 value;
+       int err;
+
+       pm_runtime_get_sync(dsi->dev);
+
+       err = tegra_dsi_pad_calibrate(dsi);
+       if (err < 0)
+               dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
 
        state = tegra_dsi_get_state(dsi);
 
@@ -876,8 +922,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
 
        if (output->panel)
                drm_panel_enable(output->panel);
-
-       return;
 }
 
 static int
@@ -967,55 +1011,12 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
        .atomic_check = tegra_dsi_encoder_atomic_check,
 };
 
-static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
-{
-       u32 value;
-
-       value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
-       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
-
-       return 0;
-}
-
-static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
-{
-       u32 value;
-
-       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
-       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
-       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
-       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
-       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
-
-       /* start calibration */
-       tegra_dsi_pad_enable(dsi);
-
-       value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
-               DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
-               DSI_PAD_OUT_CLK(0x0);
-       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
-
-       value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
-               DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
-       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
-
-       return tegra_mipi_calibrate(dsi->mipi);
-}
-
 static int tegra_dsi_init(struct host1x_client *client)
 {
        struct drm_device *drm = dev_get_drvdata(client->parent);
        struct tegra_dsi *dsi = host1x_client_to_dsi(client);
        int err;
 
-       reset_control_deassert(dsi->rst);
-
-       err = tegra_dsi_pad_calibrate(dsi);
-       if (err < 0) {
-               dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
-               goto reset;
-       }
-
        /* Gangsters must not register their own outputs. */
        if (!dsi->master) {
                dsi->output.dev = client->dev;
@@ -1038,12 +1039,9 @@ static int tegra_dsi_init(struct host1x_client *client)
                drm_connector_register(&dsi->output.connector);
 
                err = tegra_output_init(drm, &dsi->output);
-               if (err < 0) {
-                       dev_err(client->dev,
-                               "failed to initialize output: %d\n",
+               if (err < 0)
+                       dev_err(dsi->dev, "failed to initialize output: %d\n",
                                err);
-                       goto reset;
-               }
 
                dsi->output.encoder.possible_crtcs = 0x3;
        }
@@ -1055,10 +1053,6 @@ static int tegra_dsi_init(struct host1x_client *client)
        }
 
        return 0;
-
-reset:
-       reset_control_assert(dsi->rst);
-       return err;
 }
 
 static int tegra_dsi_exit(struct host1x_client *client)
@@ -1070,7 +1064,7 @@ static int tegra_dsi_exit(struct host1x_client *client)
        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_dsi_debugfs_exit(dsi);
 
-       reset_control_assert(dsi->rst);
+       regulator_disable(dsi->vdd);
 
        return 0;
 }
@@ -1494,74 +1488,50 @@ static int tegra_dsi_probe(struct platform_device *pdev)
        dsi->format = MIPI_DSI_FMT_RGB888;
        dsi->lanes = 4;
 
-       dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
-       if (IS_ERR(dsi->rst))
-               return PTR_ERR(dsi->rst);
+       if (!pdev->dev.pm_domain) {
+               dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
+               if (IS_ERR(dsi->rst))
+                       return PTR_ERR(dsi->rst);
+       }
 
        dsi->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(dsi->clk)) {
                dev_err(&pdev->dev, "cannot get DSI clock\n");
-               err = PTR_ERR(dsi->clk);
-               goto reset;
-       }
-
-       err = clk_prepare_enable(dsi->clk);
-       if (err < 0) {
-               dev_err(&pdev->dev, "cannot enable DSI clock\n");
-               goto reset;
+               return PTR_ERR(dsi->clk);
        }
 
        dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
        if (IS_ERR(dsi->clk_lp)) {
                dev_err(&pdev->dev, "cannot get low-power clock\n");
-               err = PTR_ERR(dsi->clk_lp);
-               goto disable_clk;
-       }
-
-       err = clk_prepare_enable(dsi->clk_lp);
-       if (err < 0) {
-               dev_err(&pdev->dev, "cannot enable low-power clock\n");
-               goto disable_clk;
+               return PTR_ERR(dsi->clk_lp);
        }
 
        dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
        if (IS_ERR(dsi->clk_parent)) {
                dev_err(&pdev->dev, "cannot get parent clock\n");
-               err = PTR_ERR(dsi->clk_parent);
-               goto disable_clk_lp;
+               return PTR_ERR(dsi->clk_parent);
        }
 
        dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
        if (IS_ERR(dsi->vdd)) {
                dev_err(&pdev->dev, "cannot get VDD supply\n");
-               err = PTR_ERR(dsi->vdd);
-               goto disable_clk_lp;
-       }
-
-       err = regulator_enable(dsi->vdd);
-       if (err < 0) {
-               dev_err(&pdev->dev, "cannot enable VDD supply\n");
-               goto disable_clk_lp;
+               return PTR_ERR(dsi->vdd);
        }
 
        err = tegra_dsi_setup_clocks(dsi);
        if (err < 0) {
                dev_err(&pdev->dev, "cannot setup clocks\n");
-               goto disable_vdd;
+               return err;
        }
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
-       if (IS_ERR(dsi->regs)) {
-               err = PTR_ERR(dsi->regs);
-               goto disable_vdd;
-       }
+       if (IS_ERR(dsi->regs))
+               return PTR_ERR(dsi->regs);
 
        dsi->mipi = tegra_mipi_request(&pdev->dev);
-       if (IS_ERR(dsi->mipi)) {
-               err = PTR_ERR(dsi->mipi);
-               goto disable_vdd;
-       }
+       if (IS_ERR(dsi->mipi))
+               return PTR_ERR(dsi->mipi);
 
        dsi->host.ops = &tegra_dsi_host_ops;
        dsi->host.dev = &pdev->dev;
@@ -1572,6 +1542,9 @@ static int tegra_dsi_probe(struct platform_device *pdev)
                goto mipi_free;
        }
 
+       platform_set_drvdata(pdev, dsi);
+       pm_runtime_enable(&pdev->dev);
+
        INIT_LIST_HEAD(&dsi->client.list);
        dsi->client.ops = &dsi_client_ops;
        dsi->client.dev = &pdev->dev;
@@ -1583,22 +1556,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
                goto unregister;
        }
 
-       platform_set_drvdata(pdev, dsi);
-
        return 0;
 
 unregister:
        mipi_dsi_host_unregister(&dsi->host);
 mipi_free:
        tegra_mipi_free(dsi->mipi);
-disable_vdd:
-       regulator_disable(dsi->vdd);
-disable_clk_lp:
-       clk_disable_unprepare(dsi->clk_lp);
-disable_clk:
-       clk_disable_unprepare(dsi->clk);
-reset:
-       reset_control_assert(dsi->rst);
        return err;
 }
 
@@ -1607,6 +1570,8 @@ static int tegra_dsi_remove(struct platform_device *pdev)
        struct tegra_dsi *dsi = platform_get_drvdata(pdev);
        int err;
 
+       pm_runtime_disable(&pdev->dev);
+
        err = host1x_client_unregister(&dsi->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -1619,14 +1584,82 @@ static int tegra_dsi_remove(struct platform_device *pdev)
        mipi_dsi_host_unregister(&dsi->host);
        tegra_mipi_free(dsi->mipi);
 
-       regulator_disable(dsi->vdd);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_dsi_suspend(struct device *dev)
+{
+       struct tegra_dsi *dsi = dev_get_drvdata(dev);
+       int err;
+
+       if (dsi->rst) {
+               err = reset_control_assert(dsi->rst);
+               if (err < 0) {
+                       dev_err(dev, "failed to assert reset: %d\n", err);
+                       return err;
+               }
+       }
+
+       usleep_range(1000, 2000);
+
        clk_disable_unprepare(dsi->clk_lp);
        clk_disable_unprepare(dsi->clk);
-       reset_control_assert(dsi->rst);
+
+       regulator_disable(dsi->vdd);
 
        return 0;
 }
 
+static int tegra_dsi_resume(struct device *dev)
+{
+       struct tegra_dsi *dsi = dev_get_drvdata(dev);
+       int err;
+
+       err = regulator_enable(dsi->vdd);
+       if (err < 0) {
+               dev_err(dev, "failed to enable VDD supply: %d\n", err);
+               return err;
+       }
+
+       err = clk_prepare_enable(dsi->clk);
+       if (err < 0) {
+               dev_err(dev, "cannot enable DSI clock: %d\n", err);
+               goto disable_vdd;
+       }
+
+       err = clk_prepare_enable(dsi->clk_lp);
+       if (err < 0) {
+               dev_err(dev, "cannot enable low-power clock: %d\n", err);
+               goto disable_clk;
+       }
+
+       usleep_range(1000, 2000);
+
+       if (dsi->rst) {
+               err = reset_control_deassert(dsi->rst);
+               if (err < 0) {
+                       dev_err(dev, "cannot assert reset: %d\n", err);
+                       goto disable_clk_lp;
+               }
+       }
+
+       return 0;
+
+disable_clk_lp:
+       clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+       clk_disable_unprepare(dsi->clk);
+disable_vdd:
+       regulator_disable(dsi->vdd);
+       return err;
+}
+#endif
+
+static const struct dev_pm_ops tegra_dsi_pm_ops = {
+       SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL)
+};
+
 static const struct of_device_id tegra_dsi_of_match[] = {
        { .compatible = "nvidia,tegra210-dsi", },
        { .compatible = "nvidia,tegra132-dsi", },
@@ -1640,6 +1673,7 @@ struct platform_driver tegra_dsi_driver = {
        .driver = {
                .name = "tegra-dsi",
                .of_match_table = tegra_dsi_of_match,
+               .pm = &tegra_dsi_pm_ops,
        },
        .probe = tegra_dsi_probe,
        .remove = tegra_dsi_remove,
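
How the new runtime PM plumbing in this file fits together, summarized from the hunks
above (a sketch, not additional driver code):

    tegra_dsi_encoder_enable()
            pm_runtime_get_sync(dsi->dev); /* count 0 -> 1 invokes tegra_dsi_resume() */
            tegra_dsi_pad_calibrate(dsi);  /* pads calibrated with the block powered */
            ...

    tegra_dsi_encoder_disable()
            ...
            pm_runtime_put(dsi->dev);      /* count 1 -> 0 invokes tegra_dsi_suspend() */

SET_RUNTIME_PM_OPS() installs the callbacks as runtime hooks only; probe() now merely
looks up the clocks, the reset and the regulator, leaving power-up to the first resume.
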
index 1b12aa7a715e2eecd64232459366b4cafa8a8bff..e6d71fa4028eab234b57973bfe318c7259244377 100644 (file)
@@ -68,7 +68,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
                struct tegra_bo *bo = fb->planes[i];
 
                if (bo) {
-                       if (bo->pages && bo->vaddr)
+                       if (bo->pages)
                                vunmap(bo->vaddr);
 
                        drm_gem_object_unreference_unlocked(&bo->gem);
index b7ef4929e347a0f2fe0f85e80cf49bac3c7dc7d3..cda0491ed6bf80f2e77fd668adaff5d1233d7e94 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/debugfs.h>
 #include <linux/gpio.h>
 #include <linux/hdmi.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 
+#include <sound/hda_verbs.h>
+
 #include "hdmi.h"
 #include "drm.h"
 #include "dc.h"
 
+#define HDMI_ELD_BUFFER_SIZE 96
+
 struct tmds_config {
        unsigned int pclk;
        u32 pll0;
@@ -39,6 +44,8 @@ struct tegra_hdmi_config {
        u32 fuse_override_value;
 
        bool has_sor_io_peak_current;
+       bool has_hda;
+       bool has_hbr;
 };
 
 struct tegra_hdmi {
@@ -60,7 +67,10 @@ struct tegra_hdmi {
        const struct tegra_hdmi_config *config;
 
        unsigned int audio_source;
-       unsigned int audio_freq;
+       unsigned int audio_sample_rate;
+       unsigned int audio_channels;
+
+       unsigned int pixel_clock;
        bool stereo;
        bool dvi;
 
@@ -402,11 +412,11 @@ static const struct tmds_config tegra124_tmds_config[] = {
 };
 
 static const struct tegra_hdmi_audio_config *
-tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
+tegra_hdmi_get_audio_config(unsigned int sample_rate, unsigned int pclk)
 {
        const struct tegra_hdmi_audio_config *table;
 
-       switch (audio_freq) {
+       switch (sample_rate) {
        case 32000:
                table = tegra_hdmi_audio_32k;
                break;
@@ -476,44 +486,114 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
        }
 }
 
-static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
+static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value)
+{
+       static const struct {
+               unsigned int sample_rate;
+               unsigned int offset;
+       } regs[] = {
+               {  32000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 },
+               {  44100, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 },
+               {  48000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 },
+               {  88200, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 },
+               {  96000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 },
+               { 176400, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 },
+               { 192000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 },
+       };
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(regs); i++) {
+               if (regs[i].sample_rate == hdmi->audio_sample_rate) {
+                       tegra_hdmi_writel(hdmi, value, regs[i].offset);
+                       break;
+               }
+       }
+}
+
+static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
 {
-       struct device_node *node = hdmi->dev->of_node;
        const struct tegra_hdmi_audio_config *config;
-       unsigned int offset = 0;
-       u32 value;
+       u32 source, value;
 
        switch (hdmi->audio_source) {
        case HDA:
-               value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+               if (hdmi->config->has_hda)
+                       source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
+               else
+                       return -EINVAL;
+
                break;
 
        case SPDIF:
-               value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+               if (hdmi->config->has_hda)
+                       source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
+               else
+                       source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
                break;
 
        default:
-               value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+               if (hdmi->config->has_hda)
+                       source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
+               else
+                       source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
                break;
        }
 
-       if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
-               value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
-                        AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
-               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
-       } else {
-               value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
+       /*
+        * Tegra30 and later use a slightly modified version of the register
+        * layout to accommodate changes related to supporting HDA as the
+        * audio input source for HDMI. The source select field has moved to
+        * the SOR_AUDIO_CNTRL0 register, but the error tolerance and frames
+        * per block fields remain in the AUDIO_CNTRL0 register.
+        */
+       if (hdmi->config->has_hda) {
+               /*
+                * Inject null samples into the audio FIFO for every frame in
+                * which the codec did not receive any samples. This applies
+                * to stereo LPCM only.
+                *
+                * XXX: This seems to be a remnant of MCP days when this was
+                * used to work around issues with monitors not being able to
+                * play back system startup sounds early. It is possibly not
+                * needed on Linux at all.
+                */
+               if (hdmi->audio_channels == 2)
+                       value = SOR_AUDIO_CNTRL0_INJECT_NULLSMPL;
+               else
+                       value = 0;
+
+               value |= source;
+
                tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+       }
 
-               value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
-                       AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
-               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+       /*
+        * The error tolerance and frames-per-block fields live in the
+        * AUDIO_CNTRL0 register on all SoCs. On Tegra20, which lacks HDA
+        * support, the source select field is part of this register, too.
+        */
+       value = AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
+               AUDIO_CNTRL0_ERROR_TOLERANCE(6);
+
+       if (!hdmi->config->has_hda)
+               value |= source;
+
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
+
+       /*
+        * Advertise support for High Bit-Rate on Tegra114 and later.
+        */
+       if (hdmi->config->has_hbr) {
+               value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+               value |= SOR_AUDIO_SPARE0_HBR_ENABLE;
+               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
        }
 
-       config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
+       config = tegra_hdmi_get_audio_config(hdmi->audio_sample_rate,
+                                            hdmi->pixel_clock);
        if (!config) {
-               dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
-                       hdmi->audio_freq, pclk);
+               dev_err(hdmi->dev,
+                       "cannot set audio to %u Hz at %u Hz pixel clock\n",
+                       hdmi->audio_sample_rate, hdmi->pixel_clock);
                return -EINVAL;
        }
 
@@ -526,8 +606,8 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
        tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
                          HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
 
-       value = ACR_SUBPACK_CTS(config->cts);
-       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
+       tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
+                         HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
 
        value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
@@ -536,43 +616,53 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
        value &= ~AUDIO_N_RESETF;
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
 
-       if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
-               switch (hdmi->audio_freq) {
-               case 32000:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
-                       break;
+       if (hdmi->config->has_hda)
+               tegra_hdmi_write_aval(hdmi, config->aval);
 
-               case 44100:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
-                       break;
+       tegra_hdmi_setup_audio_fs_tables(hdmi);
 
-               case 48000:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
-                       break;
+       return 0;
+}
 
-               case 88200:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
-                       break;
+static void tegra_hdmi_disable_audio(struct tegra_hdmi *hdmi)
+{
+       u32 value;
 
-               case 96000:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
-                       break;
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       value &= ~GENERIC_CTRL_AUDIO;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
 
-               case 176400:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
-                       break;
+static void tegra_hdmi_enable_audio(struct tegra_hdmi *hdmi)
+{
+       u32 value;
 
-               case 192000:
-                       offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
-                       break;
-               }
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       value |= GENERIC_CTRL_AUDIO;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
 
-               tegra_hdmi_writel(hdmi, config->aval, offset);
-       }
+static void tegra_hdmi_write_eld(struct tegra_hdmi *hdmi)
+{
+       size_t length = drm_eld_size(hdmi->output.connector.eld), i;
+       u32 value;
 
-       tegra_hdmi_setup_audio_fs_tables(hdmi);
+       for (i = 0; i < length; i++)
+               tegra_hdmi_writel(hdmi, i << 8 | hdmi->output.connector.eld[i],
+                                 HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
 
-       return 0;
+       /*
+        * The HDA codec will always report an ELD buffer size of 96 bytes and
+        * the HDA codec driver will check that each byte read from the buffer
+        * is valid. Therefore every byte must be written, even if fewer than
+        * 96 bytes were parsed from the EDID.
+        */
+       for (i = length; i < HDMI_ELD_BUFFER_SIZE; i++)
+               tegra_hdmi_writel(hdmi, i << 8 | 0,
+                                 HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
+
+       value = SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
 }
 
 static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size)
@@ -644,12 +734,6 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
        u8 buffer[17];
        ssize_t err;
 
-       if (hdmi->dvi) {
-               tegra_hdmi_writel(hdmi, 0,
-                                 HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
-               return;
-       }
-
        err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
        if (err < 0) {
                dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
@@ -663,9 +747,24 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
        }
 
        tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
 
-       tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
-                         HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+       value &= ~INFOFRAME_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_avi_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
+       value |= INFOFRAME_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
 }
 
 static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
@@ -674,12 +773,6 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
        u8 buffer[14];
        ssize_t err;
 
-       if (hdmi->dvi) {
-               tegra_hdmi_writel(hdmi, 0,
-                                 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
-               return;
-       }
-
        err = hdmi_audio_infoframe_init(&frame);
        if (err < 0) {
                dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
@@ -687,7 +780,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
                return;
        }
 
-       frame.channels = 2;
+       frame.channels = hdmi->audio_channels;
 
        err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
        if (err < 0) {
@@ -703,9 +796,24 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
         * bytes can be programmed.
         */
        tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
+}
 
-       tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
-                         HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+static void tegra_hdmi_disable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+       value &= ~INFOFRAME_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+}
+
+static void tegra_hdmi_enable_audio_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
+       value |= INFOFRAME_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
 }
 
 static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
@@ -713,14 +821,6 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
        struct hdmi_vendor_infoframe frame;
        u8 buffer[10];
        ssize_t err;
-       u32 value;
-
-       if (!hdmi->stereo) {
-               value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
-               value &= ~GENERIC_CTRL_ENABLE;
-               tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
-               return;
-       }
 
        hdmi_vendor_infoframe_init(&frame);
        frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
@@ -733,6 +833,20 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
        }
 
        tegra_hdmi_write_infopack(hdmi, buffer, err);
+}
+
+static void tegra_hdmi_disable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       value &= ~GENERIC_CTRL_ENABLE;
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+}
+
+static void tegra_hdmi_enable_stereo_infoframe(struct tegra_hdmi *hdmi)
+{
+       u32 value;
 
        value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
        value |= GENERIC_CTRL_ENABLE;
@@ -772,10 +886,25 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
        return drm_detect_hdmi_monitor(edid);
 }
 
+static enum drm_connector_status
+tegra_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+       struct tegra_output *output = connector_to_output(connector);
+       struct tegra_hdmi *hdmi = to_hdmi(output);
+       enum drm_connector_status status;
+
+       status = tegra_output_connector_detect(connector, force);
+       if (status == connector_status_connected)
+               return status;
+
+       tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+       return status;
+}
+
 static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .reset = drm_atomic_helper_connector_reset,
-       .detect = tegra_output_connector_detect,
+       .detect = tegra_hdmi_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = tegra_output_connector_destroy,
        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -806,7 +935,6 @@ static const struct drm_connector_helper_funcs
 tegra_hdmi_connector_helper_funcs = {
        .get_modes = tegra_output_connector_get_modes,
        .mode_valid = tegra_hdmi_connector_mode_valid,
-       .best_encoder = tegra_output_connector_best_encoder,
 };
 
 static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
@@ -815,7 +943,9 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
 
 static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 {
+       struct tegra_output *output = encoder_to_output(encoder);
        struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
+       struct tegra_hdmi *hdmi = to_hdmi(output);
        u32 value;
 
        /*
@@ -829,6 +959,20 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
 
                tegra_dc_commit(dc);
        }
+
+       if (!hdmi->dvi) {
+               if (hdmi->stereo)
+                       tegra_hdmi_disable_stereo_infoframe(hdmi);
+
+               tegra_hdmi_disable_audio_infoframe(hdmi);
+               tegra_hdmi_disable_avi_infoframe(hdmi);
+               tegra_hdmi_disable_audio(hdmi);
+       }
+
+       tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
+       tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
+
+       pm_runtime_put(hdmi->dev);
 }
 
 static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
@@ -837,21 +981,28 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
        unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
        struct tegra_output *output = encoder_to_output(encoder);
        struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-       struct device_node *node = output->dev->of_node;
        struct tegra_hdmi *hdmi = to_hdmi(output);
-       unsigned int pulse_start, div82, pclk;
+       unsigned int pulse_start, div82;
        int retries = 1000;
        u32 value;
        int err;
 
-       hdmi->dvi = !tegra_output_is_hdmi(output);
+       pm_runtime_get_sync(hdmi->dev);
 
-       pclk = mode->clock * 1000;
+       /*
+        * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+        * is used for interoperability between the HDA codec driver and the
+        * HDMI driver.
+        */
+       tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_ENABLE);
+       tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_MASK);
+
+       hdmi->pixel_clock = mode->clock * 1000;
        h_sync_width = mode->hsync_end - mode->hsync_start;
        h_back_porch = mode->htotal - mode->hsync_end;
        h_front_porch = mode->hsync_start - mode->hdisplay;
 
-       err = clk_set_rate(hdmi->clk, pclk);
+       err = clk_set_rate(hdmi->clk, hdmi->pixel_clock);
        if (err < 0) {
                dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
                        err);
@@ -910,17 +1061,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
        value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
 
+       hdmi->dvi = !tegra_output_is_hdmi(output);
        if (!hdmi->dvi) {
-               err = tegra_hdmi_setup_audio(hdmi, pclk);
+               err = tegra_hdmi_setup_audio(hdmi);
                if (err < 0)
                        hdmi->dvi = true;
        }
 
-       if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
-               /*
-                * TODO: add ELD support
-                */
-       }
+       if (hdmi->config->has_hda)
+               tegra_hdmi_write_eld(hdmi);
 
        rekey = HDMI_REKEY_DEFAULT;
        value = HDMI_CTRL_REKEY(rekey);
@@ -932,20 +1081,17 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 
        tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
 
-       if (hdmi->dvi)
-               tegra_hdmi_writel(hdmi, 0x0,
-                                 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
-       else
-               tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
-                                 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
+       if (!hdmi->dvi) {
+               tegra_hdmi_setup_avi_infoframe(hdmi, mode);
+               tegra_hdmi_setup_audio_infoframe(hdmi);
 
-       tegra_hdmi_setup_avi_infoframe(hdmi, mode);
-       tegra_hdmi_setup_audio_infoframe(hdmi);
-       tegra_hdmi_setup_stereo_infoframe(hdmi);
+               if (hdmi->stereo)
+                       tegra_hdmi_setup_stereo_infoframe(hdmi);
+       }
 
        /* TMDS CONFIG */
        for (i = 0; i < hdmi->config->num_tmds; i++) {
-               if (pclk <= hdmi->config->tmds[i].pclk) {
+               if (hdmi->pixel_clock <= hdmi->config->tmds[i].pclk) {
                        tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
                        break;
                }
@@ -1032,6 +1178,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
 
        tegra_dc_commit(dc);
 
+       if (!hdmi->dvi) {
+               tegra_hdmi_enable_avi_infoframe(hdmi);
+               tegra_hdmi_enable_audio_infoframe(hdmi);
+               tegra_hdmi_enable_audio(hdmi);
+
+               if (hdmi->stereo)
+                       tegra_hdmi_enable_stereo_infoframe(hdmi);
+       }
+
        /* TODO: add HDCP support */
 }
 
@@ -1236,8 +1391,14 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
        DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
        DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
        DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+       DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1);
        DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
        DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+       DUMP_REG(HDMI_NV_PDISP_INT_STATUS);
+       DUMP_REG(HDMI_NV_PDISP_INT_MASK);
+       DUMP_REG(HDMI_NV_PDISP_INT_ENABLE);
        DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
 
 #undef DUMP_REG
@@ -1361,14 +1522,6 @@ static int tegra_hdmi_init(struct host1x_client *client)
                return err;
        }
 
-       err = clk_prepare_enable(hdmi->clk);
-       if (err < 0) {
-               dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
-               return err;
-       }
-
-       reset_control_deassert(hdmi->rst);
-
        return 0;
 }
 
@@ -1378,9 +1531,6 @@ static int tegra_hdmi_exit(struct host1x_client *client)
 
        tegra_output_exit(&hdmi->output);
 
-       reset_control_assert(hdmi->rst);
-       clk_disable_unprepare(hdmi->clk);
-
        regulator_disable(hdmi->vdd);
        regulator_disable(hdmi->pll);
        regulator_disable(hdmi->hdmi);
@@ -1402,6 +1552,8 @@ static const struct tegra_hdmi_config tegra20_hdmi_config = {
        .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
        .fuse_override_value = 1 << 31,
        .has_sor_io_peak_current = false,
+       .has_hda = false,
+       .has_hbr = false,
 };
 
 static const struct tegra_hdmi_config tegra30_hdmi_config = {
@@ -1410,6 +1562,8 @@ static const struct tegra_hdmi_config tegra30_hdmi_config = {
        .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
        .fuse_override_value = 1 << 31,
        .has_sor_io_peak_current = false,
+       .has_hda = true,
+       .has_hbr = false,
 };
 
 static const struct tegra_hdmi_config tegra114_hdmi_config = {
@@ -1418,6 +1572,8 @@ static const struct tegra_hdmi_config tegra114_hdmi_config = {
        .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
        .fuse_override_value = 1 << 31,
        .has_sor_io_peak_current = true,
+       .has_hda = true,
+       .has_hbr = true,
 };
 
 static const struct tegra_hdmi_config tegra124_hdmi_config = {
@@ -1426,6 +1582,8 @@ static const struct tegra_hdmi_config tegra124_hdmi_config = {
        .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
        .fuse_override_value = 1 << 31,
        .has_sor_io_peak_current = true,
+       .has_hda = true,
+       .has_hbr = true,
 };
 
 static const struct of_device_id tegra_hdmi_of_match[] = {
@@ -1437,6 +1595,67 @@ static const struct of_device_id tegra_hdmi_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
 
+static void hda_format_parse(unsigned int format, unsigned int *rate,
+                            unsigned int *channels)
+{
+       unsigned int mul, div;
+
+       if (format & AC_FMT_BASE_44K)
+               *rate = 44100;
+       else
+               *rate = 48000;
+
+       mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+       div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+       *rate = *rate * (mul + 1) / (div + 1);
+
+       *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+}
+
+static irqreturn_t tegra_hdmi_irq(int irq, void *data)
+{
+       struct tegra_hdmi *hdmi = data;
+       u32 value;
+       int err;
+
+       value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS);
+       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS);
+
+       if (value & INT_CODEC_SCRATCH0) {
+               unsigned int format;
+               u32 value;
+
+               value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+               if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+                       unsigned int sample_rate, channels;
+
+                       format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+                       hda_format_parse(format, &sample_rate, &channels);
+
+                       hdmi->audio_sample_rate = sample_rate;
+                       hdmi->audio_channels = channels;
+
+                       err = tegra_hdmi_setup_audio(hdmi);
+                       if (err < 0) {
+                               tegra_hdmi_disable_audio_infoframe(hdmi);
+                               tegra_hdmi_disable_audio(hdmi);
+                       } else {
+                               tegra_hdmi_setup_audio_infoframe(hdmi);
+                               tegra_hdmi_enable_audio_infoframe(hdmi);
+                               tegra_hdmi_enable_audio(hdmi);
+                       }
+               } else {
+                       tegra_hdmi_disable_audio_infoframe(hdmi);
+                       tegra_hdmi_disable_audio(hdmi);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
 static int tegra_hdmi_probe(struct platform_device *pdev)
 {
        const struct of_device_id *match;
@@ -1454,8 +1673,10 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 
        hdmi->config = match->data;
        hdmi->dev = &pdev->dev;
+
        hdmi->audio_source = AUTO;
-       hdmi->audio_freq = 44100;
+       hdmi->audio_sample_rate = 48000;
+       hdmi->audio_channels = 2;
        hdmi->stereo = false;
        hdmi->dvi = false;
 
@@ -1516,6 +1737,17 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 
        hdmi->irq = err;
 
+       err = devm_request_irq(hdmi->dev, hdmi->irq, tegra_hdmi_irq, 0,
+                              dev_name(hdmi->dev), hdmi);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+                       hdmi->irq, err);
+               return err;
+       }
+
+       platform_set_drvdata(pdev, hdmi);
+       pm_runtime_enable(&pdev->dev);
+
        INIT_LIST_HEAD(&hdmi->client.list);
        hdmi->client.ops = &hdmi_client_ops;
        hdmi->client.dev = &pdev->dev;
@@ -1527,8 +1759,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
                return err;
        }
 
-       platform_set_drvdata(pdev, hdmi);
-
        return 0;
 }
 
@@ -1537,6 +1767,8 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
        struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
        int err;
 
+       pm_runtime_disable(&pdev->dev);
+
        err = host1x_client_unregister(&hdmi->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -1546,17 +1778,61 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
 
        tegra_output_remove(&hdmi->output);
 
-       clk_disable_unprepare(hdmi->clk_parent);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_hdmi_suspend(struct device *dev)
+{
+       struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
+       int err;
+
+       err = reset_control_assert(hdmi->rst);
+       if (err < 0) {
+               dev_err(dev, "failed to assert reset: %d\n", err);
+               return err;
+       }
+
+       usleep_range(1000, 2000);
+
        clk_disable_unprepare(hdmi->clk);
 
        return 0;
 }
 
+static int tegra_hdmi_resume(struct device *dev)
+{
+       struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
+       int err;
+
+       err = clk_prepare_enable(hdmi->clk);
+       if (err < 0) {
+               dev_err(dev, "failed to enable clock: %d\n", err);
+               return err;
+       }
+
+       usleep_range(1000, 2000);
+
+       err = reset_control_deassert(hdmi->rst);
+       if (err < 0) {
+               dev_err(dev, "failed to deassert reset: %d\n", err);
+               clk_disable_unprepare(hdmi->clk);
+               return err;
+       }
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_hdmi_pm_ops = {
+       SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL)
+};
+
 struct platform_driver tegra_hdmi_driver = {
        .driver = {
                .name = "tegra-hdmi",
-               .owner = THIS_MODULE,
                .of_match_table = tegra_hdmi_of_match,
+               .pm = &tegra_hdmi_pm_ops,
        },
        .probe = tegra_hdmi_probe,
        .remove = tegra_hdmi_remove,
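
A worked example for hda_format_parse() above, using the stream format field layout
from <sound/hda_verbs.h> (the concrete value is made up for illustration):

    /*
     * format = AC_FMT_BASE_44K | (1 << AC_FMT_MULT_SHIFT) | 2
     *   base rate: 44100 Hz, since AC_FMT_BASE_44K is set
     *   mul = 1, div = 0, so rate = 44100 * (1 + 1) / (0 + 1) = 88200 Hz
     *   channels field = 2
     */

The HDA codec hands this format word to the driver through the
SOR_AUDIO_HDA_CODEC_SCRATCH0 register, which is what the new tegra_hdmi_irq()
handler decodes before reprogramming the audio path.
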
index a882514389cd05a35e503c8b0adb2bb39ba91ae4..2339f134a09ab0ff90da7c800e85dcd68cb73980 100644 (file)
 #define HDMI_NV_PDISP_KEY_SKEY_INDEX                           0xa3
 
 #define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0                         0xac
-#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
+#define  SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO   (0 << 20)
+#define  SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF  (1 << 20)
+#define  SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL   (2 << 20)
+#define  SOR_AUDIO_CNTRL0_INJECT_NULLSMPL      (1 << 29)
+#define HDMI_NV_PDISP_SOR_AUDIO_SPARE0                         0xae
+#define  SOR_AUDIO_SPARE0_HBR_ENABLE           (1 << 27)
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0             0xba
+#define  SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID    (1 << 30)
+#define  SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1             0xbb
 #define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR                  0xbc
 #define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE                   0xbd
+#define  SOR_AUDIO_HDA_PRESENSE_VALID          (1 << 1)
+#define  SOR_AUDIO_HDA_PRESENSE_PRESENT                (1 << 0)
 
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320    0xbf
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441    0xc0
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920    0xc5
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
 
+#define HDMI_NV_PDISP_INT_STATUS                       0xcc
+#define  INT_SCRATCH           (1 << 3)
+#define  INT_CP_REQUEST                (1 << 2)
+#define  INT_CODEC_SCRATCH1    (1 << 1)
+#define  INT_CODEC_SCRATCH0    (1 << 0)
+#define HDMI_NV_PDISP_INT_MASK                         0xcd
+#define HDMI_NV_PDISP_INT_ENABLE                       0xce
+
 #define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT              0xd1
 #define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) <<  0)
 #define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) <<  8)
index 46664b6222707e3d4e51e4629305268711503c33..595d1ec3e02e8ff6e42c0574b0d1d1218bd8210f 100644 (file)
@@ -36,20 +36,13 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
 
        if (edid) {
                err = drm_add_edid_modes(connector, edid);
+               drm_edid_to_eld(connector, edid);
                kfree(edid);
        }
 
        return err;
 }
 
-struct drm_encoder *
-tegra_output_connector_best_encoder(struct drm_connector *connector)
-{
-       struct tegra_output *output = connector_to_output(connector);
-
-       return &output->encoder;
-}
-
 enum drm_connector_status
 tegra_output_connector_detect(struct drm_connector *connector, bool force)
 {
index e246334e0252c8e966279a3af561f5d1ba4b6337..a131b44e2d6fba01d05cbe1c90d5d24ce4f7b20c 100644 (file)
@@ -112,7 +112,6 @@ tegra_rgb_connector_mode_valid(struct drm_connector *connector,
 static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = {
        .get_modes = tegra_output_connector_get_modes,
        .mode_valid = tegra_rgb_connector_mode_valid,
-       .best_encoder = tegra_output_connector_best_encoder,
 };
 
 static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
index 757c6e8603af8d8707a75c5c610a29facb40e4ae..74d0540b8d4c797361111fb9e8b0df288811541d 100644 (file)
@@ -7,11 +7,13 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/debugfs.h>
 #include <linux/gpio.h>
 #include <linux/io.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
 
@@ -149,6 +151,8 @@ struct tegra_sor_soc {
 
        const struct tegra_sor_hdmi_settings *settings;
        unsigned int num_settings;
+
+       const u8 *xbar_cfg;
 };
 
 struct tegra_sor;
@@ -169,7 +173,9 @@ struct tegra_sor {
 
        struct reset_control *rst;
        struct clk *clk_parent;
+       struct clk *clk_brick;
        struct clk *clk_safe;
+       struct clk *clk_src;
        struct clk *clk_dp;
        struct clk *clk;
 
@@ -190,6 +196,18 @@ struct tegra_sor {
        struct regulator *hdmi_supply;
 };
 
+struct tegra_sor_state {
+       struct drm_connector_state base;
+
+       unsigned int bpc;
+};
+
+static inline struct tegra_sor_state *
+to_sor_state(struct drm_connector_state *state)
+{
+       return container_of(state, struct tegra_sor_state, base);
+}
+
 struct tegra_sor_config {
        u32 bits_per_pixel;
 
@@ -225,6 +243,118 @@ static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
        writel(value, sor->regs + (offset << 2));
 }
 
+static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
+{
+       int err;
+
+       clk_disable_unprepare(sor->clk);
+
+       err = clk_set_parent(sor->clk, parent);
+       if (err < 0)
+               return err;
+
+       err = clk_prepare_enable(sor->clk);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+struct tegra_clk_sor_brick {
+       struct clk_hw hw;
+       struct tegra_sor *sor;
+};
+
+static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+{
+       return container_of(hw, struct tegra_clk_sor_brick, hw);
+}
+
+static const char * const tegra_clk_sor_brick_parents[] = {
+       "pll_d2_out0", "pll_dp"
+};
+
+static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct tegra_clk_sor_brick *brick = to_brick(hw);
+       struct tegra_sor *sor = brick->sor;
+       u32 value;
+
+       value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+       value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
+
+       switch (index) {
+       case 0:
+               value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
+               break;
+
+       case 1:
+               value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
+               break;
+       }
+
+       tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
+
+       return 0;
+}
+
+static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+{
+       struct tegra_clk_sor_brick *brick = to_brick(hw);
+       struct tegra_sor *sor = brick->sor;
+       u8 parent = U8_MAX;
+       u32 value;
+
+       value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
+
+       switch (value & SOR_CLK_CNTRL_DP_CLK_SEL_MASK) {
+       case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK:
+       case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK:
+               parent = 0;
+               break;
+
+       case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK:
+       case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK:
+               parent = 1;
+               break;
+       }
+
+       return parent;
+}
+
+static const struct clk_ops tegra_clk_sor_brick_ops = {
+       .set_parent = tegra_clk_sor_brick_set_parent,
+       .get_parent = tegra_clk_sor_brick_get_parent,
+};
+
+static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
+                                               const char *name)
+{
+       struct tegra_clk_sor_brick *brick;
+       struct clk_init_data init;
+
+       brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
+       if (!brick)
+               return ERR_PTR(-ENOMEM);
+
+       brick->sor = sor;
+
+       init.name = name;
+       init.flags = 0;
+       init.parent_names = tegra_clk_sor_brick_parents;
+       init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
+       init.ops = &tegra_clk_sor_brick_ops;
+
+       brick->hw.init = &init;
+
+       /*
+        * brick is devm-allocated, so it must not be freed with kfree() on
+        * failure; devres releases it together with the clock.
+        */
+       return devm_clk_register(sor->dev, &brick->hw);
+}
+
 static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
                                   struct drm_dp_link *link)
 {
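
Hypothetical usage of the two helpers above; the actual call sites fall outside the
quoted hunks, and the "sor1_brick" clock name is a guess:

    sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
    if (IS_ERR(sor->clk_brick))
            return PTR_ERR(sor->clk_brick);

    /* later, e.g. when powering down: */
    err = tegra_sor_set_parent_clock(sor, sor->clk_safe);

tegra_sor_set_parent_clock() brackets the reparenting with a disable/enable pair,
presumably to avoid glitching the module clock while its parent changes.
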
@@ -569,10 +699,10 @@ static int tegra_sor_compute_params(struct tegra_sor *sor,
        return false;
 }
 
-static int tegra_sor_calc_config(struct tegra_sor *sor,
-                                const struct drm_display_mode *mode,
-                                struct tegra_sor_config *config,
-                                struct drm_dp_link *link)
+static int tegra_sor_compute_config(struct tegra_sor *sor,
+                                   const struct drm_display_mode *mode,
+                                   struct tegra_sor_config *config,
+                                   struct drm_dp_link *link)
 {
        const u64 f = 100000, link_rate = link->rate * 1000;
        const u64 pclk = mode->clock * 1000;
@@ -661,6 +791,135 @@ static int tegra_sor_calc_config(struct tegra_sor *sor,
        return 0;
 }
 
+static void tegra_sor_apply_config(struct tegra_sor *sor,
+                                  const struct tegra_sor_config *config)
+{
+       u32 value;
+
+       value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
+       value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
+       value |= SOR_DP_LINKCTL_TU_SIZE(config->tu_size);
+       tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
+
+       value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
+       value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
+       value |= SOR_DP_CONFIG_WATERMARK(config->watermark);
+
+       value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
+       value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config->active_count);
+
+       value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
+       value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config->active_frac);
+
+       if (config->active_polarity)
+               value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+       else
+               value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
+
+       value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
+       value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
+       tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
+
+       value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+       value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
+       value |= config->hblank_symbols & 0xffff;
+       tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
+
+       value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+       value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
+       value |= config->vblank_symbols & 0xffff;
+       tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
+}
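Each register update in tegra_sor_apply_config() follows the same read-modify-write shape: clear a field with its mask, then OR in the new value. A standalone illustration with an invented mask and shift (the real SOR_DP_CONFIG field layout lives in sor.h and is not reproduced here):

#include <stdio.h>

/* Invented field layout, for illustration only. */
#define WATERMARK_SHIFT 0
#define WATERMARK_MASK  (0x3f << WATERMARK_SHIFT)
#define WATERMARK(x)    (((x) << WATERMARK_SHIFT) & WATERMARK_MASK)

int main(void)
{
	unsigned int value = 0xdeadbeef; /* stands in for a register read */

	value &= ~WATERMARK_MASK; /* clear the old field */
	value |= WATERMARK(14);   /* insert the new value */

	printf("0x%08x\n", value); /* would be written back to the register */
	return 0;
}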
+
+static void tegra_sor_mode_set(struct tegra_sor *sor,
+                              const struct drm_display_mode *mode,
+                              struct tegra_sor_state *state)
+{
+       struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
+       unsigned int vbe, vse, hbe, hse, vbs, hbs;
+       u32 value;
+
+       value = tegra_sor_readl(sor, SOR_STATE1);
+       value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
+       value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
+       value &= ~SOR_STATE_ASY_OWNER_MASK;
+
+       value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
+                SOR_STATE_ASY_OWNER(dc->pipe + 1);
+
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+               value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               value |= SOR_STATE_ASY_HSYNCPOL;
+
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+               value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               value |= SOR_STATE_ASY_VSYNCPOL;
+
+       switch (state->bpc) {
+       case 16:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_48_444;
+               break;
+
+       case 12:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_36_444;
+               break;
+
+       case 10:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_30_444;
+               break;
+
+       case 8:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+               break;
+
+       case 6:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
+               break;
+
+       default:
+               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
+               break;
+       }
+
+       tegra_sor_writel(sor, value, SOR_STATE1);
+
+       /*
+        * TODO: The video timing programming below doesn't seem to match the
+        * register definitions.
+        */
+
+       value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
+       tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
+
+       /* sync end = sync width - 1 */
+       vse = mode->vsync_end - mode->vsync_start - 1;
+       hse = mode->hsync_end - mode->hsync_start - 1;
+
+       value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
+       tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
+
+       /* blank end = sync end + back porch */
+       vbe = vse + (mode->vtotal - mode->vsync_end);
+       hbe = hse + (mode->htotal - mode->hsync_end);
+
+       value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
+       tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
+
+       /* blank start = blank end + active */
+       vbs = vbe + mode->vdisplay;
+       hbs = hbe + mode->hdisplay;
+
+       value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
+       tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
+
+       /* XXX interlacing support */
+       tegra_sor_writel(sor, 0x001, SOR_HEAD_STATE5(dc->pipe));
+}
+
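A worked example of the HEAD_STATE arithmetic in tegra_sor_mode_set() above, using the standard CEA-861 1080p60 timings as illustrative input (the mode values are not taken from this patch); it compiles and runs standalone:

#include <stdio.h>

int main(void)
{
	/* CEA-861 1920x1080@60 timings (illustrative input only) */
	unsigned int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
	unsigned int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

	/* sync end = sync width - 1 */
	unsigned int hse = hsync_end - hsync_start - 1; /* 43 */
	unsigned int vse = vsync_end - vsync_start - 1; /* 4 */

	/* blank end = sync end + back porch */
	unsigned int hbe = hse + (htotal - hsync_end);  /* 191 */
	unsigned int vbe = vse + (vtotal - vsync_end);  /* 40 */

	/* blank start = blank end + active */
	unsigned int hbs = hbe + hdisplay;              /* 2111 */
	unsigned int vbs = vbe + vdisplay;              /* 1120 */

	printf("HEAD_STATE2: 0x%08x\n", ((vse & 0x7fff) << 16) | (hse & 0x7fff));
	printf("HEAD_STATE3: 0x%08x\n", ((vbe & 0x7fff) << 16) | (hbe & 0x7fff));
	printf("HEAD_STATE4: 0x%08x\n", ((vbs & 0x7fff) << 16) | (hbs & 0x7fff));
	return 0;
}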
 static int tegra_sor_detach(struct tegra_sor *sor)
 {
        unsigned long value, timeout;
@@ -733,7 +992,8 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
        if ((value & SOR_PWR_TRIGGER) != 0)
                return -ETIMEDOUT;
 
-       err = clk_set_parent(sor->clk, sor->clk_safe);
+       /* switch to safe parent clock */
+       err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
        if (err < 0)
                dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
 
@@ -1038,6 +1298,22 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
        sor->debugfs = NULL;
 }
 
+static void tegra_sor_connector_reset(struct drm_connector *connector)
+{
+       struct tegra_sor_state *state;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return;
+
+       if (connector->state) {
+               __drm_atomic_helper_connector_destroy_state(connector->state);
+               kfree(connector->state);
+       }
+
+       __drm_atomic_helper_connector_reset(connector, &state->base);
+}
+
 static enum drm_connector_status
 tegra_sor_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -1050,13 +1326,28 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
        return tegra_output_connector_detect(connector, force);
 }
 
+static struct drm_connector_state *
+tegra_sor_connector_duplicate_state(struct drm_connector *connector)
+{
+       struct tegra_sor_state *state = to_sor_state(connector->state);
+       struct tegra_sor_state *copy;
+
+       copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+       if (!copy)
+               return NULL;
+
+       __drm_atomic_helper_connector_duplicate_state(connector, &copy->base);
+
+       return &copy->base;
+}
+
 static const struct drm_connector_funcs tegra_sor_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
-       .reset = drm_atomic_helper_connector_reset,
+       .reset = tegra_sor_connector_reset,
        .detect = tegra_sor_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = tegra_output_connector_destroy,
-       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_duplicate_state = tegra_sor_connector_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
@@ -1081,13 +1372,16 @@ static enum drm_mode_status
 tegra_sor_connector_mode_valid(struct drm_connector *connector,
                               struct drm_display_mode *mode)
 {
+       /* HDMI 2.0 modes are not yet supported */
+       if (mode->clock > 340000)
+               return MODE_NOCLOCK;
+
        return MODE_OK;
 }
 
 static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = {
        .get_modes = tegra_sor_connector_get_modes,
        .mode_valid = tegra_sor_connector_mode_valid,
-       .best_encoder = tegra_output_connector_best_encoder,
 };
 
 static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
@@ -1141,8 +1435,7 @@ static void tegra_sor_edp_disable(struct drm_encoder *encoder)
        if (output->panel)
                drm_panel_unprepare(output->panel);
 
-       reset_control_assert(sor->rst);
-       clk_disable_unprepare(sor->clk);
+       pm_runtime_put(sor->dev);
 }
 
 #if 0
@@ -1192,19 +1485,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
        struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct tegra_output *output = encoder_to_output(encoder);
        struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-       unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
        struct tegra_sor *sor = to_sor(output);
        struct tegra_sor_config config;
+       struct tegra_sor_state *state;
        struct drm_dp_link link;
        u8 rate, lanes;
+       unsigned int i;
        int err = 0;
        u32 value;
 
-       err = clk_prepare_enable(sor->clk);
-       if (err < 0)
-               dev_err(sor->dev, "failed to enable clock: %d\n", err);
+       state = to_sor_state(output->connector.state);
 
-       reset_control_deassert(sor->rst);
+       pm_runtime_get_sync(sor->dev);
 
        if (output->panel)
                drm_panel_prepare(output->panel);
@@ -1219,17 +1511,17 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
                return;
        }
 
-       err = clk_set_parent(sor->clk, sor->clk_safe);
+       /* switch to safe parent clock */
+       err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
        if (err < 0)
                dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
 
        memset(&config, 0, sizeof(config));
-       config.bits_per_pixel = output->connector.display_info.bpc * 3;
+       config.bits_per_pixel = state->bpc * 3;
 
-       err = tegra_sor_calc_config(sor, mode, &config, &link);
+       err = tegra_sor_compute_config(sor, mode, &config, &link);
        if (err < 0)
-               dev_err(sor->dev, "failed to compute link configuration: %d\n",
-                       err);
+               dev_err(sor->dev, "failed to compute configuration: %d\n", err);
 
        value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
        value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
@@ -1326,10 +1618,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
        value &= ~SOR_PLL2_PORT_POWERDOWN;
        tegra_sor_writel(sor, value, SOR_PLL2);
 
-       /* switch to DP clock */
-       err = clk_set_parent(sor->clk, sor->clk_dp);
+       /* XXX not in TRM */
+       for (value = 0, i = 0; i < 5; i++)
+               value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+                        SOR_XBAR_CTRL_LINK1_XSEL(i, i);
+
+       tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+       tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+
+       /* switch to DP parent clock */
+       err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
        if (err < 0)
-               dev_err(sor->dev, "failed to set DP parent clock: %d\n", err);
+               dev_err(sor->dev, "failed to set parent clock: %d\n", err);
 
        /* power DP lanes */
        value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
@@ -1375,13 +1675,11 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
        value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
        tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
 
-       /* set linkctl */
+       tegra_sor_apply_config(sor, &config);
+
+       /* enable link */
        value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
        value |= SOR_DP_LINKCTL_ENABLE;
-
-       value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
-       value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
-
        value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
        tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
 
@@ -1394,35 +1692,6 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
 
        tegra_sor_writel(sor, value, SOR_DP_TPG);
 
-       value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
-       value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
-       value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
-
-       value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
-       value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config.active_count);
-
-       value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
-       value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config.active_frac);
-
-       if (config.active_polarity)
-               value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
-       else
-               value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
-
-       value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
-       value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
-       tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
-
-       value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
-       value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
-       value |= config.hblank_symbols & 0xffff;
-       tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
-
-       value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
-       value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
-       value |= config.vblank_symbols & 0xffff;
-       tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
-
        /* enable pad calibration logic */
        value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
        value |= SOR_DP_PADCTL_PAD_CAL_PD;
@@ -1478,75 +1747,19 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
        if (err < 0)
                dev_err(sor->dev, "failed to power up SOR: %d\n", err);
 
-       /*
-        * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
-        * raster, associate with display controller)
-        */
-       value = SOR_STATE_ASY_PROTOCOL_DP_A |
-               SOR_STATE_ASY_CRC_MODE_COMPLETE |
-               SOR_STATE_ASY_OWNER(dc->pipe + 1);
-
-       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-               value &= ~SOR_STATE_ASY_HSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               value |= SOR_STATE_ASY_HSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-               value &= ~SOR_STATE_ASY_VSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               value |= SOR_STATE_ASY_VSYNCPOL;
-
-       switch (config.bits_per_pixel) {
-       case 24:
-               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
-               break;
-
-       case 18:
-               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
-               break;
-
-       default:
-               BUG();
-               break;
-       }
-
-       tegra_sor_writel(sor, value, SOR_STATE1);
-
-       /*
-        * TODO: The video timing programming below doesn't seem to match the
-        * register definitions.
-        */
-
-       value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
-
-       vse = mode->vsync_end - mode->vsync_start - 1;
-       hse = mode->hsync_end - mode->hsync_start - 1;
-
-       value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
-
-       vbe = vse + (mode->vsync_start - mode->vdisplay);
-       hbe = hse + (mode->hsync_start - mode->hdisplay);
-
-       value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
-
-       vbs = vbe + mode->vdisplay;
-       hbs = hbe + mode->hdisplay;
-
-       value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
-
-       tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
-
        /* CSTM (LVDS, link A/B, upper) */
        value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
                SOR_CSTM_UPPER;
        tegra_sor_writel(sor, value, SOR_CSTM);
 
+       /* use DP-A protocol */
+       value = tegra_sor_readl(sor, SOR_STATE1);
+       value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
+       value |= SOR_STATE_ASY_PROTOCOL_DP_A;
+       tegra_sor_writel(sor, value, SOR_STATE1);
+
+       tegra_sor_mode_set(sor, mode, state);
+
        /* PWM setup */
        err = tegra_sor_setup_pwm(sor, 250);
        if (err < 0)
@@ -1578,11 +1791,15 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
                               struct drm_connector_state *conn_state)
 {
        struct tegra_output *output = encoder_to_output(encoder);
+       struct tegra_sor_state *state = to_sor_state(conn_state);
        struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
        unsigned long pclk = crtc_state->mode.clock * 1000;
        struct tegra_sor *sor = to_sor(output);
+       struct drm_display_info *info;
        int err;
 
+       info = &output->connector.display_info;
+
        err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
                                         pclk, 0);
        if (err < 0) {
@@ -1590,6 +1807,18 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
                return err;
        }
 
+       switch (info->bpc) {
+       case 8:
+       case 6:
+               state->bpc = info->bpc;
+               break;
+
+       default:
+               DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc);
+               state->bpc = 8;
+               break;
+       }
+
        return 0;
 }
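A minimal sketch of the bits-per-color policy this atomic_check introduces: anything other than 6 or 8 bpc falls back to 8, and the eDP enable path above then derives config.bits_per_pixel as bpc * 3. The helper name is invented for the sketch:

#include <stdio.h>

/* Hypothetical helper mirroring the switch in atomic_check above. */
static unsigned int sor_clamp_bpc(unsigned int bpc)
{
	switch (bpc) {
	case 8:
	case 6:
		return bpc;
	default:
		return 8; /* unsupported depths fall back to 8 bpc */
	}
}

int main(void)
{
	unsigned int bpc = sor_clamp_bpc(10); /* e.g. a panel reporting 10 bpc */

	printf("bpc=%u, bits_per_pixel=%u\n", bpc, bpc * 3); /* 8, 24 */
	return 0;
}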
 
@@ -1752,9 +1981,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
        if (err < 0)
                dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
 
-       reset_control_assert(sor->rst);
-       usleep_range(1000, 2000);
-       clk_disable_unprepare(sor->clk);
+       pm_runtime_put(sor->dev);
 }
 
 static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
@@ -1762,26 +1989,21 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
        struct tegra_output *output = encoder_to_output(encoder);
        unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
        struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
-       unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
        struct tegra_sor_hdmi_settings *settings;
        struct tegra_sor *sor = to_sor(output);
+       struct tegra_sor_state *state;
        struct drm_display_mode *mode;
-       struct drm_display_info *info;
+       unsigned int div, i;
        u32 value;
        int err;
 
+       state = to_sor_state(output->connector.state);
        mode = &encoder->crtc->state->adjusted_mode;
-       info = &output->connector.display_info;
 
-       err = clk_prepare_enable(sor->clk);
-       if (err < 0)
-               dev_err(sor->dev, "failed to enable clock: %d\n", err);
+       pm_runtime_get_sync(sor->dev);
 
-       usleep_range(1000, 2000);
-
-       reset_control_deassert(sor->rst);
-
-       err = clk_set_parent(sor->clk, sor->clk_safe);
+       /* switch to safe parent clock */
+       err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
        if (err < 0)
                dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
 
@@ -1877,22 +2099,20 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
        value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
        tegra_sor_writel(sor, value, SOR_REFCLK);
 
-       /* XXX don't hardcode */
-       value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) |
-               SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
-               SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
-               SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
-               SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
-               SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
-               SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
-               SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
-               SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
-               SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
-       tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
+       /* XXX not in TRM */
+       for (value = 0, i = 0; i < 5; i++)
+               value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
+                        SOR_XBAR_CTRL_LINK1_XSEL(i, i);
 
        tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
+       tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
 
-       err = clk_set_parent(sor->clk, sor->clk_parent);
+       /* switch to parent clock */
+       err = clk_set_parent(sor->clk_src, sor->clk_parent);
+       if (err < 0)
+               dev_err(sor->dev, "failed to set source clock: %d\n", err);
+
+       err = tegra_sor_set_parent_clock(sor, sor->clk_src);
        if (err < 0)
                dev_err(sor->dev, "failed to set parent clock: %d\n", err);
 
@@ -2002,7 +2222,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
        value &= ~DITHER_CONTROL_MASK;
        value &= ~BASE_COLOR_SIZE_MASK;
 
-       switch (info->bpc) {
+       switch (state->bpc) {
        case 6:
                value |= BASE_COLOR_SIZE_666;
                break;
@@ -2012,7 +2232,8 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
                break;
 
        default:
-               WARN(1, "%u bits-per-color not supported\n", info->bpc);
+               WARN(1, "%u bits-per-color not supported\n", state->bpc);
+               value |= BASE_COLOR_SIZE_888;
                break;
        }
 
@@ -2022,83 +2243,19 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
        if (err < 0)
                dev_err(sor->dev, "failed to power up SOR: %d\n", err);
 
-       /* configure mode */
-       value = tegra_sor_readl(sor, SOR_STATE1);
-       value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
-       value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
-       value &= ~SOR_STATE_ASY_OWNER_MASK;
-
-       value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
-                SOR_STATE_ASY_OWNER(dc->pipe + 1);
-
-       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
-               value &= ~SOR_STATE_ASY_HSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
-               value |= SOR_STATE_ASY_HSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
-               value &= ~SOR_STATE_ASY_VSYNCPOL;
-
-       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
-               value |= SOR_STATE_ASY_VSYNCPOL;
-
-       switch (info->bpc) {
-       case 8:
-               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
-               break;
-
-       case 6:
-               value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
-               break;
-
-       default:
-               BUG();
-               break;
-       }
-
-       tegra_sor_writel(sor, value, SOR_STATE1);
-
+       /* configure dynamic range of output */
        value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
        value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
        value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
        tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
 
+       /* configure colorspace */
        value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
        value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
        value |= SOR_HEAD_STATE_COLORSPACE_RGB;
        tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
 
-       /*
-        * TODO: The video timing programming below doesn't seem to match the
-        * register definitions.
-        */
-
-       value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
-
-       /* sync end = sync width - 1 */
-       vse = mode->vsync_end - mode->vsync_start - 1;
-       hse = mode->hsync_end - mode->hsync_start - 1;
-
-       value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
-
-       /* blank end = sync end + back porch */
-       vbe = vse + (mode->vtotal - mode->vsync_end);
-       hbe = hse + (mode->htotal - mode->hsync_end);
-
-       value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
-
-       /* blank start = blank end + active */
-       vbs = vbe + mode->vdisplay;
-       hbs = hbe + mode->hdisplay;
-
-       value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
-       tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
-
-       tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
+       tegra_sor_mode_set(sor, mode, state);
 
        tegra_sor_update(sor);
 
@@ -2196,10 +2353,13 @@ static int tegra_sor_init(struct host1x_client *client)
         * XXX: Remove this reset once proper hand-over from firmware to
         * kernel is possible.
         */
-       err = reset_control_assert(sor->rst);
-       if (err < 0) {
-               dev_err(sor->dev, "failed to assert SOR reset: %d\n", err);
-               return err;
+       if (sor->rst) {
+               err = reset_control_assert(sor->rst);
+               if (err < 0) {
+                       dev_err(sor->dev, "failed to assert SOR reset: %d\n",
+                               err);
+                       return err;
+               }
        }
 
        err = clk_prepare_enable(sor->clk);
@@ -2210,10 +2370,13 @@ static int tegra_sor_init(struct host1x_client *client)
 
        usleep_range(1000, 3000);
 
-       err = reset_control_deassert(sor->rst);
-       if (err < 0) {
-               dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err);
-               return err;
+       if (sor->rst) {
+               err = reset_control_deassert(sor->rst);
+               if (err < 0) {
+                       dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
+                               err);
+                       return err;
+               }
        }
 
        err = clk_prepare_enable(sor->clk_safe);
@@ -2324,11 +2487,16 @@ static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
        .remove = tegra_sor_hdmi_remove,
 };
 
+static const u8 tegra124_sor_xbar_cfg[5] = {
+       0, 1, 2, 3, 4
+};
+
 static const struct tegra_sor_soc tegra124_sor = {
        .supports_edp = true,
        .supports_lvds = true,
        .supports_hdmi = false,
        .supports_dp = false,
+       .xbar_cfg = tegra124_sor_xbar_cfg,
 };
 
 static const struct tegra_sor_soc tegra210_sor = {
@@ -2336,6 +2504,11 @@ static const struct tegra_sor_soc tegra210_sor = {
        .supports_lvds = false,
        .supports_hdmi = false,
        .supports_dp = false,
+       .xbar_cfg = tegra124_sor_xbar_cfg,
+};
+
+static const u8 tegra210_sor_xbar_cfg[5] = {
+       2, 1, 0, 3, 4
 };
 
 static const struct tegra_sor_soc tegra210_sor1 = {
@@ -2346,6 +2519,8 @@ static const struct tegra_sor_soc tegra210_sor1 = {
 
        .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
        .settings = tegra210_sor_hdmi_defaults,
+
+       .xbar_cfg = tegra210_sor_xbar_cfg,
 };
 
 static const struct of_device_id tegra_sor_of_match[] = {
@@ -2435,11 +2610,14 @@ static int tegra_sor_probe(struct platform_device *pdev)
                goto remove;
        }
 
-       sor->rst = devm_reset_control_get(&pdev->dev, "sor");
-       if (IS_ERR(sor->rst)) {
-               err = PTR_ERR(sor->rst);
-               dev_err(&pdev->dev, "failed to get reset control: %d\n", err);
-               goto remove;
+       if (!pdev->dev.pm_domain) {
+               sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+               if (IS_ERR(sor->rst)) {
+                       err = PTR_ERR(sor->rst);
+                       dev_err(&pdev->dev, "failed to get reset control: %d\n",
+                               err);
+                       goto remove;
+               }
        }
 
        sor->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2449,6 +2627,16 @@ static int tegra_sor_probe(struct platform_device *pdev)
                goto remove;
        }
 
+       if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
+               sor->clk_src = devm_clk_get(&pdev->dev, "source");
+               if (IS_ERR(sor->clk_src)) {
+                       err = PTR_ERR(sor->clk_src);
+                       dev_err(sor->dev, "failed to get source clock: %d\n",
+                               err);
+                       goto remove;
+               }
+       }
+
        sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
        if (IS_ERR(sor->clk_parent)) {
                err = PTR_ERR(sor->clk_parent);
@@ -2470,6 +2658,19 @@ static int tegra_sor_probe(struct platform_device *pdev)
                goto remove;
        }
 
+       platform_set_drvdata(pdev, sor);
+       pm_runtime_enable(&pdev->dev);
+
+       pm_runtime_get_sync(&pdev->dev);
+       sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
+       pm_runtime_put(&pdev->dev);
+
+       if (IS_ERR(sor->clk_brick)) {
+               err = PTR_ERR(sor->clk_brick);
+               dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+               goto remove;
+       }
+
        INIT_LIST_HEAD(&sor->client.list);
        sor->client.ops = &sor_client_ops;
        sor->client.dev = &pdev->dev;
@@ -2481,8 +2682,6 @@ static int tegra_sor_probe(struct platform_device *pdev)
                goto remove;
        }
 
-       platform_set_drvdata(pdev, sor);
-
        return 0;
 
 remove:
@@ -2498,6 +2697,8 @@ static int tegra_sor_remove(struct platform_device *pdev)
        struct tegra_sor *sor = platform_get_drvdata(pdev);
        int err;
 
+       pm_runtime_disable(&pdev->dev);
+
        err = host1x_client_unregister(&sor->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -2516,10 +2717,62 @@ static int tegra_sor_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int tegra_sor_suspend(struct device *dev)
+{
+       struct tegra_sor *sor = dev_get_drvdata(dev);
+       int err;
+
+       if (sor->rst) {
+               err = reset_control_assert(sor->rst);
+               if (err < 0) {
+                       dev_err(dev, "failed to assert reset: %d\n", err);
+                       return err;
+               }
+       }
+
+       usleep_range(1000, 2000);
+
+       clk_disable_unprepare(sor->clk);
+
+       return 0;
+}
+
+static int tegra_sor_resume(struct device *dev)
+{
+       struct tegra_sor *sor = dev_get_drvdata(dev);
+       int err;
+
+       err = clk_prepare_enable(sor->clk);
+       if (err < 0) {
+               dev_err(dev, "failed to enable clock: %d\n", err);
+               return err;
+       }
+
+       usleep_range(1000, 2000);
+
+       if (sor->rst) {
+               err = reset_control_deassert(sor->rst);
+               if (err < 0) {
+                       dev_err(dev, "failed to deassert reset: %d\n", err);
+                       clk_disable_unprepare(sor->clk);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops tegra_sor_pm_ops = {
+       SET_RUNTIME_PM_OPS(tegra_sor_suspend, tegra_sor_resume, NULL)
+};
+
 struct platform_driver tegra_sor_driver = {
        .driver = {
                .name = "tegra-sor",
                .of_match_table = tegra_sor_of_match,
+               .pm = &tegra_sor_pm_ops,
        },
        .probe = tegra_sor_probe,
        .remove = tegra_sor_remove,
index 2d31d027e3f68cfe22d36d03cd4f75f6a575333b..865c73b489685c74c8b438b145602adb20f8a5a9 100644 (file)
@@ -27,6 +27,9 @@
 #define  SOR_STATE_ASY_PIXELDEPTH_MASK         (0xf << 17)
 #define  SOR_STATE_ASY_PIXELDEPTH_BPP_18_444   (0x2 << 17)
 #define  SOR_STATE_ASY_PIXELDEPTH_BPP_24_444   (0x5 << 17)
+#define  SOR_STATE_ASY_PIXELDEPTH_BPP_30_444   (0x6 << 17)
+#define  SOR_STATE_ASY_PIXELDEPTH_BPP_36_444   (0x8 << 17)
+#define  SOR_STATE_ASY_PIXELDEPTH_BPP_48_444   (0x9 << 17)
 #define  SOR_STATE_ASY_VSYNCPOL                        (1 << 13)
 #define  SOR_STATE_ASY_HSYNCPOL                        (1 << 12)
 #define  SOR_STATE_ASY_PROTOCOL_MASK           (0xf << 8)
index f60a1ec84fa4b1025891830f0b3444b0a16b9dbf..28fed7e206d030abb23eb1141a7eac672fc702a7 100644 (file)
@@ -2,7 +2,6 @@ config DRM_TILCDC
        tristate "DRM Support for TI LCDC Display Controller"
        depends on DRM && OF && ARM
        select DRM_KMS_HELPER
-       select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
        select VIDEOMODE_HELPERS
index 79027b1c64d32acfdcf969b8a2106fcbeebeb59b..107c8bd04f6dee0d81f63c0369b39264043e646b 100644 (file)
@@ -697,7 +697,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
 
                spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
 
-               drm_handle_vblank(dev, 0);
+               drm_crtc_handle_vblank(crtc);
 
                if (!skip_event) {
                        struct drm_pending_vblank_event *event;
index 709bc903524d6d618aacec36cc8b1754f6b9295e..d27809372d54491f19534ae43fc5745c74ae1131 100644 (file)
@@ -541,7 +541,6 @@ static struct drm_driver tilcdc_driver = {
        .load               = tilcdc_load,
        .unload             = tilcdc_unload,
        .lastclose          = tilcdc_lastclose,
-       .set_busid          = drm_platform_set_busid,
        .irq_handler        = tilcdc_irq,
        .irq_preinstall     = tilcdc_irq_preinstall,
        .irq_postinstall    = tilcdc_irq_postinstall,
@@ -549,7 +548,7 @@ static struct drm_driver tilcdc_driver = {
        .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank      = tilcdc_enable_vblank,
        .disable_vblank     = tilcdc_disable_vblank,
-       .gem_free_object    = drm_gem_cma_free_object,
+       .gem_free_object_unlocked = drm_gem_cma_free_object,
        .gem_vm_ops         = &drm_gem_cma_vm_ops,
        .dumb_create        = drm_gem_cma_dumb_create,
        .dumb_map_offset    = drm_gem_cma_dumb_map_offset,
index 39386f50af876fd9494f9064384c5d050918d301..4054d804fe068f5821d49d675549d40a7ea23fb6 100644 (file)
@@ -146,10 +146,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));
-
-       if (bo->ttm)
-               ttm_tt_destroy(bo->ttm);
+       ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
+       fence_put(bo->moving);
        if (bo->resv == &bo->ttm_resv)
                reservation_object_fini(&bo->ttm_resv);
        mutex_destroy(&bo->wu_mutex);
@@ -360,7 +359,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_gpu, mem);
        else
-               ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+               ret = ttm_bo_move_memcpy(bo, evict, interruptible,
+                                        no_wait_gpu, mem);
 
        if (ret) {
                if (bdev->driver->move_notify) {
@@ -396,8 +396,7 @@ moved:
 
 out_err:
        new_man = &bdev->man[bo->mem.mem_type];
-       if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
-               ttm_tt_unbind(bo->ttm);
+       if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }
@@ -418,11 +417,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
        if (bo->bdev->driver->move_notify)
                bo->bdev->driver->move_notify(bo, NULL);
 
-       if (bo->ttm) {
-               ttm_tt_unbind(bo->ttm);
-               ttm_tt_destroy(bo->ttm);
-               bo->ttm = NULL;
-       }
+       ttm_tt_destroy(bo->ttm);
+       bo->ttm = NULL;
        ttm_bo_mem_put(bo, &bo->mem);
 
        ww_mutex_unlock (&bo->resv->lock);
@@ -688,15 +684,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
        struct ttm_placement placement;
        int ret = 0;
 
-       ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-
-       if (unlikely(ret != 0)) {
-               if (ret != -ERESTARTSYS) {
-                       pr_err("Failed to expire sync object before buffer eviction\n");
-               }
-               goto out;
-       }
-
        lockdep_assert_held(&bo->resv->lock.base);
 
        evict_mem = bo->mem;
@@ -720,7 +707,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait_gpu);
-       if (ret) {
+       if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        pr_err("Buffer eviction failed\n");
                ttm_bo_mem_put(bo, &evict_mem);
@@ -799,6 +786,34 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 }
 EXPORT_SYMBOL(ttm_bo_mem_put);
 
+/**
+ * Add the last move fence to the BO and reserve a new shared slot.
+ */
+static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
+                                struct ttm_mem_type_manager *man,
+                                struct ttm_mem_reg *mem)
+{
+       struct fence *fence;
+       int ret;
+
+       spin_lock(&man->move_lock);
+       fence = fence_get(man->move);
+       spin_unlock(&man->move_lock);
+
+       if (fence) {
+               reservation_object_add_shared_fence(bo->resv, fence);
+
+               ret = reservation_object_reserve_shared(bo->resv);
+               if (unlikely(ret))
+                       return ret;
+
+               fence_put(bo->moving);
+               bo->moving = fence;
+       }
+
+       return 0;
+}
+
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
@@ -825,10 +840,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
-       if (mem->mm_node == NULL)
-               return -ENOMEM;
        mem->mem_type = mem_type;
-       return 0;
+       return ttm_bo_add_move_fence(bo, man, mem);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -898,6 +911,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
        bool has_erestartsys = false;
        int i, ret;
 
+       ret = reservation_object_reserve_shared(bo->resv);
+       if (unlikely(ret))
+               return ret;
+
        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                const struct ttm_place *place = &placement->placement[i];
@@ -931,9 +948,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret))
                        return ret;
-               
-               if (mem->mm_node)
+
+               if (mem->mm_node) {
+                       ret = ttm_bo_add_move_fence(bo, man, mem);
+                       if (unlikely(ret)) {
+                               (*man->func->put_node)(man, mem);
+                               return ret;
+                       }
                        break;
+               }
        }
 
        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
@@ -1000,20 +1023,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 
        lockdep_assert_held(&bo->resv->lock.base);
 
-       /*
-        * Don't wait for the BO on initial allocation. This is important when
-        * the BO has an imported reservation object.
-        */
-       if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
-               /*
-                * FIXME: It's possible to pipeline buffer moves.
-                * Have the driver move function wait for idle when necessary,
-                * instead of doing it here.
-                */
-               ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
-               if (ret)
-                       return ret;
-       }
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
@@ -1034,9 +1043,9 @@ out_unlock:
        return ret;
 }
 
-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
-                             struct ttm_mem_reg *mem,
-                             uint32_t *new_flags)
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+                      struct ttm_mem_reg *mem,
+                      uint32_t *new_flags)
 {
        int i;
 
@@ -1068,6 +1077,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
 
        return false;
 }
+EXPORT_SYMBOL(ttm_bo_mem_compat);
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
@@ -1165,7 +1175,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved_vm = false;
        bo->mem.bus.io_reserved_count = 0;
-       bo->priv_flags = 0;
+       bo->moving = NULL;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->persistent_swap_storage = persistent_swap_storage;
        bo->acc_size = acc_size;
@@ -1277,6 +1287,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 {
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
+       struct fence *fence;
        int ret;
 
        /*
@@ -1297,6 +1308,23 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                spin_lock(&glob->lru_lock);
        }
        spin_unlock(&glob->lru_lock);
+
+       spin_lock(&man->move_lock);
+       fence = fence_get(man->move);
+       spin_unlock(&man->move_lock);
+
+       if (fence) {
+               ret = fence_wait(fence, false);
+               fence_put(fence);
+               if (ret) {
+                       if (allow_errors) {
+                               return ret;
+                       } else {
+                               pr_err("Cleanup eviction failed\n");
+                       }
+               }
+       }
+
        return 0;
 }
 
@@ -1316,6 +1344,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
                       mem_type);
                return ret;
        }
+       fence_put(man->move);
 
        man->use_type = false;
        man->has_type = false;
@@ -1361,6 +1390,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
        man->io_reserve_fastpath = true;
        man->use_io_reserve_lru = false;
        mutex_init(&man->io_reserve_mutex);
+       spin_lock_init(&man->move_lock);
        INIT_LIST_HEAD(&man->io_reserve_lru);
 
        ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1379,6 +1409,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
        man->size = p_size;
 
        INIT_LIST_HEAD(&man->lru);
+       man->move = NULL;
 
        return 0;
 }
@@ -1572,47 +1603,17 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool interruptible, bool no_wait)
 {
-       struct reservation_object_list *fobj;
-       struct reservation_object *resv;
-       struct fence *excl;
-       long timeout = 15 * HZ;
-       int i;
-
-       resv = bo->resv;
-       fobj = reservation_object_get_list(resv);
-       excl = reservation_object_get_excl(resv);
-       if (excl) {
-               if (!fence_is_signaled(excl)) {
-                       if (no_wait)
-                               return -EBUSY;
-
-                       timeout = fence_wait_timeout(excl,
-                                                    interruptible, timeout);
-               }
-       }
-
-       for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
-               struct fence *fence;
-               fence = rcu_dereference_protected(fobj->shared[i],
-                                               reservation_object_held(resv));
-
-               if (!fence_is_signaled(fence)) {
-                       if (no_wait)
-                               return -EBUSY;
-
-                       timeout = fence_wait_timeout(fence,
-                                                    interruptible, timeout);
-               }
-       }
+       long timeout = no_wait ? 0 : 15 * HZ;
 
+       timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
+                                                     interruptible, timeout);
        if (timeout < 0)
                return timeout;
 
        if (timeout == 0)
                return -EBUSY;
 
-       reservation_object_add_excl_fence(resv, NULL);
-       clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+       reservation_object_add_excl_fence(bo->resv, NULL);
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_wait);
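The rewritten ttm_bo_wait() folds the old per-fence loop into a single reservation_object_wait_timeout_rcu() call, so the subtlety is the return-value mapping. A standalone model of just that mapping (the function name is invented):

#include <errno.h>
#include <stdio.h>

/* Sketch of the mapping above: the RCU wait returns <0 on error, 0 if
 * the timeout (0 jiffies in the no_wait case) expired while fences were
 * still unsignaled, and >0 remaining jiffies once everything signaled. */
static int map_wait_result(long timeout)
{
	if (timeout < 0)
		return (int)timeout; /* propagate the error */

	if (timeout == 0)
		return -EBUSY; /* still busy */

	return 0; /* idle */
}

int main(void)
{
	printf("%d %d %d\n", map_wait_result(-EINTR), map_wait_result(0),
	       map_wait_result(100));
	return 0;
}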
@@ -1682,14 +1683,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
        ttm_bo_list_ref_sub(bo, put_count, true);
 
        /**
-        * Wait for GPU, then move to system cached.
+        * Move to system cached.
         */
 
-       ret = ttm_bo_wait(bo, false, false);
-
-       if (unlikely(ret != 0))
-               goto out;
-
        if ((bo->mem.placement & swap_placement) != swap_placement) {
                struct ttm_mem_reg evict_mem;
 
@@ -1704,6 +1700,14 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
                        goto out;
        }
 
+       /**
+        * Make sure BO is idle.
+        */
+
+       ret = ttm_bo_wait(bo, false, false);
+       if (unlikely(ret != 0))
+               goto out;
+
        ttm_bo_unmap_virtual(bo);
 
        /**
index d9831559706eb9312cf4803ac43e7113a6e41c5c..4da0e784f9e7dd5851b24d5bfcfe8eac764d8f06 100644 (file)
@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
        int ret;
 
        if (old_mem->mem_type != TTM_PL_SYSTEM) {
-               ttm_tt_unbind(ttm);
                ttm_bo_free_old_node(bo);
                ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
                                TTM_PL_MASK_MEM);
@@ -321,7 +320,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                      bool evict, bool no_wait_gpu,
+                      bool evict, bool interruptible,
+                      bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -337,6 +337,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
        unsigned long add = 0;
        int dir;
 
+       ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+       if (ret)
+               return ret;
+
        ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
        if (ret)
                return ret;
@@ -401,8 +405,7 @@ out2:
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
 
-       if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
-               ttm_tt_unbind(ttm);
+       if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                ttm_tt_destroy(ttm);
                bo->ttm = NULL;
        }
@@ -462,6 +465,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->swap);
        INIT_LIST_HEAD(&fbo->io_reserve_lru);
+       fbo->moving = NULL;
        drm_vma_node_reset(&fbo->vma_node);
        atomic_set(&fbo->cpu_writers, 0);
 
@@ -634,7 +638,6 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct fence *fence,
                              bool evict,
-                             bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
        struct ttm_bo_device *bdev = bo->bdev;
@@ -649,9 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                if (ret)
                        return ret;
 
-               if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
-                   (bo->ttm != NULL)) {
-                       ttm_tt_unbind(bo->ttm);
+               if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
@@ -665,7 +666,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                 * operation has completed.
                 */
 
-               set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+               fence_put(bo->moving);
+               bo->moving = fence_get(fence);
 
                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                if (ret)
@@ -694,3 +696,95 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+                        struct fence *fence, bool evict,
+                        struct ttm_mem_reg *new_mem)
+{
+       struct ttm_bo_device *bdev = bo->bdev;
+       struct ttm_mem_reg *old_mem = &bo->mem;
+
+       struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
+       struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
+
+       int ret;
+
+       reservation_object_add_excl_fence(bo->resv, fence);
+
+       if (!evict) {
+               struct ttm_buffer_object *ghost_obj;
+
+               /**
+                * This should help pipeline ordinary buffer moves.
+                *
+                * Hang old buffer memory on a new buffer object,
+                * and leave it to be released when the GPU
+                * operation has completed.
+                */
+
+               fence_put(bo->moving);
+               bo->moving = fence_get(fence);
+
+               ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+               if (ret)
+                       return ret;
+
+               reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
+               /**
+                * If we're not moving to fixed memory, the TTM object
+                * needs to stay alive. Otherwise hang it on the ghost
+                * bo to be unbound and destroyed.
+                */
+
+               if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
+                       ghost_obj->ttm = NULL;
+               else
+                       bo->ttm = NULL;
+
+               ttm_bo_unreserve(ghost_obj);
+               ttm_bo_unref(&ghost_obj);
+
+       } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
+
+               /**
+                * The BO doesn't have a TTM that we need to bind/unbind. Just
+                * remember this eviction and free up the allocation.
+                */
+
+               spin_lock(&from->move_lock);
+               if (!from->move || fence_is_later(fence, from->move)) {
+                       fence_put(from->move);
+                       from->move = fence_get(fence);
+               }
+               spin_unlock(&from->move_lock);
+
+               ttm_bo_free_old_node(bo);
+
+               fence_put(bo->moving);
+               bo->moving = fence_get(fence);
+
+       } else {
+               /**
+                * Last resort: wait for the move to be completed.
+                *
+                * Should never happen in practice.
+                */
+
+               ret = ttm_bo_wait(bo, false, false);
+               if (ret)
+                       return ret;
+
+               if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
+                       ttm_tt_destroy(bo->ttm);
+                       bo->ttm = NULL;
+               }
+               ttm_bo_free_old_node(bo);
+       }
+
+       *old_mem = *new_mem;
+       new_mem->mm_node = NULL;
+
+       return 0;
+}
+EXPORT_SYMBOL(ttm_bo_pipeline_move);
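ttm_bo_pipeline_move() picks one of three strategies, which is easier to see with the fence bookkeeping stripped away. A purely illustrative model of the branch selection (types and names invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

enum move_strategy { GHOST_OBJECT, REMEMBER_FENCE, WAIT_FOR_IDLE };

/* Illustrative model of the three branches above: ordinary moves hang the
 * old backing store on a ghost object, evictions from fixed memory only
 * record the fence on the memory-type manager, and everything else falls
 * back to a blocking wait. */
static enum move_strategy pick_strategy(bool evict, bool from_is_fixed)
{
	if (!evict)
		return GHOST_OBJECT;

	if (from_is_fixed)
		return REMEMBER_FENCE;

	return WAIT_FOR_IDLE;
}

int main(void)
{
	printf("%d %d %d\n", pick_strategy(false, false),
	       pick_strategy(true, true), pick_strategy(true, false));
	return 0;
}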
index 3216878bced3d3debd16cca931589706de897795..a6ed9d5e5167fc935f0fceb8bc314db843959c19 100644 (file)
@@ -48,15 +48,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
 {
        int ret = 0;
 
-       if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
+       if (likely(!bo->moving))
                goto out_unlock;
 
        /*
         * Quick non-stalling check for idle.
         */
-       ret = ttm_bo_wait(bo, false, true);
-       if (likely(ret == 0))
-               goto out_unlock;
+       if (fence_is_signaled(bo->moving))
+               goto out_clear;
 
        /*
         * If possible, avoid waiting for GPU with mmap_sem
@@ -68,17 +67,23 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                        goto out_unlock;
 
                up_read(&vma->vm_mm->mmap_sem);
-               (void) ttm_bo_wait(bo, true, false);
+               (void) fence_wait(bo->moving, true);
                goto out_unlock;
        }
 
        /*
         * Ordinary wait.
         */
-       ret = ttm_bo_wait(bo, true, false);
-       if (unlikely(ret != 0))
+       ret = fence_wait(bo->moving, true);
+       if (unlikely(ret != 0)) {
                ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;
+               goto out_unlock;
+       }
+
+out_clear:
+       fence_put(bo->moving);
+       bo->moving = NULL;
 
 out_unlock:
        return ret;
index 077ae9b2865dcda4bb8d9e774a8b4a5cf498bc84..d28d4333dcce091fb36c6d6242b2328e8eb9f3e2 100644 (file)
@@ -166,11 +166,15 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
 void ttm_tt_destroy(struct ttm_tt *ttm)
 {
-       if (unlikely(ttm == NULL))
+       int ret;
+
+       if (ttm == NULL)
                return;
 
        if (ttm->state == tt_bound) {
-               ttm_tt_unbind(ttm);
+               ret = ttm->func->unbind(ttm);
+               BUG_ON(ret);
+               ttm->state = tt_unbound;
        }
 
        if (ttm->state == tt_unbound)
@@ -251,17 +255,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
 
-void ttm_tt_unbind(struct ttm_tt *ttm)
-{
-       int ret;
-
-       if (ttm->state == tt_bound) {
-               ret = ttm->func->unbind(ttm);
-               BUG_ON(ret);
-               ttm->state = tt_unbound;
-       }
-}
-
 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
        int ret = 0;
index 613ab0622d6e4a2dfbfeaf3efee59bfb6c17b7f7..1616ec4f4d84d5a8d7160ba84f8aa8d85fa3e918 100644 (file)
@@ -4,12 +4,7 @@ config DRM_UDL
        depends on USB_SUPPORT
        depends on USB_ARCH_HAS_HCD
        select USB
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
-       select FB_DEFERRED_IO
        select DRM_KMS_HELPER
-        select DRM_KMS_FB_HELPER
        help
          This is a KMS driver for the USB displaylink video adapters.
          Say M/Y to add support for these devices via drm/kms interfaces.
index c20408940cd0685eb1c32c45d0927980e560c8ab..17d34e0edbdd513e4c7860e3ddf77c595bd2fa41 100644 (file)
@@ -94,7 +94,6 @@ static void udl_usb_disconnect(struct usb_interface *interface)
        struct drm_device *dev = usb_get_intfdata(interface);
 
        drm_kms_helper_poll_disable(dev);
-       drm_connector_unregister_all(dev);
        udl_fbdev_unplug(dev);
        udl_drop_usb(dev);
        drm_unplug_dev(dev);
index b87afee44995cac4211ee0d7ed6372617893ea31..f92ea957967471f80f14a83da4c0d71ad3cd4aae 100644 (file)
@@ -376,7 +376,7 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
 
        spin_lock_irqsave(&dev->event_lock, flags);
        if (event)
-               drm_send_vblank_event(dev, 0, event);
+               drm_crtc_send_vblank_event(crtc, event);
        spin_unlock_irqrestore(&dev->event_lock, flags);
        crtc->primary->fb = fb;
 
index e5a9d3aaf45ff707a691e2b16b45da8f3bc6c177..59adcf8532dd804348d31e4f73249e3561cb5353 100644 (file)
@@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
 
 /* Called on the last userspace/kernel unreference of the BO.  Returns
  * it to the BO cache if possible, otherwise frees it.
- *
- * Note that this is called with the struct_mutex held.
  */
 void vc4_free_object(struct drm_gem_object *gem_bo)
 {
index 904d0754ad78928fc4090b1121ae7537edf14147..8fc2b731b59a613fddce5853981e1ab8fd982837 100644 (file)
@@ -46,12 +46,17 @@ struct vc4_crtc {
        const struct vc4_crtc_data *data;
        void __iomem *regs;
 
+       /* Timestamp at start of vblank irq - unaffected by lock delays. */
+       ktime_t t_vblank;
+
        /* Which HVS channel we're using for our CRTC. */
        int channel;
 
        u8 lut_r[256];
        u8 lut_g[256];
        u8 lut_b[256];
+       /* Size in pixels of the COB memory allocated to this CRTC. */
+       u32 cob_size;
 
        struct drm_pending_vblank_event *event;
 };
@@ -146,6 +151,144 @@ int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
 }
 #endif
 
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+                           unsigned int flags, int *vpos, int *hpos,
+                           ktime_t *stime, ktime_t *etime,
+                           const struct drm_display_mode *mode)
+{
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+       u32 val;
+       int fifo_lines;
+       int vblank_lines;
+       int ret = 0;
+
+       /*
+        * XXX Doesn't work well in interlaced mode yet, partially due
+        * to problems in vc4 kms or drm core interlaced mode handling,
+        * so disable for now in interlaced mode.
+        */
+       if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+               return ret;
+
+       /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+       /* Get optional system timestamp before query. */
+       if (stime)
+               *stime = ktime_get();
+
+       /*
+        * Read vertical scanline which is currently composed for our
+        * pixelvalve by the HVS, and also the scaler status.
+        */
+       val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel));
+
+       /* Get optional system timestamp after query. */
+       if (etime)
+               *etime = ktime_get();
+
+       /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+       /* Vertical position of hvs composed scanline. */
+       *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
+
+       /* No hpos info available. */
+       if (hpos)
+               *hpos = 0;
+
+       /* This is the offset we need for translating hvs -> pv scanout pos. */
+       fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
+
+       if (fifo_lines > 0)
+               ret |= DRM_SCANOUTPOS_VALID;
+
+       /* HVS more than fifo_lines into frame for compositing? */
+       if (*vpos > fifo_lines) {
+               /*
+                * We are in active scanout and can get some meaningful results
+                * from HVS. The actual PV scanout cannot trail behind more
+                * than fifo_lines as that is the fifo's capacity. Assume that
+                * in active scanout the HVS and PV work in lockstep wrt. HVS
+                * refilling the fifo and PV consuming from the fifo, i.e.
+                * whenever the PV consumes and frees up a scanline in the
+                * fifo, the HVS will immediately refill it, therefore
+                * incrementing vpos. Therefore we choose HVS read position -
+                * fifo size in scanlines as an estimate of the real scanout
+                * position of the PV.
+                */
+               *vpos -= fifo_lines + 1;
+               if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+                       *vpos /= 2;
+
+               ret |= DRM_SCANOUTPOS_ACCURATE;
+               return ret;
+       }
+
+       /*
+        * Less (vpos <= fifo_lines): this happens when we are in vblank and
+        * the HVS, after getting the VSTART restart signal from the PV, just
+        * started refilling its fifo with the top-most lines of the new
+        * framebuffers. The PV does not scan out in vblank, so it does not
+        * remove lines from the fifo; the fifo fills up quickly and the HVS
+        * has to pause. We can't get meaningful readings wrt. scanline
+        * position of the PV and need to make things up in an approximate
+        * but consistent way.
+        */
+       ret |= DRM_SCANOUTPOS_IN_VBLANK;
+       vblank_lines = mode->crtc_vtotal - mode->crtc_vdisplay;
+
+       if (flags & DRM_CALLED_FROM_VBLIRQ) {
+               /*
+                * Assume the irq handler got called close to the first
+                * line of vblank, so the PV has about a full vblank's
+                * worth of scanlines to go. As a base timestamp, use the
+                * one taken at entry into the vblank irq handler, so it
+                * is not affected by random delays due to lock
+                * contention on event_lock or the vblank_time lock in
+                * the core.
+                */
+               *vpos = -vblank_lines;
+
+               if (stime)
+                       *stime = vc4_crtc->t_vblank;
+               if (etime)
+                       *etime = vc4_crtc->t_vblank;
+
+               /*
+                * If the HVS fifo is not yet full then we know for certain
+                * we are at the very beginning of vblank, as the hvs just
+                * started refilling, and the stime and etime timestamps
+                * truly correspond to start of vblank.
+                */
+               if ((val & SCALER_DISPSTATX_FULL) != SCALER_DISPSTATX_FULL)
+                       ret |= DRM_SCANOUTPOS_ACCURATE;
+       } else {
+               /*
+                * No clue where we are inside vblank. Return a vpos of zero,
+                * which will cause calling code to just return the etime
+                * timestamp uncorrected. At least this is no worse than the
+                * standard fallback.
+                */
+               *vpos = 0;
+       }
+
+       return ret;
+}
+
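For a concrete feel of the translation above, here is a minimal sketch of the arithmetic with invented numbers (the mode, COB allocation and HVS line readout are hypothetical, not taken from real hardware):

    /* Hypothetical example of the HVS -> PV scanout translation. */
    unsigned int cob_size = 20480;             /* invented COB allocation, in pixels */
    unsigned int crtc_hdisplay = 1920;
    int fifo_lines = cob_size / crtc_hdisplay; /* 10 scanlines of fifo depth */
    int hvs_line = 400;                        /* invented SCALER_DISPSTATX_LINE value */

    /* Active scanout: the PV trails the HVS by the fifo depth. */
    int pv_line = hvs_line - (fifo_lines + 1); /* estimated PV line: 389 */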
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+                                 int *max_error, struct timeval *vblank_time,
+                                 unsigned flags)
+{
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
+       struct drm_crtc *crtc = &vc4_crtc->base;
+       struct drm_crtc_state *state = crtc->state;
+
+       /* Helper routine in DRM core does all the work: */
+       return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc_id, max_error,
+                                                    vblank_time, flags,
+                                                    &state->adjusted_mode);
+}
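The helper turns the sampled scanout position into a timestamp by subtracting the time the CRTC still needs (or has spent) relative to vblank start. A simplified sketch of that correction, assuming vpos counts scanlines relative to vblank start and mode->crtc_clock is in kHz; the real helper additionally accounts for hpos, interlace and error bounds:

    /* Simplified vblank timestamp correction (sketch only). */
    s64 line_ns = div_s64(1000000LL * mode->crtc_htotal,
                          mode->crtc_clock);  /* ~14815 ns/line for 1080p60 */
    s64 delta_ns = (s64)vpos * line_ns;       /* offset from vblank start */
    ktime_t ts = ktime_sub_ns(*etime, delta_ns);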
+
 static void vc4_crtc_destroy(struct drm_crtc *crtc)
 {
        drm_crtc_cleanup(crtc);
@@ -175,20 +318,22 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
                HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
 }
 
-static void
+static int
 vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-                  uint32_t start, uint32_t size)
+                  uint32_t size)
 {
        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
        u32 i;
 
-       for (i = start; i < start + size; i++) {
+       for (i = 0; i < size; i++) {
                vc4_crtc->lut_r[i] = r[i] >> 8;
                vc4_crtc->lut_g[i] = g[i] >> 8;
                vc4_crtc->lut_b[i] = b[i] >> 8;
        }
 
        vc4_crtc_lut_load(crtc);
+
+       return 0;
 }
 
 static u32 vc4_get_fifo_full_level(u32 format)
@@ -395,6 +540,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_plane *plane;
        unsigned long flags;
+       const struct drm_plane_state *plane_state;
        u32 dlist_count = 0;
        int ret;
 
@@ -404,18 +550,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
        if (hweight32(state->connector_mask) > 1)
                return -EINVAL;
 
-       drm_atomic_crtc_state_for_each_plane(plane, state) {
-               struct drm_plane_state *plane_state =
-                       state->state->plane_states[drm_plane_index(plane)];
-
-               /* plane might not have changed, in which case take
-                * current state:
-                */
-               if (!plane_state)
-                       plane_state = plane->state;
-
+       drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
                dlist_count += vc4_plane_dlist_size(plane_state);
-       }
 
        dlist_count++; /* Account for SCALER_CTL0_END. */
 
@@ -456,14 +592,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
        WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-       HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-                 vc4_state->mm.start);
-
-       if (debug_dump_regs) {
-               DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
-               vc4_hvs_dump_state(dev);
-       }
-
        if (crtc->state->event) {
                unsigned long flags;
 
@@ -473,8 +601,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 
                spin_lock_irqsave(&dev->event_lock, flags);
                vc4_crtc->event = crtc->state->event;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
                crtc->state->event = NULL;
+
+               HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+                         vc4_state->mm.start);
+
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       } else {
+               HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+                         vc4_state->mm.start);
+       }
+
+       if (debug_dump_regs) {
+               DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+               vc4_hvs_dump_state(dev);
        }
 }
 
@@ -500,12 +640,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
 {
        struct drm_crtc *crtc = &vc4_crtc->base;
        struct drm_device *dev = crtc->dev;
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+       u32 chan = vc4_crtc->channel;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (vc4_crtc->event) {
+       if (vc4_crtc->event &&
+           (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
                drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
                vc4_crtc->event = NULL;
+               drm_crtc_vblank_put(crtc);
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
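The completion check above pairs with a vblank reference taken when the flip is queued (see the drm_crtc_vblank_get() calls added below): the event is only sent, and the reference only dropped, once SCALER_DISPLACTX shows the HVS actually latched the new display list. A condensed sketch of the two sides of that contract:

    /* Queue side: keep vblank irqs running until the flip completes. */
    WARN_ON(drm_crtc_vblank_get(crtc) != 0);
    /* ... arm vc4_crtc->event, write SCALER_DISPLISTX ... */

    /* Vblank irq side: complete only once the hardware caught up. */
    if (vc4_crtc->event &&
        vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan))) {
            drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
            vc4_crtc->event = NULL;
            drm_crtc_vblank_put(crtc);
    }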
@@ -517,6 +662,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
        irqreturn_t ret = IRQ_NONE;
 
        if (stat & PV_INT_VFP_START) {
+               vc4_crtc->t_vblank = ktime_get();
                CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
                drm_crtc_handle_vblank(&vc4_crtc->base);
                vc4_crtc_handle_page_flip(vc4_crtc);
@@ -556,6 +702,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
                spin_unlock_irqrestore(&dev->event_lock, flags);
        }
 
+       drm_crtc_vblank_put(crtc);
        drm_framebuffer_unreference(flip_state->fb);
        kfree(flip_state);
 
@@ -598,6 +745,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
                return ret;
        }
 
+       WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
        /* Immediately update the plane's legacy fb pointer, so that later
         * modeset prep sees the state that will be present when the semaphore
         * is released.
@@ -718,6 +867,22 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
        }
 }
 
+static void
+vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc)
+{
+       struct drm_device *drm = vc4_crtc->base.dev;
+       struct vc4_dev *vc4 = to_vc4_dev(drm);
+       u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel));
+       /* Top/base are supposed to be 4-pixel aligned, but the
+        * Raspberry Pi firmware fills the low bits (which are
+        * presumably ignored).
+        */
+       u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
+       u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
+
+       vc4_crtc->cob_size = top - base + 4;
+}
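With the field masks defined further down in vc4_regs.h, the computation reduces to simple field extraction. A worked example with an invented DISPBASE readout:

    /* Hypothetical dispbase readout: top = 0x2000, base = 0x1000. */
    u32 dispbase = (0x2000 << 16) | 0x1000;
    u32 top  = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;  /* 0x2000 */
    u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3; /* 0x1000 */
    u32 cob_size = top - base + 4;  /* 0x1004 pixels of COB memory */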
+
 static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -794,6 +959,8 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
                crtc->cursor = cursor_plane;
        }
 
+       vc4_crtc_get_cob_allocation(vc4_crtc);
+
        CRTC_WRITE(PV_INTEN, 0);
        CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
        ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
index 9817dbfa4ac315b76538b9dc2bace8a86b8a9419..275fedbdbd9eae420939627e7fcdf9236cfce4fa 100644 (file)
@@ -208,14 +208,6 @@ static int vc4_dpi_connector_get_modes(struct drm_connector *connector)
        return 0;
 }
 
-static struct drm_encoder *
-vc4_dpi_connector_best_encoder(struct drm_connector *connector)
-{
-       struct vc4_dpi_connector *dpi_connector =
-               to_vc4_dpi_connector(connector);
-       return dpi_connector->encoder;
-}
-
 static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = vc4_dpi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
 
 static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = {
        .get_modes = vc4_dpi_connector_get_modes,
-       .best_encoder = vc4_dpi_connector_best_encoder,
 };
 
 static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
@@ -236,14 +227,12 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
 {
        struct drm_connector *connector = NULL;
        struct vc4_dpi_connector *dpi_connector;
-       int ret = 0;
 
        dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector),
                                     GFP_KERNEL);
-       if (!dpi_connector) {
-               ret = -ENOMEM;
-               goto fail;
-       }
+       if (!dpi_connector)
+               return ERR_PTR(-ENOMEM);
+
        connector = &dpi_connector->base;
 
        dpi_connector->encoder = dpi->encoder;
@@ -260,12 +249,6 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
        drm_mode_connector_attach_encoder(connector, dpi->encoder);
 
        return connector;
-
- fail:
-       if (connector)
-               vc4_dpi_connector_destroy(connector);
-
-       return ERR_PTR(ret);
 }
 
 static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
index 3446ece21b4ab4e2d38280ad0b2c5733207ffd20..8b42d31a7f0e8b61ed776200841285d7d5687dbb 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include "drm_fb_cma_helper.h"
 
 #include "uapi/drm/vc4_drm.h"
@@ -43,12 +44,54 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
        return map;
 }
 
+static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
+                              struct drm_file *file_priv)
+{
+       struct vc4_dev *vc4 = to_vc4_dev(dev);
+       struct drm_vc4_get_param *args = data;
+       int ret;
+
+       if (args->pad != 0)
+               return -EINVAL;
+
+       switch (args->param) {
+       case DRM_VC4_PARAM_V3D_IDENT0:
+               ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+               if (ret < 0)
+                       return ret;
+               args->value = V3D_READ(V3D_IDENT0);
+               pm_runtime_put(&vc4->v3d->pdev->dev);
+               break;
+       case DRM_VC4_PARAM_V3D_IDENT1:
+               ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+               if (ret < 0)
+                       return ret;
+               args->value = V3D_READ(V3D_IDENT1);
+               pm_runtime_put(&vc4->v3d->pdev->dev);
+               break;
+       case DRM_VC4_PARAM_V3D_IDENT2:
+               ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+               if (ret < 0)
+                       return ret;
+               args->value = V3D_READ(V3D_IDENT2);
+               pm_runtime_put(&vc4->v3d->pdev->dev);
+               break;
+       case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
+               args->value = true;
+               break;
+       default:
+               DRM_DEBUG("Unknown parameter %d\n", args->param);
+               return -EINVAL;
+       }
+
+       return 0;
+}
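From userspace the new ioctl is a straightforward read-back. A hedged sketch using libdrm, assuming the uapi definitions from vc4_drm.h added by this series (error handling trimmed):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/vc4_drm.h>

    static int example_query_ident0(int fd, uint64_t *value)
    {
            struct drm_vc4_get_param p = {
                    .param = DRM_VC4_PARAM_V3D_IDENT0,
                    /* .pad stays zero; the kernel rejects anything else */
            };
            int ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &p);

            if (ret == 0)
                    *value = p.value;
            return ret;
    }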
+
 static void vc4_lastclose(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
-       if (vc4->fbdev)
-               drm_fbdev_cma_restore_mode(vc4->fbdev);
+       drm_fbdev_cma_restore_mode(vc4->fbdev);
 }
 
 static const struct file_operations vc4_drm_fops = {
@@ -66,14 +109,15 @@ static const struct file_operations vc4_drm_fops = {
 };
 
 static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
-       DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
-       DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+       DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
                          DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF_DRV(VC4_GET_PARAM, vc4_get_param_ioctl, DRM_RENDER_ALLOW),
 };
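Marking these ioctls DRM_RENDER_ALLOW lets unprivileged clients submit work through a render node rather than the master-controlled card node, assuming the driver also advertises DRIVER_RENDER (not visible in this hunk). A minimal userspace sketch:

    #include <fcntl.h>

    /* The render minor number varies per system; 128 is just typical. */
    int render_fd = open("/dev/dri/renderD128", O_RDWR);
    /* render_fd may now issue VC4_SUBMIT_CL etc. without DRM master. */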
 
 static struct drm_driver vc4_drm_driver = {
@@ -91,7 +135,9 @@ static struct drm_driver vc4_drm_driver = {
 
        .enable_vblank = vc4_enable_vblank,
        .disable_vblank = vc4_disable_vblank,
-       .get_vblank_counter = drm_vblank_count,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
+       .get_scanout_position = vc4_crtc_get_scanoutpos,
+       .get_vblank_timestamp = vc4_crtc_get_vblank_timestamp,
 
 #if defined(CONFIG_DEBUG_FS)
        .debugfs_init = vc4_debugfs_init,
@@ -99,7 +145,7 @@ static struct drm_driver vc4_drm_driver = {
 #endif
 
        .gem_create_object = vc4_create_object,
-       .gem_free_object = vc4_free_object,
+       .gem_free_object_unlocked = vc4_free_object,
        .gem_vm_ops = &drm_gem_cma_vm_ops,
 
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -176,7 +222,6 @@ static int vc4_drm_bind(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct drm_device *drm;
-       struct drm_connector *connector;
        struct vc4_dev *vc4;
        int ret = 0;
 
@@ -196,8 +241,6 @@ static int vc4_drm_bind(struct device *dev)
        vc4_bo_cache_init(drm);
 
        drm_mode_config_init(drm);
-       if (ret)
-               goto unref;
 
        vc4_gem_init(drm);
 
@@ -211,27 +254,14 @@ static int vc4_drm_bind(struct device *dev)
        if (ret < 0)
                goto unbind_all;
 
-       /* Connector registration has to occur after DRM device
-        * registration, because it creates sysfs entries based on the
-        * DRM device.
-        */
-       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-               ret = drm_connector_register(connector);
-               if (ret)
-                       goto unregister;
-       }
-
        vc4_kms_load(drm);
 
        return 0;
 
-unregister:
-       drm_dev_unregister(drm);
 unbind_all:
        component_unbind_all(dev, drm);
 gem_destroy:
        vc4_gem_destroy(drm);
-unref:
        drm_dev_unref(drm);
        vc4_bo_cache_destroy(drm);
        return ret;
@@ -259,8 +289,8 @@ static const struct component_master_ops vc4_drm_ops = {
 static struct platform_driver *const component_drivers[] = {
        &vc4_hdmi_driver,
        &vc4_dpi_driver,
-       &vc4_crtc_driver,
        &vc4_hvs_driver,
+       &vc4_crtc_driver,
        &vc4_v3d_driver,
 };
 
index 37cac59401d734e047f4cf41cc6a9ecb5bfc6dbc..489e3de0c050df0eb3022afa421c579449c3ebdf 100644 (file)
@@ -355,6 +355,9 @@ struct vc4_validated_shader_info {
        uint32_t uniforms_src_size;
        uint32_t num_texture_samples;
        struct vc4_texture_sample_info *texture_samples;
+
+       uint32_t num_uniform_addr_offsets;
+       uint32_t *uniform_addr_offsets;
 };
 
 /**
@@ -415,6 +418,13 @@ extern struct platform_driver vc4_crtc_driver;
 int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
 void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
 int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
+int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
+                           unsigned int flags, int *vpos, int *hpos,
+                           ktime_t *stime, ktime_t *etime,
+                           const struct drm_display_mode *mode);
+int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
+                                 int *max_error, struct timeval *vblank_time,
+                                 unsigned flags);
 
 /* vc4_debugfs.c */
 int vc4_debugfs_init(struct drm_minor *minor);
@@ -469,7 +479,7 @@ int vc4_kms_load(struct drm_device *dev);
 struct drm_plane *vc4_plane_init(struct drm_device *dev,
                                 enum drm_plane_type type);
 u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
-u32 vc4_plane_dlist_size(struct drm_plane_state *state);
+u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
 void vc4_plane_async_set_fb(struct drm_plane *plane,
                            struct drm_framebuffer *fb);
 
index 46899d6de6753c28c7ca6c721119a28d72792148..6155e8aca1c6490039f14cd8addcf467a7de8eb4 100644 (file)
@@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
 {
        unsigned int i;
 
-       mutex_lock(&dev->struct_mutex);
        for (i = 0; i < state->user_state.bo_count; i++)
-               drm_gem_object_unreference(state->bo[i]);
-       mutex_unlock(&dev->struct_mutex);
+               drm_gem_object_unreference_unlocked(state->bo[i]);
 
        kfree(state);
 }
@@ -687,11 +685,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        unsigned i;
 
-       /* Need the struct lock for drm_gem_object_unreference(). */
-       mutex_lock(&dev->struct_mutex);
        if (exec->bo) {
                for (i = 0; i < exec->bo_count; i++)
-                       drm_gem_object_unreference(&exec->bo[i]->base);
+                       drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
                kfree(exec->bo);
        }
 
@@ -699,9 +695,8 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
                struct vc4_bo *bo = list_first_entry(&exec->unref_list,
                                                     struct vc4_bo, unref_head);
                list_del(&bo->unref_head);
-               drm_gem_object_unreference(&bo->base.base);
+               drm_gem_object_unreference_unlocked(&bo->base.base);
        }
-       mutex_unlock(&dev->struct_mutex);
 
        mutex_lock(&vc4->power_lock);
        if (--vc4->power_refcount == 0)
index fd2644d231ff867a4409840eba7f8234a4a1faba..4452f3631cacea37bbd5dc8a594367631e308adc 100644 (file)
@@ -208,14 +208,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
        return ret;
 }
 
-static struct drm_encoder *
-vc4_hdmi_connector_best_encoder(struct drm_connector *connector)
-{
-       struct vc4_hdmi_connector *hdmi_connector =
-               to_vc4_hdmi_connector(connector);
-       return hdmi_connector->encoder;
-}
-
 static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = vc4_hdmi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
 
 static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
        .get_modes = vc4_hdmi_connector_get_modes,
-       .best_encoder = vc4_hdmi_connector_best_encoder,
 };
 
 static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
@@ -465,12 +456,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
        if (IS_ERR(hdmi->hd_regs))
                return PTR_ERR(hdmi->hd_regs);
 
-       ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
-       if (!ddc_node) {
-               DRM_ERROR("Failed to find ddc node in device tree\n");
-               return -ENODEV;
-       }
-
        hdmi->pixel_clock = devm_clk_get(dev, "pixel");
        if (IS_ERR(hdmi->pixel_clock)) {
                DRM_ERROR("Failed to get pixel clock\n");
@@ -482,7 +467,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
                return PTR_ERR(hdmi->hsm_clock);
        }
 
+       ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
+       if (!ddc_node) {
+               DRM_ERROR("Failed to find ddc node in device tree\n");
+               return -ENODEV;
+       }
+
        hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
+       of_node_put(ddc_node);
        if (!hdmi->ddc) {
                DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
                return -EPROBE_DEFER;
index cb37751bc99fcfe8976ced75f16ce4bd332c2406..4ac894d993cd80472777b2a3bacc2fcf0dd52810 100644 (file)
@@ -26,8 +26,7 @@ static void vc4_output_poll_changed(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
 
-       if (vc4->fbdev)
-               drm_fbdev_cma_hotplug_event(vc4->fbdev);
+       drm_fbdev_cma_hotplug_event(vc4->fbdev);
 }
 
 struct vc4_commit {
@@ -111,16 +110,26 @@ static int vc4_atomic_commit(struct drm_device *dev,
        int i;
        uint64_t wait_seqno = 0;
        struct vc4_commit *c;
+       struct drm_plane *plane;
+       struct drm_plane_state *new_state;
 
        c = commit_init(state);
        if (!c)
                return -ENOMEM;
 
        /* Make sure that any outstanding modesets have finished. */
-       ret = down_interruptible(&vc4->async_modeset);
-       if (ret) {
-               kfree(c);
-               return ret;
+       if (nonblock) {
+               ret = down_trylock(&vc4->async_modeset);
+               if (ret) {
+                       kfree(c);
+                       return -EBUSY;
+               }
+       } else {
+               ret = down_interruptible(&vc4->async_modeset);
+               if (ret) {
+                       kfree(c);
+                       return ret;
+               }
        }
 
        ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -130,13 +139,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
                return ret;
        }
 
-       for (i = 0; i < dev->mode_config.num_total_plane; i++) {
-               struct drm_plane *plane = state->planes[i];
-               struct drm_plane_state *new_state = state->plane_states[i];
-
-               if (!plane)
-                       continue;
-
+       for_each_plane_in_state(state, plane, new_state, i) {
                if ((plane->state->fb != new_state->fb) && new_state->fb) {
                        struct drm_gem_cma_object *cma_bo =
                                drm_fb_cma_get_gem_obj(new_state->fb, 0);
@@ -152,7 +155,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
         * the software side now.
         */
 
-       drm_atomic_helper_swap_state(dev, state);
+       drm_atomic_helper_swap_state(state, true);
 
        /*
         * Everything below can be run asynchronously without the need to grab
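For the nonblocking path added above, userspace sees the down_trylock() failure as -EBUSY instead of the commit sleeping on the modeset semaphore. A hedged libdrm sketch of how a caller might react (atomic request setup omitted):

    #include <errno.h>
    #include <xf86drmMode.h>

    int ret = drmModeAtomicCommit(fd, req,
                                  DRM_MODE_ATOMIC_NONBLOCK |
                                  DRM_MODE_PAGE_FLIP_EVENT, NULL);
    if (ret < 0 && errno == EBUSY) {
            /* A previous commit is still in flight; wait for its
             * page-flip event and retry instead of blocking here.
             */
    }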
index 4037b52fde31a2dbe4dec8c477751900d17411f2..29e4b400e25e34a63e4710e9edbba62541b9cdc5 100644 (file)
@@ -93,6 +93,14 @@ static const struct hvs_format {
                .drm = DRM_FORMAT_ARGB8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
                .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
        },
+       {
+               .drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+               .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = true,
+       },
+       {
+               .drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
+               .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = false,
+       },
        {
                .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
                .pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
@@ -690,9 +698,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
        return vc4_state->dlist_count;
 }
 
-u32 vc4_plane_dlist_size(struct drm_plane_state *state)
+u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
 {
-       struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+       const struct vc4_plane_state *vc4_state =
+               container_of(state, typeof(*vc4_state), base);
 
        return vc4_state->dlist_count;
 }
index d5c2f3c85ebbf6b1ae7ca731d12d604ca24b176e..f4e795a0d3f6125519431da466b41e12726d00f0 100644 (file)
@@ -70,7 +70,7 @@ enum qpu_raddr {
        QPU_R_ELEM_QPU = 38,
        QPU_R_NOP,
        QPU_R_XY_PIXEL_COORD = 41,
-       QPU_R_MS_REV_FLAGS = 41,
+       QPU_R_MS_REV_FLAGS = 42,
        QPU_R_VPM = 48,
        QPU_R_VPM_LD_BUSY,
        QPU_R_VPM_LD_WAIT,
@@ -230,6 +230,15 @@ enum qpu_unpack_r4 {
 #define QPU_COND_MUL_SHIFT              46
 #define QPU_COND_MUL_MASK               QPU_MASK(48, 46)
 
+#define QPU_BRANCH_COND_SHIFT           52
+#define QPU_BRANCH_COND_MASK            QPU_MASK(55, 52)
+
+#define QPU_BRANCH_REL                  ((uint64_t)1 << 51)
+#define QPU_BRANCH_REG                  ((uint64_t)1 << 50)
+
+#define QPU_BRANCH_RADDR_A_SHIFT        45
+#define QPU_BRANCH_RADDR_A_MASK         QPU_MASK(49, 45)
+
 #define QPU_SF                          ((uint64_t)1 << 45)
 
 #define QPU_WADDR_ADD_SHIFT             38
@@ -261,4 +270,10 @@ enum qpu_unpack_r4 {
 #define QPU_OP_ADD_SHIFT                24
 #define QPU_OP_ADD_MASK                 QPU_MASK(28, 24)
 
+#define QPU_LOAD_IMM_SHIFT              0
+#define QPU_LOAD_IMM_MASK               QPU_MASK(31, 0)
+
+#define QPU_BRANCH_TARGET_SHIFT         0
+#define QPU_BRANCH_TARGET_MASK          QPU_MASK(31, 0)
+
 #endif /* VC4_QPU_DEFINES_H */
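The new branch fields slot into the existing QPU_GET_FIELD() accessor pattern used throughout the driver. A small decoding sketch (the instruction value is hypothetical):

    uint64_t inst = 0;  /* hypothetical shader[ip] */
    uint32_t cond  = QPU_GET_FIELD(inst, QPU_BRANCH_COND);
    bool     rel   = inst & QPU_BRANCH_REL;  /* PC-relative branch? */
    bool     reg   = inst & QPU_BRANCH_REG;  /* register-indexed target? */
    uint32_t raddr = QPU_GET_FIELD(inst, QPU_BRANCH_RADDR_A);
    int32_t  imm   = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);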
index 6163b95c54111bde8c20bfd8e2efd233cb90b6ae..160942a9180e716e6c6d890214f0fc6e0e081ecd 100644 (file)
 #define SCALER_DISPLACT0                        0x00000030
 #define SCALER_DISPLACT1                        0x00000034
 #define SCALER_DISPLACT2                        0x00000038
+#define SCALER_DISPLACTX(x)                    (SCALER_DISPLACT0 +     \
+                                                (x) * (SCALER_DISPLACT1 - \
+                                                       SCALER_DISPLACT0))
+
 #define SCALER_DISPCTRL0                        0x00000040
 # define SCALER_DISPCTRLX_ENABLE               BIT(31)
 # define SCALER_DISPCTRLX_RESET                        BIT(30)
 # define SCALER_DISPBKGND_FILL                 BIT(24)
 
 #define SCALER_DISPSTAT0                        0x00000048
-#define SCALER_DISPBASE0                        0x0000004c
 # define SCALER_DISPSTATX_MODE_MASK            VC4_MASK(31, 30)
 # define SCALER_DISPSTATX_MODE_SHIFT           30
 # define SCALER_DISPSTATX_MODE_DISABLED                0
 # define SCALER_DISPSTATX_MODE_EOF             3
 # define SCALER_DISPSTATX_FULL                 BIT(29)
 # define SCALER_DISPSTATX_EMPTY                        BIT(28)
+# define SCALER_DISPSTATX_FRAME_COUNT_MASK     VC4_MASK(17, 12)
+# define SCALER_DISPSTATX_FRAME_COUNT_SHIFT    12
+# define SCALER_DISPSTATX_LINE_MASK            VC4_MASK(11, 0)
+# define SCALER_DISPSTATX_LINE_SHIFT           0
+
+#define SCALER_DISPBASE0                        0x0000004c
+/* Last pixel in the COB (display FIFO memory) allocated to this HVS
+ * channel.  Must be 4-pixel aligned (and thus 4 pixels less than the
+ * next COB base).
+ */
+# define SCALER_DISPBASEX_TOP_MASK             VC4_MASK(31, 16)
+# define SCALER_DISPBASEX_TOP_SHIFT            16
+/* First pixel in the COB (display FIFO memory) allocated to this HVS
+ * channel.  Must be 4-pixel aligned.
+ */
+# define SCALER_DISPBASEX_BASE_MASK            VC4_MASK(15, 0)
+# define SCALER_DISPBASEX_BASE_SHIFT           0
+
 #define SCALER_DISPCTRL1                        0x00000050
 #define SCALER_DISPBKGND1                       0x00000054
 #define SCALER_DISPBKGNDX(x)                   (SCALER_DISPBKGND0 +        \
                                                 (x) * (SCALER_DISPSTAT1 - \
                                                        SCALER_DISPSTAT0))
 #define SCALER_DISPBASE1                        0x0000005c
+#define SCALER_DISPBASEX(x)                    (SCALER_DISPBASE0 +        \
+                                                (x) * (SCALER_DISPBASE1 - \
+                                                       SCALER_DISPBASE0))
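All the per-channel macros here follow the same pattern: the channel-0 address plus the channel index times the channel-0/channel-1 stride. Worked out for DISPBASE:

    /* SCALER_DISPBASE0 = 0x4c, SCALER_DISPBASE1 = 0x5c => stride 0x10:
     *   SCALER_DISPBASEX(0) = 0x4c
     *   SCALER_DISPBASEX(1) = 0x5c
     *   SCALER_DISPBASEX(2) = 0x6c
     */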
 #define SCALER_DISPCTRL2                        0x00000060
 #define SCALER_DISPCTRLX(x)                    (SCALER_DISPCTRL0 +        \
                                                 (x) * (SCALER_DISPCTRL1 - \
index 24c2c746e8f397aafd0285323508bd2ee351d3a2..9ce1d0adf8826e893a58529328ce3f53f05150ee 100644 (file)
@@ -802,7 +802,7 @@ validate_gl_shader_rec(struct drm_device *dev,
                uint32_t src_offset = *(uint32_t *)(pkt_u + o);
                uint32_t *texture_handles_u;
                void *uniform_data_u;
-               uint32_t tex;
+               uint32_t tex, uni;
 
                *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
 
@@ -840,6 +840,17 @@ validate_gl_shader_rec(struct drm_device *dev,
                        }
                }
 
+               /* Fill in the uniform slots that need this shader's
+                * start-of-uniforms address (used for resetting the uniform
+                * stream in the presence of control flow).
+                */
+               for (uni = 0;
+                    uni < validated_shader->num_uniform_addr_offsets;
+                    uni++) {
+                       uint32_t o = validated_shader->uniform_addr_offsets[uni];
+                       ((uint32_t *)exec->uniforms_v)[o] = exec->uniforms_p;
+               }
+
                *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
 
                exec->uniforms_u += validated_shader->uniforms_src_size;
index f67124b4c5348706136e4c0f9fe06ea10b77b230..46527e989ce37839b0bd76d27dd0d5a4821bf58c 100644 (file)
 #include "vc4_drv.h"
 #include "vc4_qpu_defines.h"
 
+#define LIVE_REG_COUNT (32 + 32 + 4)
+
 struct vc4_shader_validation_state {
+       /* Current IP being validated. */
+       uint32_t ip;
+
+       /* IP at the end of the BO, do not read shader[max_ip] */
+       uint32_t max_ip;
+
+       uint64_t *shader;
+
        struct vc4_texture_sample_info tmu_setup[2];
        int tmu_write_count[2];
 
@@ -49,8 +59,30 @@ struct vc4_shader_validation_state {
         *
         * This is used for the validation of direct address memory reads.
         */
-       uint32_t live_min_clamp_offsets[32 + 32 + 4];
-       bool live_max_clamp_regs[32 + 32 + 4];
+       uint32_t live_min_clamp_offsets[LIVE_REG_COUNT];
+       bool live_max_clamp_regs[LIVE_REG_COUNT];
+       uint32_t live_immediates[LIVE_REG_COUNT];
+
+       /* Bitfield of which IPs are used as branch targets.
+        *
+        * Used for validation that the uniform stream is updated at the right
+        * points and clearing the texturing/clamping state.
+        */
+       unsigned long *branch_targets;
+
+       /* Set when entering a basic block, and cleared when the uniform
+        * address update is found.  This is used to make sure that we don't
+        * read uniforms when the address is undefined.
+        */
+       bool needs_uniform_address_update;
+
+       /* Set when we find a backwards branch.  If the branch is backwards,
+        * the target is probably doing an address reset to read uniforms,
+        * and so we need to be sure that a uniforms address is present in the
+        * stream, even if the shader didn't need to read uniforms in later
+        * basic blocks.
+        */
+       bool needs_uniform_address_for_loop;
 };
 
 static uint32_t
@@ -129,11 +161,11 @@ record_texture_sample(struct vc4_validated_shader_info *validated_shader,
 }
 
 static bool
-check_tmu_write(uint64_t inst,
-               struct vc4_validated_shader_info *validated_shader,
+check_tmu_write(struct vc4_validated_shader_info *validated_shader,
                struct vc4_shader_validation_state *validation_state,
                bool is_mul)
 {
+       uint64_t inst = validation_state->shader[validation_state->ip];
        uint32_t waddr = (is_mul ?
                          QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
                          QPU_GET_FIELD(inst, QPU_WADDR_ADD));
@@ -162,7 +194,7 @@ check_tmu_write(uint64_t inst,
                        return false;
                }
 
-               /* We assert that the the clamped address is the first
+               /* We assert that the clamped address is the first
                 * argument, and the UBO base address is the second argument.
                 * This is arbitrary, but simpler than supporting flipping the
                 * two either way.
@@ -212,8 +244,14 @@ check_tmu_write(uint64_t inst,
        /* Since direct uses a RADDR uniform reference, it will get counted in
         * check_instruction_reads()
         */
-       if (!is_direct)
+       if (!is_direct) {
+               if (validation_state->needs_uniform_address_update) {
+                       DRM_ERROR("Texturing with undefined uniform address\n");
+                       return false;
+               }
+
                validated_shader->uniforms_size += 4;
+       }
 
        if (submit) {
                if (!record_texture_sample(validated_shader,
@@ -227,23 +265,138 @@ check_tmu_write(uint64_t inst,
        return true;
 }
 
+static bool require_uniform_address_uniform(struct vc4_validated_shader_info *validated_shader)
+{
+       uint32_t o = validated_shader->num_uniform_addr_offsets;
+       uint32_t num_uniforms = validated_shader->uniforms_size / 4;
+
+       validated_shader->uniform_addr_offsets =
+               krealloc(validated_shader->uniform_addr_offsets,
+                        (o + 1) *
+                        sizeof(*validated_shader->uniform_addr_offsets),
+                        GFP_KERNEL);
+       if (!validated_shader->uniform_addr_offsets)
+               return false;
+
+       validated_shader->uniform_addr_offsets[o] = num_uniforms;
+       validated_shader->num_uniform_addr_offsets++;
+
+       return true;
+}
+
 static bool
-check_reg_write(uint64_t inst,
-               struct vc4_validated_shader_info *validated_shader,
+validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
+                              struct vc4_shader_validation_state *validation_state,
+                              bool is_mul)
+{
+       uint64_t inst = validation_state->shader[validation_state->ip];
+       u32 add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
+       u32 raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
+       u32 raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
+       u32 add_lri = raddr_add_a_to_live_reg_index(inst);
+       /* We want our reset to be pointing at whatever uniform follows the
+        * uniforms base address.
+        */
+       u32 expected_offset = validated_shader->uniforms_size + 4;
+
+       /* We only support absolute uniform address changes, and we
+        * require that they be in the current basic block before any
+        * of its uniform reads.
+        *
+        * One could potentially emit more efficient QPU code, by
+        * noticing that (say) an if statement does uniform control
+        * flow for all threads and that the if reads the same number
+        * of uniforms on each side.  However, this scheme is easy to
+        * validate so it's all we allow for now.
+        */
+
+       if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
+               DRM_ERROR("uniforms address change must be "
+                         "normal math\n");
+               return false;
+       }
+
+       if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
+               DRM_ERROR("Uniform address reset must be an ADD.\n");
+               return false;
+       }
+
+       if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
+               DRM_ERROR("Uniform address reset must be unconditional.\n");
+               return false;
+       }
+
+       if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
+           !(inst & QPU_PM)) {
+               DRM_ERROR("No packing allowed on uniforms reset\n");
+               return false;
+       }
+
+       if (add_lri == -1) {
+               DRM_ERROR("First argument of uniform address write must be "
+                         "an immediate value.\n");
+               return false;
+       }
+
+       if (validation_state->live_immediates[add_lri] != expected_offset) {
+               DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
+                         validation_state->live_immediates[add_lri],
+                         expected_offset);
+               return false;
+       }
+
+       if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
+           !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
+               DRM_ERROR("Second argument of uniform address write must be "
+                         "a uniform.\n");
+               return false;
+       }
+
+       validation_state->needs_uniform_address_update = false;
+       validation_state->needs_uniform_address_for_loop = false;
+       return require_uniform_address_uniform(validated_shader);
+}
+
+static bool
+check_reg_write(struct vc4_validated_shader_info *validated_shader,
                struct vc4_shader_validation_state *validation_state,
                bool is_mul)
 {
+       uint64_t inst = validation_state->shader[validation_state->ip];
        uint32_t waddr = (is_mul ?
                          QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
                          QPU_GET_FIELD(inst, QPU_WADDR_ADD));
+       uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+       bool ws = inst & QPU_WS;
+       bool is_b = is_mul ^ ws;
+       u32 lri = waddr_to_live_reg_index(waddr, is_b);
+
+       if (lri != -1) {
+               uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
+               uint32_t cond_mul = QPU_GET_FIELD(inst, QPU_COND_MUL);
+
+               if (sig == QPU_SIG_LOAD_IMM &&
+                   QPU_GET_FIELD(inst, QPU_PACK) == QPU_PACK_A_NOP &&
+                   ((is_mul && cond_mul == QPU_COND_ALWAYS) ||
+                    (!is_mul && cond_add == QPU_COND_ALWAYS))) {
+                       validation_state->live_immediates[lri] =
+                               QPU_GET_FIELD(inst, QPU_LOAD_IMM);
+               } else {
+                       validation_state->live_immediates[lri] = ~0;
+               }
+       }
 
        switch (waddr) {
        case QPU_W_UNIFORMS_ADDRESS:
-               /* XXX: We'll probably need to support this for reladdr, but
-                * it's definitely a security-related one.
-                */
-               DRM_ERROR("uniforms address load unsupported\n");
-               return false;
+               if (is_b) {
+                       DRM_ERROR("relative uniforms address change "
+                                 "unsupported\n");
+                       return false;
+               }
+
+               return validate_uniform_address_write(validated_shader,
+                                                     validation_state,
+                                                     is_mul);
 
        case QPU_W_TLB_COLOR_MS:
        case QPU_W_TLB_COLOR_ALL:
@@ -261,7 +414,7 @@ check_reg_write(uint64_t inst,
        case QPU_W_TMU1_T:
        case QPU_W_TMU1_R:
        case QPU_W_TMU1_B:
-               return check_tmu_write(inst, validated_shader, validation_state,
+               return check_tmu_write(validated_shader, validation_state,
                                       is_mul);
 
        case QPU_W_HOST_INT:
@@ -294,10 +447,10 @@ check_reg_write(uint64_t inst,
 }
 
 static void
-track_live_clamps(uint64_t inst,
-                 struct vc4_validated_shader_info *validated_shader,
+track_live_clamps(struct vc4_validated_shader_info *validated_shader,
                  struct vc4_shader_validation_state *validation_state)
 {
+       uint64_t inst = validation_state->shader[validation_state->ip];
        uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
@@ -369,10 +522,10 @@ track_live_clamps(uint64_t inst,
 }
 
 static bool
-check_instruction_writes(uint64_t inst,
-                        struct vc4_validated_shader_info *validated_shader,
+check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
                         struct vc4_shader_validation_state *validation_state)
 {
+       uint64_t inst = validation_state->shader[validation_state->ip];
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        bool ok;
@@ -382,20 +535,44 @@ check_instruction_writes(uint64_t inst,
                return false;
        }
 
-       ok = (check_reg_write(inst, validated_shader, validation_state,
-                             false) &&
-             check_reg_write(inst, validated_shader, validation_state,
-                             true));
+       ok = (check_reg_write(validated_shader, validation_state, false) &&
+             check_reg_write(validated_shader, validation_state, true));
 
-       track_live_clamps(inst, validated_shader, validation_state);
+       track_live_clamps(validated_shader, validation_state);
 
        return ok;
 }
 
 static bool
-check_instruction_reads(uint64_t inst,
-                       struct vc4_validated_shader_info *validated_shader)
+check_branch(uint64_t inst,
+            struct vc4_validated_shader_info *validated_shader,
+            struct vc4_shader_validation_state *validation_state,
+            int ip)
+{
+       int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
+       uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
+       uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
+
+       if ((int)branch_imm < 0)
+               validation_state->needs_uniform_address_for_loop = true;
+
+       /* We don't want to have to worry about validation of this, and
+        * there's no need for it.
+        */
+       if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
+               DRM_ERROR("branch instruction at %d wrote a register.\n",
+                         validation_state->ip);
+               return false;
+       }
+
+       return true;
+}
+
+static bool
+check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
+                       struct vc4_shader_validation_state *validation_state)
 {
+       uint64_t inst = validation_state->shader[validation_state->ip];
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
@@ -407,40 +584,204 @@ check_instruction_reads(uint64_t inst,
                 * already be OOM.
                 */
                validated_shader->uniforms_size += 4;
+
+               if (validation_state->needs_uniform_address_update) {
+                       DRM_ERROR("Uniform read with undefined uniform "
+                                 "address\n");
+                       return false;
+               }
+       }
+
+       return true;
+}
+
+/* Make sure that all branches are absolute and point within the shader, and
+ * note their targets for later.
+ */
+static bool
+vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
+{
+       uint32_t max_branch_target = 0;
+       bool found_shader_end = false;
+       int ip;
+       int shader_end_ip = 0;
+       int last_branch = -2;
+
+       for (ip = 0; ip < validation_state->max_ip; ip++) {
+               uint64_t inst = validation_state->shader[ip];
+               int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
+               uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
+               uint32_t after_delay_ip = ip + 4;
+               uint32_t branch_target_ip;
+
+               if (sig == QPU_SIG_PROG_END) {
+                       shader_end_ip = ip;
+                       found_shader_end = true;
+                       continue;
+               }
+
+               if (sig != QPU_SIG_BRANCH)
+                       continue;
+
+               if (ip - last_branch < 4) {
+                       DRM_ERROR("Branch at %d during delay slots\n", ip);
+                       return false;
+               }
+               last_branch = ip;
+
+               if (inst & QPU_BRANCH_REG) {
+                       DRM_ERROR("branching from register relative "
+                                 "not supported\n");
+                       return false;
+               }
+
+               if (!(inst & QPU_BRANCH_REL)) {
+                       DRM_ERROR("relative branching required\n");
+                       return false;
+               }
+
+               /* The actual branch target is the instruction after the delay
+                * slots, plus whatever byte offset is in the low 32 bits of
+                * the instruction.  Make sure we're not branching beyond the
+                * end of the shader object.
+                */
+               if (branch_imm % sizeof(inst) != 0) {
+                       DRM_ERROR("branch target not aligned\n");
+                       return false;
+               }
+
+               branch_target_ip = after_delay_ip + (branch_imm >> 3);
+               if (branch_target_ip >= validation_state->max_ip) {
+                       DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
+                                 ip, branch_target_ip,
+                                 validation_state->max_ip);
+                       return false;
+               }
+               set_bit(branch_target_ip, validation_state->branch_targets);
+
+               /* Make sure that the non-branching path is also not outside
+                * the shader.
+                */
+               if (after_delay_ip >= validation_state->max_ip) {
+                       DRM_ERROR("Branch at %d continues past shader end "
+                                 "(%d/%d)\n",
+                                 ip, after_delay_ip, validation_state->max_ip);
+                       return false;
+               }
+               set_bit(after_delay_ip, validation_state->branch_targets);
+               max_branch_target = max(max_branch_target, after_delay_ip);
+
+               /* There are two delay slots after program end is signaled
+                * that are still executed, then we're finished.
+                */
+               if (found_shader_end && ip == shader_end_ip + 2)
+                       break;
+       }
+
+       if (max_branch_target > shader_end_ip) {
+               DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
+               return false;
        }
 
        return true;
 }
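To make the delay-slot arithmetic above concrete, a worked example with invented numbers:

    /* A relative branch at ip = 10 with branch_imm = -24 bytes:
     *   after_delay_ip   = 10 + 4          = 14 (first instruction after
     *                                            the three delay slots)
     *   branch_target_ip = 14 + (-24 >> 3) = 11
     * Both 11 and 14 get set in the branch_targets bitmap.
     */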
 
+/* Resets any known state for the shader, used when we may be branched to from
+ * multiple locations in the program (or at shader start).
+ */
+static void
+reset_validation_state(struct vc4_shader_validation_state *validation_state)
+{
+       int i;
+
+       for (i = 0; i < 8; i++)
+               validation_state->tmu_setup[i / 4].p_offset[i % 4] = ~0;
+
+       for (i = 0; i < LIVE_REG_COUNT; i++) {
+               validation_state->live_min_clamp_offsets[i] = ~0;
+               validation_state->live_max_clamp_regs[i] = false;
+               validation_state->live_immediates[i] = ~0;
+       }
+}
+
+static bool
+texturing_in_progress(struct vc4_shader_validation_state *validation_state)
+{
+       return (validation_state->tmu_write_count[0] != 0 ||
+               validation_state->tmu_write_count[1] != 0);
+}
+
+static bool
+vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
+{
+       uint32_t ip = validation_state->ip;
+
+       if (!test_bit(ip, validation_state->branch_targets))
+               return true;
+
+       if (texturing_in_progress(validation_state)) {
+               DRM_ERROR("Branch target landed during TMU setup\n");
+               return false;
+       }
+
+       /* Reset our live values tracking, since this instruction may have
+        * multiple predecessors.
+        *
+        * One could potentially do analysis to determine that, for
+        * example, all predecessors have a live max clamp in the same
+        * register, but we don't bother with that.
+        */
+       reset_validation_state(validation_state);
+
+       /* Since we've entered a basic block from potentially multiple
+        * predecessors, we need the uniforms address to be updated before any
+        * uniforms are read.  We require that after any branch point, the next
+        * uniform to be loaded is a uniform address offset.  That uniform's
+        * offset will be marked by the uniform address register write
+        * validation, or by a one-off end-of-program check.
+        */
+       validation_state->needs_uniform_address_update = true;
+
+       return true;
+}
+
 struct vc4_validated_shader_info *
 vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
 {
        bool found_shader_end = false;
        int shader_end_ip = 0;
-       uint32_t ip, max_ip;
-       uint64_t *shader;
-       struct vc4_validated_shader_info *validated_shader;
+       uint32_t ip;
+       struct vc4_validated_shader_info *validated_shader = NULL;
        struct vc4_shader_validation_state validation_state;
-       int i;
 
        memset(&validation_state, 0, sizeof(validation_state));
+       validation_state.shader = shader_obj->vaddr;
+       validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
 
-       for (i = 0; i < 8; i++)
-               validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
-       for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
-               validation_state.live_min_clamp_offsets[i] = ~0;
+       reset_validation_state(&validation_state);
 
-       shader = shader_obj->vaddr;
-       max_ip = shader_obj->base.size / sizeof(uint64_t);
+       validation_state.branch_targets =
+               kcalloc(BITS_TO_LONGS(validation_state.max_ip),
+                       sizeof(unsigned long), GFP_KERNEL);
+       if (!validation_state.branch_targets)
+               goto fail;
 
        validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
        if (!validated_shader)
-               return NULL;
+               goto fail;
+
+       if (!vc4_validate_branches(&validation_state))
+               goto fail;
 
-       for (ip = 0; ip < max_ip; ip++) {
-               uint64_t inst = shader[ip];
+       for (ip = 0; ip < validation_state.max_ip; ip++) {
+               uint64_t inst = validation_state.shader[ip];
                uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
 
+               validation_state.ip = ip;
+
+               if (!vc4_handle_branch_target(&validation_state))
+                       goto fail;
+
                switch (sig) {
                case QPU_SIG_NONE:
                case QPU_SIG_WAIT_FOR_SCOREBOARD:
@@ -450,13 +791,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
                case QPU_SIG_LOAD_TMU1:
                case QPU_SIG_PROG_END:
                case QPU_SIG_SMALL_IMM:
-                       if (!check_instruction_writes(inst, validated_shader,
+                       if (!check_instruction_writes(validated_shader,
                                                      &validation_state)) {
                                DRM_ERROR("Bad write at ip %d\n", ip);
                                goto fail;
                        }
 
-                       if (!check_instruction_reads(inst, validated_shader))
+                       if (!check_instruction_reads(validated_shader,
+                                                    &validation_state))
                                goto fail;
 
                        if (sig == QPU_SIG_PROG_END) {
@@ -467,13 +809,18 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
                        break;
 
                case QPU_SIG_LOAD_IMM:
-                       if (!check_instruction_writes(inst, validated_shader,
+                       if (!check_instruction_writes(validated_shader,
                                                      &validation_state)) {
                                DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
                                goto fail;
                        }
                        break;
 
+               case QPU_SIG_BRANCH:
+                       if (!check_branch(inst, validated_shader,
+                                         &validation_state, ip))
+                               goto fail;
+                       break;
                default:
                        DRM_ERROR("Unsupported QPU signal %d at "
                                  "instruction %d\n", sig, ip);
@@ -487,13 +834,28 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
                        break;
        }
 
-       if (ip == max_ip) {
+       if (ip == validation_state.max_ip) {
                DRM_ERROR("shader failed to terminate before "
                          "shader BO end at %zd\n",
                          shader_obj->base.size);
                goto fail;
        }
 
+       /* If we did a backwards branch and we haven't emitted a uniforms
+        * reset since then, we still need the uniforms stream to have the
+        * uniforms address available so that the backwards branch can do its
+        * uniforms reset.
+        *
+        * We could potentially prove that the backwards branch doesn't
+        * contain any uses of uniforms until program exit, but that doesn't
+        * seem to be worth the trouble.
+        */
+       if (validation_state.needs_uniform_address_for_loop) {
+               if (!require_uniform_address_uniform(validated_shader))
+                       goto fail;
+               validated_shader->uniforms_size += 4;
+       }
+
        /* Again, no chance of integer overflow here because the worst case
         * scenario is 8 bytes of uniforms plus handles per 8-byte
         * instruction.
@@ -502,9 +864,12 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
                (validated_shader->uniforms_size +
                 4 * validated_shader->num_texture_samples);
 
+       kfree(validation_state.branch_targets);
+
        return validated_shader;
 
 fail:
+       kfree(validation_state.branch_targets);
        if (validated_shader) {
                kfree(validated_shader->texture_samples);
                kfree(validated_shader);
index 3f4c7b8420287188288641e66b9f82addd5b21f3..bfcdea1330e64282fd379de21de87087cc993122 100644 (file)
@@ -1,4 +1,4 @@
 ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o
+vgem-y := vgem_drv.o vgem_fence.o
 
 obj-$(CONFIG_DRM_VGEM) += vgem.o
index 341f9be3dde6edfbdccaf25a08382256f20cba0e..c15bafb06665f0ec8c788cd1c21d7067dc5a8df3 100644 (file)
 #define DRIVER_MAJOR   1
 #define DRIVER_MINOR   0
 
-void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
-{
-       drm_gem_put_pages(&obj->base, obj->pages, false, false);
-       obj->pages = NULL;
-}
-
 static void vgem_gem_free_object(struct drm_gem_object *obj)
 {
        struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
 
-       drm_gem_free_mmap_offset(obj);
-
-       if (vgem_obj->use_dma_buf && obj->dma_buf) {
-               dma_buf_put(obj->dma_buf);
-               obj->dma_buf = NULL;
-       }
-
        drm_gem_object_release(obj);
-
-       if (vgem_obj->pages)
-               vgem_gem_put_pages(vgem_obj);
-
-       vgem_obj->pages = NULL;
-
        kfree(vgem_obj);
 }
 
-int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
-{
-       struct page **pages;
-
-       if (obj->pages || obj->use_dma_buf)
-               return 0;
-
-       pages = drm_gem_get_pages(&obj->base);
-       if (IS_ERR(pages)) {
-               return PTR_ERR(pages);
-       }
-
-       obj->pages = pages;
-
-       return 0;
-}
-
 static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct drm_vgem_gem_object *obj = vma->vm_private_data;
-       loff_t num_pages;
-       pgoff_t page_offset;
-       int ret;
-
        /* We don't use vmf->pgoff since that has the fake offset */
-       page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
-               PAGE_SHIFT;
-
-       num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
-       if (page_offset > num_pages)
-               return VM_FAULT_SIGBUS;
-
-       ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
-                            obj->pages[page_offset]);
-       switch (ret) {
-       case 0:
-               return VM_FAULT_NOPAGE;
-       case -ENOMEM:
-               return VM_FAULT_OOM;
-       case -EBUSY:
-               return VM_FAULT_RETRY;
-       case -EFAULT:
-       case -EINVAL:
-               return VM_FAULT_SIGBUS;
-       default:
-               WARN_ON(1);
-               return VM_FAULT_SIGBUS;
+       unsigned long vaddr = (unsigned long)vmf->virtual_address;
+       struct page *page;
+
+       page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
+                                      (vaddr - vma->vm_start) >> PAGE_SHIFT);
+       if (!IS_ERR(page)) {
+               vmf->page = page;
+               return 0;
+       } else switch (PTR_ERR(page)) {
+               case -ENOSPC:
+               case -ENOMEM:
+                       return VM_FAULT_OOM;
+               case -EBUSY:
+                       return VM_FAULT_RETRY;
+               case -EFAULT:
+               case -EINVAL:
+                       return VM_FAULT_SIGBUS;
+               default:
+                       WARN_ON_ONCE(PTR_ERR(page));
+                       return VM_FAULT_SIGBUS;
        }
 }
 
@@ -126,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
        .close = drm_gem_vm_close,
 };
 
+static int vgem_open(struct drm_device *dev, struct drm_file *file)
+{
+       struct vgem_file *vfile;
+       int ret;
+
+       vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
+       if (!vfile)
+               return -ENOMEM;
+
+       file->driver_priv = vfile;
+
+       ret = vgem_fence_open(vfile);
+       if (ret) {
+               kfree(vfile);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
+{
+       struct vgem_file *vfile = file->driver_priv;
+
+       vgem_fence_close(vfile);
+       kfree(vfile);
+}
+
 /* ioctls */
 
 static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
@@ -134,57 +119,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
                                              unsigned long size)
 {
        struct drm_vgem_gem_object *obj;
-       struct drm_gem_object *gem_object;
-       int err;
-
-       size = roundup(size, PAGE_SIZE);
+       int ret;
 
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);
 
-       gem_object = &obj->base;
-
-       err = drm_gem_object_init(dev, gem_object, size);
-       if (err)
-               goto out;
-
-       err = vgem_gem_get_pages(obj);
-       if (err)
-               goto out;
-
-       err = drm_gem_handle_create(file, gem_object, handle);
-       if (err)
-               goto handle_out;
+       ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
+       if (ret)
+               goto err_free;
 
-       drm_gem_object_unreference_unlocked(gem_object);
+       ret = drm_gem_handle_create(file, &obj->base, handle);
+       drm_gem_object_unreference_unlocked(&obj->base);
+       if (ret)
+               goto err;
 
-       return gem_object;
+       return &obj->base;
 
-handle_out:
-       drm_gem_object_release(gem_object);
-out:
+err_free:
        kfree(obj);
-       return ERR_PTR(err);
+err:
+       return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                                struct drm_mode_create_dumb *args)
 {
        struct drm_gem_object *gem_object;
-       uint64_t size;
-       uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
+       u64 pitch, size;
 
+       pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
        size = args->height * pitch;
        if (size == 0)
                return -EINVAL;
 
        gem_object = vgem_gem_create(dev, file, &args->handle, size);
-
-       if (IS_ERR(gem_object)) {
-               DRM_DEBUG_DRIVER("object creation failed\n");
+       if (IS_ERR(gem_object))
                return PTR_ERR(gem_object);
-       }
 
        args->size = gem_object->size;
        args->pitch = pitch;
@@ -194,26 +165,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
        return 0;
 }
 
-int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
-                     uint32_t handle, uint64_t *offset)
+static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
+                            uint32_t handle, uint64_t *offset)
 {
-       int ret = 0;
        struct drm_gem_object *obj;
+       int ret;
 
        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;
 
+       if (!obj->filp) {
+               ret = -EINVAL;
+               goto unref;
+       }
+
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto unref;
 
-       BUG_ON(!obj->filp);
-
-       obj->filp->private_data = obj;
-
        *offset = drm_vma_node_offset_addr(&obj->vma_node);
-
 unref:
        drm_gem_object_unreference_unlocked(obj);
 
@@ -221,26 +192,134 @@ unref:
 }
 
 static struct drm_ioctl_desc vgem_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
+static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       unsigned long flags = vma->vm_flags;
+       int ret;
+
+       ret = drm_gem_mmap(filp, vma);
+       if (ret)
+               return ret;
+
+       /* Keep the WC mapping set by drm_gem_mmap() but our pages
+        * are ordinary and not special.
+        */
+       vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
+       return 0;
+}
+
 static const struct file_operations vgem_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
-       .mmap           = drm_gem_mmap,
+       .mmap           = vgem_mmap,
        .poll           = drm_poll,
        .read           = drm_read,
        .unlocked_ioctl = drm_ioctl,
        .release        = drm_release,
 };
 
+static int vgem_prime_pin(struct drm_gem_object *obj)
+{
+       long n_pages = obj->size >> PAGE_SHIFT;
+       struct page **pages;
+
+       /* Flush the object from the CPU cache so that importers can rely
+        * on coherent indirect access via the exported dma-address.
+        */
+       pages = drm_gem_get_pages(obj);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
+
+       drm_clflush_pages(pages, n_pages);
+       drm_gem_put_pages(obj, pages, true, false);
+
+       return 0;
+}
+
+static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+       struct sg_table *st;
+       struct page **pages;
+
+       pages = drm_gem_get_pages(obj);
+       if (IS_ERR(pages))
+               return ERR_CAST(pages);
+
+       st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
+       drm_gem_put_pages(obj, pages, false, false);
+
+       return st;
+}
+
+static void *vgem_prime_vmap(struct drm_gem_object *obj)
+{
+       long n_pages = obj->size >> PAGE_SHIFT;
+       struct page **pages;
+       void *addr;
+
+       pages = drm_gem_get_pages(obj);
+       if (IS_ERR(pages))
+               return NULL;
+
+       addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+       drm_gem_put_pages(obj, pages, false, false);
+
+       return addr;
+}
+
+static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+       vunmap(vaddr);
+}
+
+static int vgem_prime_mmap(struct drm_gem_object *obj,
+                          struct vm_area_struct *vma)
+{
+       int ret;
+
+       if (obj->size < vma->vm_end - vma->vm_start)
+               return -EINVAL;
+
+       if (!obj->filp)
+               return -ENODEV;
+
+       ret = obj->filp->f_op->mmap(obj->filp, vma);
+       if (ret)
+               return ret;
+
+       fput(vma->vm_file);
+       vma->vm_file = get_file(obj->filp);
+       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+       return 0;
+}
+
 static struct drm_driver vgem_driver = {
-       .driver_features                = DRIVER_GEM,
-       .gem_free_object                = vgem_gem_free_object,
+       .driver_features                = DRIVER_GEM | DRIVER_PRIME,
+       .open                           = vgem_open,
+       .preclose                       = vgem_preclose,
+       .gem_free_object_unlocked       = vgem_gem_free_object,
        .gem_vm_ops                     = &vgem_gem_vm_ops,
        .ioctls                         = vgem_ioctls,
+       .num_ioctls                     = ARRAY_SIZE(vgem_ioctls),
        .fops                           = &vgem_driver_fops,
+
        .dumb_create                    = vgem_gem_dumb_create,
        .dumb_map_offset                = vgem_gem_dumb_map,
+
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .gem_prime_pin = vgem_prime_pin,
+       .gem_prime_export = drm_gem_prime_export,
+       .gem_prime_get_sg_table = vgem_prime_get_sg_table,
+       .gem_prime_vmap = vgem_prime_vmap,
+       .gem_prime_vunmap = vgem_prime_vunmap,
+       .gem_prime_mmap = vgem_prime_mmap,
+
        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
        .date   = DRIVER_DATE,
@@ -248,7 +327,7 @@ static struct drm_driver vgem_driver = {
        .minor  = DRIVER_MINOR,
 };
 
-struct drm_device *vgem_device;
+static struct drm_device *vgem_device;
 
 static int __init vgem_init(void)
 {
@@ -260,10 +339,7 @@ static int __init vgem_init(void)
                goto out;
        }
 
-       drm_dev_set_unique(vgem_device, "vgem");
-
        ret  = drm_dev_register(vgem_device, 0);
-
        if (ret)
                goto out_unref;
 
@@ -285,5 +361,6 @@ module_init(vgem_init);
 module_exit(vgem_exit);
 
 MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
index e9f92f7ee275cf791b8f81d8a53c1a8f3c45ba8e..1f8798ad329c416a0aa9c867d0ceae826ae6c313 100644 (file)
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
 
+#include <uapi/drm/vgem_drm.h>
+
+struct vgem_file {
+       struct idr fence_idr;
+       struct mutex fence_mutex;
+};
+
 #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
 struct drm_vgem_gem_object {
        struct drm_gem_object base;
-       struct page **pages;
-       bool use_dma_buf;
 };
 
-/* vgem_drv.c */
-extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
-extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
+int vgem_fence_open(struct vgem_file *file);
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+                           void *data,
+                           struct drm_file *file);
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+                           void *data,
+                           struct drm_file *file);
+void vgem_fence_close(struct vgem_file *file);
 
 #endif
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
new file mode 100644 (file)
index 0000000..5c57c1f
--- /dev/null
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software")
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "vgem_drv.h"
+
+#define VGEM_FENCE_TIMEOUT (10*HZ)
+
+struct vgem_fence {
+       struct fence base;
+       struct spinlock lock;
+       struct timer_list timer;
+};
+
+static const char *vgem_fence_get_driver_name(struct fence *fence)
+{
+       return "vgem";
+}
+
+static const char *vgem_fence_get_timeline_name(struct fence *fence)
+{
+       return "unbound";
+}
+
+static bool vgem_fence_signaled(struct fence *fence)
+{
+       return false;
+}
+
+static bool vgem_fence_enable_signaling(struct fence *fence)
+{
+       return true;
+}
+
+static void vgem_fence_release(struct fence *base)
+{
+       struct vgem_fence *fence = container_of(base, typeof(*fence), base);
+
+       del_timer_sync(&fence->timer);
+       fence_free(&fence->base);
+}
+
+static void vgem_fence_value_str(struct fence *fence, char *str, int size)
+{
+       snprintf(str, size, "%u", fence->seqno);
+}
+
+static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
+                                         int size)
+{
+       snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
+}
+
+static const struct fence_ops vgem_fence_ops = {
+       .get_driver_name = vgem_fence_get_driver_name,
+       .get_timeline_name = vgem_fence_get_timeline_name,
+       .enable_signaling = vgem_fence_enable_signaling,
+       .signaled = vgem_fence_signaled,
+       .wait = fence_default_wait,
+       .release = vgem_fence_release,
+
+       .fence_value_str = vgem_fence_value_str,
+       .timeline_value_str = vgem_fence_timeline_value_str,
+};
+
+static void vgem_fence_timeout(unsigned long data)
+{
+       struct vgem_fence *fence = (struct vgem_fence *)data;
+
+       fence_signal(&fence->base);
+}
+
+static struct fence *vgem_fence_create(struct vgem_file *vfile,
+                                      unsigned int flags)
+{
+       struct vgem_fence *fence;
+
+       fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+       if (!fence)
+               return NULL;
+
+       spin_lock_init(&fence->lock);
+       fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
+                  fence_context_alloc(1), 1);
+
+       setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+
+       /* We force the fence to expire within 10s to prevent driver hangs */
+       mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
+
+       return &fence->base;
+}
+
+static int attach_dmabuf(struct drm_device *dev,
+                        struct drm_gem_object *obj)
+{
+       struct dma_buf *dmabuf;
+
+       if (obj->dma_buf)
+               return 0;
+
+       dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
+       if (IS_ERR(dmabuf))
+               return PTR_ERR(dmabuf);
+
+       obj->dma_buf = dmabuf;
+       drm_gem_object_reference(obj);
+       return 0;
+}
+
+/*
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
+ *
+ * Create and attach a fence to the vGEM handle. This fence is then exposed
+ * via the dma-buf reservation object and visible to consumers of the exported
+ * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
+ * vGEM buffer is being written to by the client and is exposed as an exclusive
+ * fence, otherwise the fence indicates the client is currently reading from the
+ * buffer and all future writes should wait for the client to signal its
+ * completion. Note that if a conflicting fence is already on the dma-buf (i.e.
+ * an exclusive fence when adding a read, or any fence when adding a write),
+ * -EBUSY is reported. Serialisation between operations should be handled
+ * by waiting upon the dma-buf.
+ *
+ * This returns the handle for the new fence that must be signaled within 10
+ * seconds (otherwise it will automatically expire). See
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
+ *
+ * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
+ */
+int vgem_fence_attach_ioctl(struct drm_device *dev,
+                           void *data,
+                           struct drm_file *file)
+{
+       struct drm_vgem_fence_attach *arg = data;
+       struct vgem_file *vfile = file->driver_priv;
+       struct reservation_object *resv;
+       struct drm_gem_object *obj;
+       struct fence *fence;
+       int ret;
+
+       if (arg->flags & ~VGEM_FENCE_WRITE)
+               return -EINVAL;
+
+       if (arg->pad)
+               return -EINVAL;
+
+       obj = drm_gem_object_lookup(file, arg->handle);
+       if (!obj)
+               return -ENOENT;
+
+       ret = attach_dmabuf(dev, obj);
+       if (ret)
+               goto err;
+
+       fence = vgem_fence_create(vfile, arg->flags);
+       if (!fence) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       /* Check for a conflicting fence */
+       resv = obj->dma_buf->resv;
+       if (!reservation_object_test_signaled_rcu(resv,
+                                                 arg->flags & VGEM_FENCE_WRITE)) {
+               ret = -EBUSY;
+               goto err_fence;
+       }
+
+       /* Expose the fence via the dma-buf */
+       ret = 0;
+       mutex_lock(&resv->lock.base);
+       if (arg->flags & VGEM_FENCE_WRITE)
+               reservation_object_add_excl_fence(resv, fence);
+       else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+               reservation_object_add_shared_fence(resv, fence);
+       mutex_unlock(&resv->lock.base);
+
+       /* Record the fence in our idr for later signaling */
+       if (ret == 0) {
+               mutex_lock(&vfile->fence_mutex);
+               ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
+               mutex_unlock(&vfile->fence_mutex);
+               if (ret > 0) {
+                       arg->out_fence = ret;
+                       ret = 0;
+               }
+       }
+err_fence:
+       if (ret) {
+               fence_signal(fence);
+               fence_put(fence);
+       }
+err:
+       drm_gem_object_unreference_unlocked(obj);
+       return ret;
+}
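
Since the attach ioctl above is new in this series, a minimal userspace
sketch may help. It is illustrative only: it assumes libdrm's drmIoctl()
and the vgem_drm.h uapi header added alongside this patch, and the helper
name is hypothetical.

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <drm/vgem_drm.h>

	/* Attach an exclusive (write) fence to a vGEM buffer handle.
	 * Returns 0 and stores the fence handle in *out_fence; on error
	 * drmIoctl() sets errno (EBUSY for a conflicting fence, ENOENT
	 * for an unknown handle).
	 */
	static int vgem_attach_write_fence(int fd, uint32_t handle,
					   uint32_t *out_fence)
	{
		struct drm_vgem_fence_attach attach = {
			.handle = handle,
			.flags = VGEM_FENCE_WRITE,
		};

		if (drmIoctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
			return -errno;

		*out_fence = attach.out_fence;	/* signal within 10s */
		return 0;
	}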
+
+/*
+ * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
+ *
+ * Signal and consume a fence earlier attached to a vGEM handle using
+ * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
+ *
+ * All fences must be signaled within 10s of attachment, otherwise they
+ * will automatically expire (and vgem_fence_signal_ioctl returns -ETIMEDOUT).
+ *
+ * Signaling a fence indicates to all consumers of the dma-buf that the
+ * client has completed the operation associated with the fence, and that the
+ * buffer is then ready for consumption.
+ *
+ * If the fence does not exist (or has already been signaled by the client),
+ * vgem_fence_signal_ioctl returns -ENOENT.
+ */
+int vgem_fence_signal_ioctl(struct drm_device *dev,
+                           void *data,
+                           struct drm_file *file)
+{
+       struct vgem_file *vfile = file->driver_priv;
+       struct drm_vgem_fence_signal *arg = data;
+       struct fence *fence;
+       int ret = 0;
+
+       if (arg->flags)
+               return -EINVAL;
+
+       mutex_lock(&vfile->fence_mutex);
+       fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
+       mutex_unlock(&vfile->fence_mutex);
+       if (!fence)
+               return -ENOENT;
+       if (IS_ERR(fence))
+               return PTR_ERR(fence);
+
+       if (fence_is_signaled(fence))
+               ret = -ETIMEDOUT;
+
+       fence_signal(fence);
+       fence_put(fence);
+       return ret;
+}
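
The matching signal call, under the same assumptions as the sketch above:

	/* Consume the fence created by vgem_attach_write_fence() once the
	 * client's access is complete.  drmIoctl() fails with errno set to
	 * ETIMEDOUT if the 10s auto-expiry fired first, or ENOENT if the
	 * fence was already signaled.
	 */
	static int vgem_signal_fence(int fd, uint32_t fence)
	{
		struct drm_vgem_fence_signal signal = {
			.fence = fence,
		};

		if (drmIoctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal))
			return -errno;

		return 0;
	}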
+
+int vgem_fence_open(struct vgem_file *vfile)
+{
+       mutex_init(&vfile->fence_mutex);
+       idr_init(&vfile->fence_idr);
+
+       return 0;
+}
+
+static int __vgem_fence_idr_fini(int id, void *p, void *data)
+{
+       fence_signal(p);
+       fence_put(p);
+       return 0;
+}
+
+void vgem_fence_close(struct vgem_file *vfile)
+{
+       idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
+       idr_destroy(&vfile->fence_idr);
+}
index 4f20742e7788c4fe0626782c18b888795d5179c8..a04ef1c992d99fa2b3dc5428a7eb30d851607a71 100644 (file)
@@ -208,7 +208,7 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
        struct via_file_private *file_priv = file->driver_priv;
        struct via_memblock *entry, *next;
 
-       if (!(file->minor->master && file->master->lock.hw_lock))
+       if (!(dev->master && file->master->lock.hw_lock))
                return;
 
        drm_legacy_idlelock_take(&file->master->lock);
index 9983eadb81b6a18bb9eef9afc80895970fa5ac18..e1afc3d3f8d95cfb33d1905fcbccf0ab0201190d 100644 (file)
@@ -1,11 +1,7 @@
 config DRM_VIRTIO_GPU
        tristate "Virtio GPU driver"
        depends on DRM && VIRTIO
-       select FB_SYS_FILLRECT
-       select FB_SYS_COPYAREA
-       select FB_SYS_IMAGEBLIT
         select DRM_KMS_HELPER
-        select DRM_KMS_FB_HELPER
         select DRM_TTM
        help
           This is the virtual GPU driver for virtio.  It can be used with
index d4305da88f44a3d28d8c6db5fad066eac52ebcf9..4e192aa2d0216fe15d9f7746e3092886b55e031d 100644 (file)
@@ -29,8 +29,8 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_atomic_helper.h>
 
-#define XRES_MIN   320
-#define YRES_MIN   200
+#define XRES_MIN    32
+#define YRES_MIN    32
 
 #define XRES_DEF  1024
 #define YRES_DEF   768
 #define XRES_MAX  8192
 #define YRES_MAX  8192
 
-static void
-virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
-                      struct virtio_gpu_output *output)
-{
-       output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
-       output->cursor.resource_id = 0;
-       virtio_gpu_cursor_ping(vgdev, output);
-}
-
-static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
-                                     struct drm_file *file_priv,
-                                     uint32_t handle,
-                                     uint32_t width,
-                                     uint32_t height,
-                                     int32_t hot_x, int32_t hot_y)
-{
-       struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
-       struct virtio_gpu_output *output =
-               container_of(crtc, struct virtio_gpu_output, crtc);
-       struct drm_gem_object *gobj = NULL;
-       struct virtio_gpu_object *qobj = NULL;
-       struct virtio_gpu_fence *fence = NULL;
-       int ret = 0;
-
-       if (handle == 0) {
-               virtio_gpu_hide_cursor(vgdev, output);
-               return 0;
-       }
-
-       /* lookup the cursor */
-       gobj = drm_gem_object_lookup(file_priv, handle);
-       if (gobj == NULL)
-               return -ENOENT;
-
-       qobj = gem_to_virtio_gpu_obj(gobj);
-
-       if (!qobj->hw_res_handle) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
-                                          cpu_to_le32(64),
-                                          cpu_to_le32(64),
-                                          0, 0, &fence);
-       ret = virtio_gpu_object_reserve(qobj, false);
-       if (!ret) {
-               reservation_object_add_excl_fence(qobj->tbo.resv,
-                                                 &fence->f);
-               fence_put(&fence->f);
-               virtio_gpu_object_unreserve(qobj);
-               virtio_gpu_object_wait(qobj, false);
-       }
-
-       output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
-       output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
-       output->cursor.hot_x = cpu_to_le32(hot_x);
-       output->cursor.hot_y = cpu_to_le32(hot_y);
-       virtio_gpu_cursor_ping(vgdev, output);
-       ret = 0;
-
-out:
-       drm_gem_object_unreference_unlocked(gobj);
-       return ret;
-}
-
-static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
-                                   int x, int y)
-{
-       struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
-       struct virtio_gpu_output *output =
-               container_of(crtc, struct virtio_gpu_output, crtc);
-
-       output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
-       output->cursor.pos.x = cpu_to_le32(x);
-       output->cursor.pos.y = cpu_to_le32(y);
-       virtio_gpu_cursor_ping(vgdev, output);
-       return 0;
-}
-
-static int virtio_gpu_page_flip(struct drm_crtc *crtc,
-                               struct drm_framebuffer *fb,
-                               struct drm_pending_vblank_event *event,
-                               uint32_t flags)
-{
-       struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
-       struct virtio_gpu_output *output =
-               container_of(crtc, struct virtio_gpu_output, crtc);
-       struct drm_plane *plane = crtc->primary;
-       struct virtio_gpu_framebuffer *vgfb;
-       struct virtio_gpu_object *bo;
-       unsigned long irqflags;
-       uint32_t handle;
-
-       plane->fb = fb;
-       vgfb = to_virtio_gpu_framebuffer(plane->fb);
-       bo = gem_to_virtio_gpu_obj(vgfb->obj);
-       handle = bo->hw_res_handle;
-
-       DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle,
-                 bo->dumb ? ", dumb" : "",
-                 crtc->mode.hdisplay, crtc->mode.vdisplay);
-       if (bo->dumb) {
-               virtio_gpu_cmd_transfer_to_host_2d
-                       (vgdev, handle, 0,
-                        cpu_to_le32(crtc->mode.hdisplay),
-                        cpu_to_le32(crtc->mode.vdisplay),
-                        0, 0, NULL);
-       }
-       virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
-                                  crtc->mode.hdisplay,
-                                  crtc->mode.vdisplay, 0, 0);
-       virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0,
-                                     crtc->mode.hdisplay,
-                                     crtc->mode.vdisplay);
-
-       if (event) {
-               spin_lock_irqsave(&crtc->dev->event_lock, irqflags);
-               drm_send_vblank_event(crtc->dev, -1, event);
-               spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags);
-       }
-
-       return 0;
-}
-
 static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
-       .cursor_set2            = virtio_gpu_crtc_cursor_set,
-       .cursor_move            = virtio_gpu_crtc_cursor_move,
        .set_config             = drm_atomic_helper_set_config,
        .destroy                = drm_crtc_cleanup,
 
-       .page_flip              = virtio_gpu_page_flip,
+       .page_flip              = drm_atomic_helper_page_flip,
        .reset                  = drm_atomic_helper_crtc_reset,
        .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
        .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
@@ -180,8 +53,7 @@ static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
        struct virtio_gpu_framebuffer *virtio_gpu_fb
                = to_virtio_gpu_framebuffer(fb);
 
-       if (virtio_gpu_fb->obj)
-               drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
+       drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(virtio_gpu_fb);
 }
@@ -267,6 +139,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
        if (crtc->state->event)
                drm_crtc_send_vblank_event(crtc, crtc->state->event);
+       crtc->state->event = NULL;
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
@@ -341,15 +214,6 @@ static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
        return MODE_BAD;
 }
 
-static struct drm_encoder*
-virtio_gpu_best_encoder(struct drm_connector *connector)
-{
-       struct virtio_gpu_output *virtio_gpu_output =
-               drm_connector_to_virtio_gpu_output(connector);
-
-       return &virtio_gpu_output->enc;
-}
-
 static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
        .mode_set   = virtio_gpu_enc_mode_set,
        .enable     = virtio_gpu_enc_enable,
@@ -359,7 +223,6 @@ static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
 static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
        .get_modes    = virtio_gpu_conn_get_modes,
        .mode_valid   = virtio_gpu_conn_mode_valid,
-       .best_encoder = virtio_gpu_best_encoder,
 };
 
 static enum drm_connector_status virtio_gpu_conn_detect(
@@ -406,7 +269,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
        struct drm_connector *connector = &output->conn;
        struct drm_encoder *encoder = &output->enc;
        struct drm_crtc *crtc = &output->crtc;
-       struct drm_plane *plane;
+       struct drm_plane *primary, *cursor;
 
        output->index = index;
        if (index == 0) {
@@ -415,13 +278,17 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
                output->info.r.height = cpu_to_le32(YRES_DEF);
        }
 
-       plane = virtio_gpu_plane_init(vgdev, index);
-       if (IS_ERR(plane))
-               return PTR_ERR(plane);
-       drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+       primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
+       if (IS_ERR(primary))
+               return PTR_ERR(primary);
+       cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
+       if (IS_ERR(cursor))
+               return PTR_ERR(cursor);
+       drm_crtc_init_with_planes(dev, crtc, primary, cursor,
                                  &virtio_gpu_crtc_funcs, NULL);
        drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
-       plane->crtc = crtc;
+       primary->crtc = crtc;
+       cursor->crtc = crtc;
 
        drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
                           DRM_MODE_CONNECTOR_VIRTUAL);
@@ -458,14 +325,31 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
        ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
        if (ret) {
                kfree(virtio_gpu_fb);
-               if (obj)
-                       drm_gem_object_unreference_unlocked(obj);
+               drm_gem_object_unreference_unlocked(obj);
                return NULL;
        }
 
        return &virtio_gpu_fb->base;
 }
 
+static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
+{
+       struct drm_device *dev = state->dev;
+
+       drm_atomic_helper_commit_modeset_disables(dev, state);
+       drm_atomic_helper_commit_modeset_enables(dev, state);
+       drm_atomic_helper_commit_planes(dev, state, true);
+
+       drm_atomic_helper_commit_hw_done(state);
+
+       drm_atomic_helper_wait_for_vblanks(dev, state);
+       drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+static struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
+       .atomic_commit_tail = vgdev_atomic_commit_tail,
+};
+
 static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
        .fb_create = virtio_gpu_user_framebuffer_create,
        .atomic_check = drm_atomic_helper_check,
@@ -477,7 +361,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
        int i;
 
        drm_mode_config_init(vgdev->ddev);
-       vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs;
+       vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
+       vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers;
 
        /* modes will be validated against the framebuffer size */
        vgdev->ddev->mode_config.min_width = XRES_MIN;
index 88a39165edd50705d561ca8c5d23df2ac628f9a1..7f0e93f87a554ee8e93a81ee2f4d80f9745ad3bc 100644 (file)
 
 #include "virtgpu_drv.h"
 
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
-{
-       struct pci_dev *pdev = dev->pdev;
-
-       if (pdev) {
-               return drm_pci_set_busid(dev, master);
-       }
-       return 0;
-}
-
 static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
 {
        struct apertures_struct *ap;
index 3cc7afa77a35c1b40f7bc7836f4580126a9194af..c13f70cfc46130946cd41e1846101160d426439b 100644 (file)
@@ -117,7 +117,6 @@ static const struct file_operations virtio_gpu_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
-       .set_busid = drm_virtio_set_busid,
        .load = virtio_gpu_driver_load,
        .unload = virtio_gpu_driver_unload,
        .open = virtio_gpu_driver_open,
@@ -143,7 +142,7 @@ static struct drm_driver driver = {
        .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
        .gem_prime_mmap = virtgpu_gem_prime_mmap,
 
-       .gem_free_object = virtio_gpu_gem_free_object,
+       .gem_free_object_unlocked = virtio_gpu_gem_free_object,
        .gem_open_object = virtio_gpu_gem_object_open,
        .gem_close_object = virtio_gpu_gem_object_close,
        .fops = &virtio_gpu_driver_fops,
index 0a54f43f846a9d560d4b626872a86eecda1cdaf7..b18ef3111f0c40c054c33e66877927a6fbbd4d5b 100644 (file)
@@ -33,6 +33,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_crtc_helper.h>
 #include <ttm/ttm_bo_api.h>
 #include <ttm/ttm_bo_driver.h>
@@ -48,7 +49,6 @@
 #define DRIVER_PATCHLEVEL 1
 
 /* virtgpu_drm_bus.c */
-int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
 int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
 
 struct virtio_gpu_object {
@@ -335,6 +335,7 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
 
 /* virtio_gpu_plane.c */
 struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
+                                       enum drm_plane_type type,
                                        int index);
 
 /* virtio_gpu_ttm.c */
index 70b44a2345ab43e2ac1bef8ded76c292ab752884..925ca25209dfa12a60128b333d21d19fad9c03af 100644 (file)
@@ -38,6 +38,10 @@ static const uint32_t virtio_gpu_formats[] = {
        DRM_FORMAT_ABGR8888,
 };
 
+static const uint32_t virtio_gpu_cursor_formats[] = {
+       DRM_FORMAT_ARGB8888,
+};
+
 static void virtio_gpu_plane_destroy(struct drm_plane *plane)
 {
        kfree(plane);
@@ -58,16 +62,22 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
        return 0;
 }
 
-static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
-                                          struct drm_plane_state *old_state)
+static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
+                                           struct drm_plane_state *old_state)
 {
        struct drm_device *dev = plane->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
-       struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc);
+       struct virtio_gpu_output *output = NULL;
        struct virtio_gpu_framebuffer *vgfb;
        struct virtio_gpu_object *bo;
        uint32_t handle;
 
+       if (plane->state->crtc)
+               output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
+       if (old_state->crtc)
+               output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
+       WARN_ON(!output);
+
        if (plane->state->fb) {
                vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
                bo = gem_to_virtio_gpu_obj(vgfb->obj);
@@ -75,55 +85,149 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
                if (bo->dumb) {
                        virtio_gpu_cmd_transfer_to_host_2d
                                (vgdev, handle, 0,
-                                cpu_to_le32(plane->state->crtc_w),
-                                cpu_to_le32(plane->state->crtc_h),
-                                plane->state->crtc_x, plane->state->crtc_y, NULL);
+                                cpu_to_le32(plane->state->src_w >> 16),
+                                cpu_to_le32(plane->state->src_h >> 16),
+                                plane->state->src_x >> 16,
+                                plane->state->src_y >> 16, NULL);
                }
        } else {
                handle = 0;
        }
 
-       DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle,
+       DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
                  plane->state->crtc_w, plane->state->crtc_h,
-                 plane->state->crtc_x, plane->state->crtc_y);
+                 plane->state->crtc_x, plane->state->crtc_y,
+                 plane->state->src_w >> 16,
+                 plane->state->src_h >> 16,
+                 plane->state->src_x >> 16,
+                 plane->state->src_y >> 16);
        virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
-                                  plane->state->crtc_w,
-                                  plane->state->crtc_h,
-                                  plane->state->crtc_x,
-                                  plane->state->crtc_y);
+                                  plane->state->src_w >> 16,
+                                  plane->state->src_h >> 16,
+                                  plane->state->src_x >> 16,
+                                  plane->state->src_y >> 16);
        virtio_gpu_cmd_resource_flush(vgdev, handle,
-                                     plane->state->crtc_x,
-                                     plane->state->crtc_y,
-                                     plane->state->crtc_w,
-                                     plane->state->crtc_h);
+                                     plane->state->src_x >> 16,
+                                     plane->state->src_y >> 16,
+                                     plane->state->src_w >> 16,
+                                     plane->state->src_h >> 16);
 }
 
+static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
+                                          struct drm_plane_state *old_state)
+{
+       struct drm_device *dev = plane->dev;
+       struct virtio_gpu_device *vgdev = dev->dev_private;
+       struct virtio_gpu_output *output = NULL;
+       struct virtio_gpu_framebuffer *vgfb;
+       struct virtio_gpu_fence *fence = NULL;
+       struct virtio_gpu_object *bo = NULL;
+       uint32_t handle;
+       int ret = 0;
 
-static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = {
+       if (plane->state->crtc)
+               output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
+       if (old_state->crtc)
+               output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
+       WARN_ON(!output);
+
+       if (plane->state->fb) {
+               vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+               bo = gem_to_virtio_gpu_obj(vgfb->obj);
+               handle = bo->hw_res_handle;
+       } else {
+               handle = 0;
+       }
+
+       if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
+               /* new cursor -- update & wait */
+               virtio_gpu_cmd_transfer_to_host_2d
+                       (vgdev, handle, 0,
+                        cpu_to_le32(plane->state->crtc_w),
+                        cpu_to_le32(plane->state->crtc_h),
+                        0, 0, &fence);
+               ret = virtio_gpu_object_reserve(bo, false);
+               if (!ret) {
+                       reservation_object_add_excl_fence(bo->tbo.resv,
+                                                         &fence->f);
+                       fence_put(&fence->f);
+                       fence = NULL;
+                       virtio_gpu_object_unreserve(bo);
+                       virtio_gpu_object_wait(bo, false);
+               }
+       }
+
+       if (plane->state->fb != old_state->fb) {
+               DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
+                         plane->state->crtc_x,
+                         plane->state->crtc_y,
+                         plane->state->fb ? plane->state->fb->hot_x : 0,
+                         plane->state->fb ? plane->state->fb->hot_y : 0);
+               output->cursor.hdr.type =
+                       cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
+               output->cursor.resource_id = cpu_to_le32(handle);
+               if (plane->state->fb) {
+                       output->cursor.hot_x =
+                               cpu_to_le32(plane->state->fb->hot_x);
+                       output->cursor.hot_y =
+                               cpu_to_le32(plane->state->fb->hot_y);
+               } else {
+                       output->cursor.hot_x = cpu_to_le32(0);
+                       output->cursor.hot_y = cpu_to_le32(0);
+               }
+       } else {
+               DRM_DEBUG("move +%d+%d\n",
+                         plane->state->crtc_x,
+                         plane->state->crtc_y);
+               output->cursor.hdr.type =
+                       cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
+       }
+       output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
+       output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
+       virtio_gpu_cursor_ping(vgdev, output);
+}
+
+static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
+       .atomic_check           = virtio_gpu_plane_atomic_check,
+       .atomic_update          = virtio_gpu_primary_plane_update,
+};
+
+static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
        .atomic_check           = virtio_gpu_plane_atomic_check,
-       .atomic_update          = virtio_gpu_plane_atomic_update,
+       .atomic_update          = virtio_gpu_cursor_plane_update,
 };
 
 struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
+                                       enum drm_plane_type type,
                                        int index)
 {
        struct drm_device *dev = vgdev->ddev;
+       const struct drm_plane_helper_funcs *funcs;
        struct drm_plane *plane;
-       int ret;
+       const uint32_t *formats;
+       int ret, nformats;
 
        plane = kzalloc(sizeof(*plane), GFP_KERNEL);
        if (!plane)
                return ERR_PTR(-ENOMEM);
 
+       if (type == DRM_PLANE_TYPE_CURSOR) {
+               formats = virtio_gpu_cursor_formats;
+               nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
+               funcs = &virtio_gpu_cursor_helper_funcs;
+       } else {
+               formats = virtio_gpu_formats;
+               nformats = ARRAY_SIZE(virtio_gpu_formats);
+               funcs = &virtio_gpu_primary_helper_funcs;
+       }
        ret = drm_universal_plane_init(dev, plane, 1 << index,
                                       &virtio_gpu_plane_funcs,
-                                      virtio_gpu_formats,
-                                      ARRAY_SIZE(virtio_gpu_formats),
-                                      DRM_PLANE_TYPE_PRIMARY, NULL);
+                                      formats, nformats,
+                                      type, NULL);
        if (ret)
                goto err_plane_init;
 
-       drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs);
+       drm_plane_helper_add(plane, funcs);
        return plane;
 
 err_plane_init:
index a0580815629f52a4c6dc61a22ca330d977991570..80482ac5f95dc71ac06bdd2d3dacc68333e32006 100644 (file)
@@ -375,6 +375,12 @@ static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
                              bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
 {
+       int ret;
+
+       ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+       if (ret)
+               return ret;
+
        virtio_gpu_move_null(bo, new_mem);
        return 0;
 }
index 67cebb23c940e2dc0019eab8b1dbe0b4a036650a..aa04fb0159a76ef81a57a061dfd65c10756b511b 100644 (file)
@@ -293,13 +293,10 @@ static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
        struct vmw_cmdbuf_man *man = header->man;
        u32 val;
 
-       if (sizeof(header->handle) > 4)
-               val = (header->handle >> 32);
-       else
-               val = 0;
+       val = upper_32_bits(header->handle);
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
 
-       val = (header->handle & 0xFFFFFFFFULL);
+       val = lower_32_bits(header->handle);
        val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
 
index 9b078a4939968d8521a96b3f83280f4f06b407bc..0cd889015dc57d5c0d0a8b89f39cd17b10303fbf 100644 (file)
@@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
 {
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
+       uint32_t new_flags;
 
        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
@@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                goto err;
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false);
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, placement, interruptible, false);
+
        if (!ret)
                vmw_bo_pin_reserved(buf, true);
 
@@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 {
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
+       uint32_t new_flags;
 
        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
@@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                goto err;
 
+       if (buf->pin_count > 0) {
+               ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+               goto out_unreserve;
+       }
+
        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
                              false);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
@@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;
+       uint32_t new_flags;
 
        place = vmw_vram_placement.placement[0];
        place.lpfn = bo->num_pages;
@@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
-           bo->mem.start > 0)
+           bo->mem.start > 0 &&
+           buf->pin_count == 0)
                (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
 
-       ret = ttm_bo_validate(bo, &placement, interruptible, false);
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(&placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
index 9fcd8200d485e61aae3ff8a0cbf4fd4289a6d54b..e8ae3dc476d16fb756fd5ec4380fe644f1f858d0 100644 (file)
@@ -233,6 +233,7 @@ static int vmw_force_iommu;
 static int vmw_restrict_iommu;
 static int vmw_force_coherent;
 static int vmw_restrict_dma_mask;
+static int vmw_assume_16bpp;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
@@ -249,6 +250,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
 module_param_named(force_coherent, vmw_force_coherent, int, 0600);
 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
 module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
+module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
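
For testing, the new knob would typically be set at load time, e.g.
"modprobe vmwgfx assume_16bpp=1"; the 0600 mode also exposes
/sys/module/vmwgfx/parameters/assume_16bpp to root, though the driver only
samples the value in vmw_driver_load(), so a runtime write only takes
effect on the next device bind.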
 
 
 static void vmw_print_capabilities(uint32_t capabilities)
@@ -660,6 +663,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
+       dev_priv->assume_16bpp = !!vmw_assume_16bpp;
+
        dev_priv->enable_fb = enable_fbdev;
 
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -706,6 +711,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                        vmw_read(dev_priv,
                                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
 
+               /*
+                * Workaround for low-memory 2D VMs to compensate for the
+                * allocation taken by fbdev
+                */
+               if (!(dev_priv->capabilities & SVGA_CAP_3D))
+                       mem_size *= 2;
+
                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
                dev_priv->prim_bb_mem =
                        vmw_read(dev_priv,
@@ -1041,15 +1053,14 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster;
 
-       if (file_priv->minor->type != DRM_MINOR_LEGACY ||
-           !(flags & DRM_AUTH))
+       if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
                return NULL;
 
        ret = mutex_lock_interruptible(&dev->master_mutex);
        if (unlikely(ret != 0))
                return ERR_PTR(-ERESTARTSYS);
 
-       if (file_priv->is_master) {
+       if (drm_is_current_master(file_priv)) {
                mutex_unlock(&dev->master_mutex);
                return NULL;
        }
@@ -1228,8 +1239,7 @@ static int vmw_master_set(struct drm_device *dev,
 }
 
 static void vmw_master_drop(struct drm_device *dev,
-                           struct drm_file *file_priv,
-                           bool from_release)
+                           struct drm_file *file_priv)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
index 1980e2a28265d195db5002977d39239f6b3251a9..74304b03f9d4811973c27e0259cd4cbbf9417ae6 100644 (file)
@@ -32,6 +32,7 @@
 #include <drm/drmP.h>
 #include <drm/vmwgfx_drm.h>
 #include <drm/drm_hashtab.h>
+#include <drm/drm_auth.h>
 #include <linux/suspend.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
@@ -386,6 +387,7 @@ struct vmw_private {
        spinlock_t hw_lock;
        spinlock_t cap_lock;
        bool has_dx;
+       bool assume_16bpp;
 
        /*
         * VGA registers.
index 679a4cb98ee306bb47c5c0097f1aa829c17a7fc8..d2d93959b1198ce41470a1a76a8341553c1ddc6c 100644 (file)
@@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
 
        par->set_fb = &vfb->base;
 
-       if (!par->bo_ptr) {
-               /*
-                * Pin before mapping. Since we don't know in what placement
-                * to pin, call into KMS to do it for us.
-                */
-               ret = vfb->pin(vfb);
-               if (ret) {
-                       DRM_ERROR("Could not pin the fbdev framebuffer.\n");
-                       return ret;
-               }
-
-               ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
-                                 par->vmw_bo->base.num_pages, &par->map);
-               if (ret) {
-                       vfb->unpin(vfb);
-                       DRM_ERROR("Could not map the fbdev framebuffer.\n");
-                       return ret;
-               }
-
-               par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
-       }
-
        return 0;
 }
 
@@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info)
        if (ret)
                goto out_unlock;
 
+       if (!par->bo_ptr) {
+               struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
+
+               /*
+                * Pin before mapping. Since we don't know in what placement
+                * to pin, call into KMS to do it for us.
+                */
+               ret = vfb->pin(vfb);
+               if (ret) {
+                       DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+                       goto out_unlock;
+               }
+
+               ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+                                 par->vmw_bo->base.num_pages, &par->map);
+               if (ret) {
+                       vfb->unpin(vfb);
+                       DRM_ERROR("Could not map the fbdev framebuffer.\n");
+                       goto out_unlock;
+               }
+
+               par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+       }
+
+
        vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
                          par->set_fb->width, par->set_fb->height);
 
index e959df6ede83e0a7b3b72e3c16df313f4df98940..26ac8e80a4786a674f52fb72792b4d6160416b27 100644 (file)
@@ -46,7 +46,7 @@ struct vmw_fence_manager {
        bool goal_irq_on; /* Protected by @goal_irq_mutex */
        bool seqno_valid; /* Protected by @lock, and may not be set to true
                             without the @goal_irq_mutex held. */
-       unsigned ctx;
+       u64 ctx;
 };
 
 struct vmw_user_fence {
index 55231cce73a01f047a1d660d3f3192d1f7ba80a6..bf28ccc150dfe081924fc08cf248d898003846f7 100644 (file)
@@ -1404,9 +1404,9 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
        return 0;
 }
 
-void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
-                          u16 *r, u16 *g, u16 *b,
-                          uint32_t start, uint32_t size)
+int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+                         u16 *r, u16 *g, u16 *b,
+                         uint32_t size)
 {
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        int i;
@@ -1418,6 +1418,8 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
                vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
                vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
        }
+
+       return 0;
 }
 
 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
@@ -1553,14 +1555,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
        int i;
-       u32 assumed_bpp = 2;
+       u32 assumed_bpp = 4;
 
-       /*
-        * If using screen objects, then assume 32-bpp because that's what the
-        * SVGA device is assuming
-        */
-       if (dev_priv->active_display_unit == vmw_du_screen_object)
-               assumed_bpp = 4;
+       if (dev_priv->assume_16bpp)
+               assumed_bpp = 2;
 
        if (dev_priv->active_display_unit == vmw_du_screen_target) {
                max_width  = min(max_width,  dev_priv->stdu_max_width);
index 57203212c501dc0577d45d76661c7fafe6651351..ff4803c107bcdbff9f1b6f4e8cf3cfbbc66cba32 100644 (file)
@@ -195,9 +195,9 @@ struct vmw_display_unit {
 void vmw_du_cleanup(struct vmw_display_unit *du);
 void vmw_du_crtc_save(struct drm_crtc *crtc);
 void vmw_du_crtc_restore(struct drm_crtc *crtc);
-void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
                           u16 *r, u16 *g, u16 *b,
-                          uint32_t start, uint32_t size);
+                          uint32_t size);
 int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
                            uint32_t handle, uint32_t width, uint32_t height,
                            int32_t hot_x, int32_t hot_y);
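
These two hunks track a DRM core change: the .gamma_set hook now returns an
error code and no longer takes a start offset, always reloading the full LUT.
A hedged sketch of a driver callback under the new signature, where
foo_write_lut() is a hypothetical register helper:

        static int foo_crtc_gamma_set(struct drm_crtc *crtc,
                                      u16 *r, u16 *g, u16 *b, uint32_t size)
        {
                unsigned int i;

                for (i = 0; i < size; i++)
                        foo_write_lut(crtc, i, r[i] >> 8, g[i] >> 8, b[i] >> 8);

                return 0;       /* failures can now be reported to the core */
        }
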
index 6de283c8fa3edf0088ca6ee25193802ce64480bf..e57a0bad7a626daf505625ce019f2583218a51ab 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/frame.h>
 #include <asm/hypervisor.h>
 #include "drmP.h"
 #include "vmwgfx_msg.h"
@@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
 
        return -EINVAL;
 }
-
+STACK_FRAME_NON_STANDARD(vmw_send_msg);
 
 
 /**
@@ -299,11 +300,15 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
                break;
        }
 
+       if (retries == RETRIES)
+               return -EINVAL;
+
        *msg_len = reply_len;
        *msg     = reply;
 
        return 0;
 }
+STACK_FRAME_NON_STANDARD(vmw_recv_msg);
 
 
 /**
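
STACK_FRAME_NON_STANDARD (from <linux/frame.h>) whitelists a single function
whose inline assembly objtool cannot validate, instead of disabling the
checker for the whole file. A minimal sketch, assuming a hypothetical
hypervisor call with non-standard register usage:

        #include <linux/frame.h>

        static unsigned long hv_call(unsigned long cmd)
        {
                unsigned long ret;

                /* clobbers the stack frame in ways objtool cannot follow */
                asm volatile("vmcall" : "=a" (ret) : "a" (cmd) : "memory");
                return ret;
        }
        STACK_FRAME_NON_STANDARD(hv_call);      /* suppress objtool warnings */
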
index 9ca818fb034c923c4737ccfedb3ea98002aaae84..41932a7c4f79516da86f9e35d1223c8e033cf248 100644 (file)
@@ -399,8 +399,10 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
 
        WARN_ON_ONCE(!stdu->defined);
 
-       if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
-           new_fb->height == mode->vdisplay)
+       new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+
+       if (new_vfbs && new_vfbs->surface->base_size.width == mode->hdisplay &&
+           new_vfbs->surface->base_size.height == mode->vdisplay)
                new_content_type = SAME_AS_DISPLAY;
        else if (vfb->dmabuf)
                new_content_type = SEPARATE_DMA;
@@ -444,7 +446,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
                        content_srf.mip_levels[0]     = 1;
                        content_srf.multisample_count = 0;
                } else {
-                       new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
                        content_srf = *new_vfbs->surface;
                }
 
@@ -464,7 +465,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
                        return ret;
                }
        } else if (new_content_type == SAME_AS_DISPLAY) {
-               new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
                new_display_srf = vmw_surface_reference(new_vfbs->surface);
        }
 
index a18db4d5347cefa99847df944be3a3b364de089a..c5d82a8a2ec9c2498bf9e7ed96823cb14db8afae 100644 (file)
@@ -96,12 +96,12 @@ fail:
  */
 static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
 {
-       u32 pos = pb->pos;
-       u32 *p = (u32 *)((void *)pb->mapped + pos);
-       WARN_ON(pos == pb->fence);
+       u32 *p = (u32 *)((void *)pb->mapped + pb->pos);
+
+       WARN_ON(pb->pos == pb->fence);
        *(p++) = op1;
        *(p++) = op2;
-       pb->pos = (pos + 8) & (pb->size_bytes - 1);
+       pb->pos = (pb->pos + 8) & (pb->size_bytes - 1);
 }
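
The wrap in host1x_pushbuffer_push() relies on size_bytes being a power of
two, so the AND with (size_bytes - 1) is a cheap modulo. A worked example:

        /* size_bytes = 4096, pos = 4088 (the last 8-byte slot) */
        pos = (4088 + 8) & (4096 - 1);  /* 4096 & 0xfff = 0: wraps to start */
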
 
 /*
@@ -134,14 +134,19 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
                                     enum cdma_event event)
 {
        for (;;) {
+               struct push_buffer *pb = &cdma->push_buffer;
                unsigned int space;
 
-               if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
+               switch (event) {
+               case CDMA_EVENT_SYNC_QUEUE_EMPTY:
                        space = list_empty(&cdma->sync_queue) ? 1 : 0;
-               else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) {
-                       struct push_buffer *pb = &cdma->push_buffer;
+                       break;
+
+               case CDMA_EVENT_PUSH_BUFFER_SPACE:
                        space = host1x_pushbuffer_space(pb);
-               } else {
+                       break;
+
+               default:
                        WARN_ON(1);
                        return -EINVAL;
                }
@@ -159,12 +164,14 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
                        mutex_lock(&cdma->lock);
                        continue;
                }
+
                cdma->event = event;
 
                mutex_unlock(&cdma->lock);
                down(&cdma->sem);
                mutex_lock(&cdma->lock);
        }
+
        return 0;
 }
 
@@ -234,6 +241,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
                        /* Start timer on next pending syncpt */
                        if (job->timeout)
                                cdma_start_timer_locked(cdma, job);
+
                        break;
                }
 
@@ -247,7 +255,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
                /* Pop push buffer slots */
                if (job->num_slots) {
                        struct push_buffer *pb = &cdma->push_buffer;
+
                        host1x_pushbuffer_pop(pb, job->num_slots);
+
                        if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
                                signal = true;
                }
@@ -269,11 +279,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
 void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
                                   struct device *dev)
 {
-       u32 restart_addr;
-       u32 syncpt_incrs;
-       struct host1x_job *job = NULL;
-       u32 syncpt_val;
        struct host1x *host1x = cdma_to_host1x(cdma);
+       u32 restart_addr, syncpt_incrs, syncpt_val;
+       struct host1x_job *job = NULL;
 
        syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
 
@@ -342,9 +350,11 @@ void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
                syncpt_val += syncpt_incrs;
        }
 
-       /* The following sumbits from the same client may be dependent on the
+       /*
+        * The following submits from the same client may be dependent on the
         * failed submit and therefore they may fail. Force a small timeout
-        * to make the queue cleanup faster */
+        * to make the queue cleanup faster.
+        */
 
        list_for_each_entry_from(job, &cdma->sync_queue, list)
                if (job->client == cdma->timeout.client)
@@ -375,6 +385,7 @@ int host1x_cdma_init(struct host1x_cdma *cdma)
        err = host1x_pushbuffer_init(&cdma->push_buffer);
        if (err)
                return err;
+
        return 0;
 }
 
@@ -410,6 +421,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
                /* init state on first submit with timeout value */
                if (!cdma->timeout.initialized) {
                        int err;
+
                        err = host1x_hw_cdma_timeout_init(host1x, cdma,
                                                          job->syncpt_id);
                        if (err) {
@@ -418,6 +430,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
                        }
                }
        }
+
        if (!cdma->running)
                host1x_hw_cdma_start(host1x, cdma);
 
@@ -448,6 +461,7 @@ void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
                slots_free = host1x_cdma_wait_locked(cdma,
                                                CDMA_EVENT_PUSH_BUFFER_SPACE);
        }
+
        cdma->slots_free = slots_free - 1;
        cdma->slots_used++;
        host1x_pushbuffer_push(pb, op1, op2);
index b4ae3affb987bb92ed7dcd1f3317714b260fd61d..8f437d924c10c700cbf46f060d32c2d902a980f1 100644 (file)
@@ -83,9 +83,10 @@ EXPORT_SYMBOL(host1x_channel_put);
 struct host1x_channel *host1x_channel_request(struct device *dev)
 {
        struct host1x *host = dev_get_drvdata(dev->parent);
-       int max_channels = host->info->nb_channels;
+       unsigned int max_channels = host->info->nb_channels;
        struct host1x_channel *channel = NULL;
-       int index, err;
+       unsigned long index;
+       int err;
 
        mutex_lock(&host->chlist_mutex);
 
index ee3d12b51c50a036b89c48c3e60576a2a14acf5b..d9330fcc62ad5c88a6090e1c07eb0a8115019402 100644 (file)
@@ -39,6 +39,7 @@ void host1x_debug_output(struct output *o, const char *fmt, ...)
        va_start(args, fmt);
        len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
        va_end(args);
+
        o->fn(o->ctx, o->buf, len);
 }
 
@@ -48,13 +49,17 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
        struct output *o = data;
 
        mutex_lock(&ch->reflock);
+
        if (ch->refcount) {
                mutex_lock(&ch->cdma.lock);
+
                if (show_fifo)
                        host1x_hw_show_channel_fifo(m, ch, o);
+
                host1x_hw_show_channel_cdma(m, ch, o);
                mutex_unlock(&ch->cdma.lock);
        }
+
        mutex_unlock(&ch->reflock);
 
        return 0;
@@ -62,22 +67,27 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
 
 static void show_syncpts(struct host1x *m, struct output *o)
 {
-       int i;
+       unsigned int i;
+
        host1x_debug_output(o, "---- syncpts ----\n");
+
        for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
                u32 max = host1x_syncpt_read_max(m->syncpt + i);
                u32 min = host1x_syncpt_load(m->syncpt + i);
+
                if (!min && !max)
                        continue;
-               host1x_debug_output(o, "id %d (%s) min %d max %d\n",
+
+               host1x_debug_output(o, "id %u (%s) min %d max %d\n",
                                    i, m->syncpt[i].name, min, max);
        }
 
        for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
                u32 base_val;
+
                base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
                if (base_val)
-                       host1x_debug_output(o, "waitbase id %d val %d\n", i,
+                       host1x_debug_output(o, "waitbase id %u val %d\n", i,
                                            base_val);
        }
 
@@ -114,7 +124,9 @@ static int host1x_debug_show_all(struct seq_file *s, void *unused)
                .fn = write_to_seqfile,
                .ctx = s
        };
+
        show_all(s->private, &o);
+
        return 0;
 }
 
@@ -124,7 +136,9 @@ static int host1x_debug_show(struct seq_file *s, void *unused)
                .fn = write_to_seqfile,
                .ctx = s
        };
+
        show_all_no_fifo(s->private, &o);
+
        return 0;
 }
 
@@ -134,10 +148,10 @@ static int host1x_debug_open_all(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations host1x_debug_all_fops = {
-       .open           = host1x_debug_open_all,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+       .open = host1x_debug_open_all,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
 };
 
 static int host1x_debug_open(struct inode *inode, struct file *file)
@@ -146,10 +160,10 @@ static int host1x_debug_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations host1x_debug_fops = {
-       .open           = host1x_debug_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
+       .open = host1x_debug_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
 };
 
 static void host1x_debugfs_init(struct host1x *host1x)
@@ -201,6 +215,7 @@ void host1x_debug_dump(struct host1x *host1x)
        struct output o = {
                .fn = write_to_printk
        };
+
        show_all(host1x, &o);
 }
 
@@ -209,5 +224,6 @@ void host1x_debug_dump_syncpts(struct host1x *host1x)
        struct output o = {
                .fn = write_to_printk
        };
+
        show_syncpts(host1x, &o);
 }
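
All of the dumps above funnel through struct output, so the same show_*()
walkers can print either to a seq_file or to the kernel log depending on the
.fn sink. A sketch of how the two sinks are wired up, using the helpers shown
in this file:

        struct output to_log = { .fn = write_to_printk };
        struct output to_seq = { .fn = write_to_seqfile, .ctx = s };

        show_all(host1x, &to_log);      /* dmesg dump, e.g. on timeout */
        show_all(s->private, &to_seq);  /* debugfs read */
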
index ff348690df94a0eebcddaadedfbf440cb05cac4a..a62317af76ad7b8b90593c2fda904a102e360701 100644 (file)
@@ -63,13 +63,13 @@ u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
 }
 
 static const struct host1x_info host1x01_info = {
-       .nb_channels    = 8,
-       .nb_pts         = 32,
-       .nb_mlocks      = 16,
-       .nb_bases       = 8,
-       .init           = host1x01_init,
-       .sync_offset    = 0x3000,
-       .dma_mask       = DMA_BIT_MASK(32),
+       .nb_channels = 8,
+       .nb_pts = 32,
+       .nb_mlocks = 16,
+       .nb_bases = 8,
+       .init = host1x01_init,
+       .sync_offset = 0x3000,
+       .dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct host1x_info host1x02_info = {
@@ -102,7 +102,7 @@ static const struct host1x_info host1x05_info = {
        .dma_mask = DMA_BIT_MASK(34),
 };
 
-static struct of_device_id host1x_of_match[] = {
+static const struct of_device_id host1x_of_match[] = {
        { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
        { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
        { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
index dace124994bb2683cc0c66218dcd5cdd15902d2b..5220510f39dae91c219b29d40062b3e689fcb815 100644 (file)
@@ -45,7 +45,7 @@ struct host1x_cdma_ops {
        void (*start)(struct host1x_cdma *cdma);
        void (*stop)(struct host1x_cdma *cdma);
        void (*flush)(struct  host1x_cdma *cdma);
-       int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id);
+       int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
        void (*timeout_destroy)(struct host1x_cdma *cdma);
        void (*freeze)(struct host1x_cdma *cdma);
        void (*resume)(struct host1x_cdma *cdma, u32 getptr);
@@ -82,21 +82,21 @@ struct host1x_intr_ops {
        int (*init_host_sync)(struct host1x *host, u32 cpm,
                void (*syncpt_thresh_work)(struct work_struct *work));
        void (*set_syncpt_threshold)(
-               struct host1x *host, u32 id, u32 thresh);
-       void (*enable_syncpt_intr)(struct host1x *host, u32 id);
-       void (*disable_syncpt_intr)(struct host1x *host, u32 id);
+               struct host1x *host, unsigned int id, u32 thresh);
+       void (*enable_syncpt_intr)(struct host1x *host, unsigned int id);
+       void (*disable_syncpt_intr)(struct host1x *host, unsigned int id);
        void (*disable_all_syncpt_intrs)(struct host1x *host);
        int (*free_syncpt_irq)(struct host1x *host);
 };
 
 struct host1x_info {
-       int     nb_channels;            /* host1x: num channels supported */
-       int     nb_pts;                 /* host1x: num syncpoints supported */
-       int     nb_bases;               /* host1x: num syncpoints supported */
-       int     nb_mlocks;              /* host1x: number of mlocks */
-       int     (*init)(struct host1x *); /* initialize per SoC ops */
-       int     sync_offset;
-       u64     dma_mask;               /* mask of addressable memory */
+       unsigned int nb_channels; /* host1x: number of channels supported */
+       unsigned int nb_pts; /* host1x: number of syncpoints supported */
+       unsigned int nb_bases; /* host1x: number of syncpoint bases supported */
+       unsigned int nb_mlocks; /* host1x: number of mlocks supported */
+       int (*init)(struct host1x *host1x); /* initialize per SoC ops */
+       unsigned int sync_offset; /* offset of syncpoint registers */
+       u64 dma_mask; /* mask of addressable memory */
 };
 
 struct host1x {
@@ -109,7 +109,6 @@ struct host1x {
        struct clk *clk;
 
        struct mutex intr_mutex;
-       struct workqueue_struct *intr_wq;
        int intr_syncpt_irq;
 
        const struct host1x_syncpt_ops *syncpt_op;
@@ -183,19 +182,20 @@ static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
 }
 
 static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
-                                                      u32 id, u32 thresh)
+                                                      unsigned int id,
+                                                      u32 thresh)
 {
        host->intr_op->set_syncpt_threshold(host, id, thresh);
 }
 
 static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
-                                                    u32 id)
+                                                    unsigned int id)
 {
        host->intr_op->enable_syncpt_intr(host, id);
 }
 
 static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
-                                                     u32 id)
+                                                     unsigned int id)
 {
        host->intr_op->disable_syncpt_intr(host, id);
 }
@@ -212,9 +212,9 @@ static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
 
 static inline int host1x_hw_channel_init(struct host1x *host,
                                         struct host1x_channel *channel,
-                                        int chid)
+                                        unsigned int id)
 {
-       return host->channel_op->init(channel, host, chid);
+       return host->channel_op->init(channel, host, id);
 }
 
 static inline int host1x_hw_channel_submit(struct host1x *host,
@@ -243,9 +243,9 @@ static inline void host1x_hw_cdma_flush(struct host1x *host,
 
 static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
                                              struct host1x_cdma *cdma,
-                                             u32 syncpt_id)
+                                             unsigned int syncpt)
 {
-       return host->cdma_op->timeout_init(cdma, syncpt_id);
+       return host->cdma_op->timeout_init(cdma, syncpt);
 }
 
 static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
index 305ea8f3382d22da2611e6de7025695265d97699..659c1bbfeeba2316df23976b2ce78217f578ffad 100644 (file)
@@ -41,7 +41,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
 {
        struct host1x *host1x = cdma_to_host1x(cdma);
        struct push_buffer *pb = &cdma->push_buffer;
-       u32 i;
+       unsigned int i;
 
        for (i = 0; i < syncpt_incrs; i++)
                host1x_syncpt_incr(cdma->timeout.syncpt);
@@ -58,6 +58,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
                        &pb->phys, getptr);
                getptr = (getptr + 8) & (pb->size_bytes - 1);
        }
+
        wmb();
 }
 
@@ -162,12 +163,14 @@ static void cdma_stop(struct host1x_cdma *cdma)
        struct host1x_channel *ch = cdma_to_channel(cdma);
 
        mutex_lock(&cdma->lock);
+
        if (cdma->running) {
                host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
                host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
                                 HOST1X_CHANNEL_DMACTRL);
                cdma->running = false;
        }
+
        mutex_unlock(&cdma->lock);
 }
 
@@ -213,11 +216,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
        u32 cmdproc_stop;
 
        dev_dbg(host1x->dev,
-               "resuming channel (id %d, DMAGET restart = 0x%x)\n",
+               "resuming channel (id %u, DMAGET restart = 0x%x)\n",
                ch->id, getptr);
 
        cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
-       cmdproc_stop &= ~(BIT(ch->id));
+       cmdproc_stop &= ~BIT(ch->id);
        host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
 
        cdma->torndown = false;
@@ -231,14 +234,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
  */
 static void cdma_timeout_handler(struct work_struct *work)
 {
+       u32 prev_cmdproc, cmdproc_stop, syncpt_val;
        struct host1x_cdma *cdma;
        struct host1x *host1x;
        struct host1x_channel *ch;
 
-       u32 syncpt_val;
-
-       u32 prev_cmdproc, cmdproc_stop;
-
        cdma = container_of(to_delayed_work(work), struct host1x_cdma,
                            timeout.wq);
        host1x = cdma_to_host1x(cdma);
@@ -277,9 +277,9 @@ static void cdma_timeout_handler(struct work_struct *work)
                return;
        }
 
-       dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n",
-               __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
-               syncpt_val, cdma->timeout.syncpt_val);
+       dev_warn(host1x->dev, "%s: timeout: %u (%s), HW thresh %d, done %d\n",
+                __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
+                syncpt_val, cdma->timeout.syncpt_val);
 
        /* stop HW, resetting channel/module */
        host1x_hw_cdma_freeze(host1x, cdma);
@@ -291,7 +291,7 @@ static void cdma_timeout_handler(struct work_struct *work)
 /*
  * Init timeout resources
  */
-static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id)
+static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
 {
        INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
        cdma->timeout.initialized = true;
@@ -306,6 +306,7 @@ static void cdma_timeout_destroy(struct host1x_cdma *cdma)
 {
        if (cdma->timeout.initialized)
                cancel_delayed_work(&cdma->timeout.wq);
+
        cdma->timeout.initialized = false;
 }
 
index 946c332c3906ad823826b56b0c25a3300715424c..5e8df78b7acd82acbd2539671c167527fc512828 100644 (file)
@@ -46,6 +46,7 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
                 */
                for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
                        u32 num_words = min(words - i, TRACE_MAX_LENGTH);
+
                        offset += i * sizeof(u32);
 
                        trace_host1x_cdma_push_gather(dev_name(dev), bo,
@@ -66,6 +67,7 @@ static void submit_gathers(struct host1x_job *job)
                struct host1x_job_gather *g = &job->gathers[i];
                u32 op1 = host1x_opcode_gather(g->words);
                u32 op2 = g->base + g->offset;
+
                trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
                host1x_cdma_push(cdma, op1, op2);
        }
@@ -75,7 +77,8 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
 {
        struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
        struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
-       u32 id, value;
+       unsigned int id;
+       u32 value;
 
        value = host1x_syncpt_read_max(sp);
        id = sp->base->id;
index cc3f1825c735e5cf10aad66ff72c11003bc15737..7a4a3286e4a7c284e17e4940a846143730f2ad2d 100644 (file)
@@ -40,8 +40,7 @@ enum {
 
 static unsigned int show_channel_command(struct output *o, u32 val)
 {
-       unsigned mask;
-       unsigned subop;
+       unsigned int mask, subop;
 
        switch (val >> 28) {
        case HOST1X_OPCODE_SETCLASS:
@@ -51,12 +50,11 @@ static unsigned int show_channel_command(struct output *o, u32 val)
                                            val >> 6 & 0x3ff,
                                            val >> 16 & 0xfff, mask);
                        return hweight8(mask);
-               } else {
-                       host1x_debug_output(o, "SETCL(class=%03x)\n",
-                                           val >> 6 & 0x3ff);
-                       return 0;
                }
 
+               host1x_debug_output(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
+               return 0;
+
        case HOST1X_OPCODE_INCR:
                host1x_debug_output(o, "INCR(offset=%03x, [",
                                    val >> 16 & 0xfff);
@@ -143,7 +141,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
        struct host1x_job *job;
 
        list_for_each_entry(job, &cdma->sync_queue, list) {
-               int i;
+               unsigned int i;
+
                host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
                                    job, job->syncpt_id, job->syncpt_end,
                                    job->first_get, job->timeout,
@@ -190,7 +189,7 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
        cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
        cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
 
-       host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));
+       host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
 
        if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
            !ch->cdma.push_buffer.mapped) {
@@ -200,14 +199,13 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
 
        if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
            HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
-           HOST1X_UCLASS_WAIT_SYNCPT)
+                       HOST1X_UCLASS_WAIT_SYNCPT)
                host1x_debug_output(o, "waiting on syncpt %d val %d\n",
                                    cbread >> 24, cbread & 0xffffff);
        else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
-          HOST1X_CLASS_HOST1X &&
-          HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
-          HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
-
+                               HOST1X_CLASS_HOST1X &&
+                HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
+                               HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
                base = (cbread >> 16) & 0xff;
                baseval =
                        host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
@@ -236,7 +234,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
        u32 val, rd_ptr, wr_ptr, start, end;
        unsigned int data_count = 0;
 
-       host1x_debug_output(o, "%d: fifo:\n", ch->id);
+       host1x_debug_output(o, "%u: fifo:\n", ch->id);
 
        val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
        host1x_debug_output(o, "FIFOSTAT %08x\n", val);
@@ -290,20 +288,22 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
 
 static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
 {
-       int i;
+       unsigned int i;
 
        host1x_debug_output(o, "---- mlocks ----\n");
+
        for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
                u32 owner =
                        host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
                if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
-                       host1x_debug_output(o, "%d: locked by channel %d\n",
+                       host1x_debug_output(o, "%u: locked by channel %u\n",
                                i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
                else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
-                       host1x_debug_output(o, "%d: locked by cpu\n", i);
+                       host1x_debug_output(o, "%u: locked by cpu\n", i);
                else
-                       host1x_debug_output(o, "%d: unlocked\n", i);
+                       host1x_debug_output(o, "%u: unlocked\n", i);
        }
+
        host1x_debug_output(o, "\n");
 }
 
index e1e31e9e67cd96bdb17297ec66fb1ad8415a0ebd..dacb8009a605860a3153f1b2b00cf514007fb07b 100644 (file)
@@ -38,14 +38,14 @@ static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
        host1x_sync_writel(host, BIT_MASK(id),
                HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
 
-       queue_work(host->intr_wq, &syncpt->intr.work);
+       schedule_work(&syncpt->intr.work);
 }
 
 static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
 {
        struct host1x *host = dev_id;
        unsigned long reg;
-       int i, id;
+       unsigned int i, id;
 
        for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
                reg = host1x_sync_readl(host,
@@ -62,7 +62,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
 
 static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
 {
-       u32 i;
+       unsigned int i;
 
        for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
                host1x_sync_writel(host, 0xffffffffu,
@@ -72,10 +72,12 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
        }
 }
 
-static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
-       void (*syncpt_thresh_work)(struct work_struct *))
+static int
+_host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
+                           void (*syncpt_thresh_work)(struct work_struct *))
 {
-       int i, err;
+       unsigned int i;
+       int err;
 
        host1x_hw_intr_disable_all_syncpt_intrs(host);
 
@@ -106,18 +108,21 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
 }
 
 static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
-       u32 id, u32 thresh)
+                                             unsigned int id,
+                                             u32 thresh)
 {
        host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
 }
 
-static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id)
+static void _host1x_intr_enable_syncpt_intr(struct host1x *host,
+                                           unsigned int id)
 {
        host1x_sync_writel(host, BIT_MASK(id),
                HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
 }
 
-static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
+static void _host1x_intr_disable_syncpt_intr(struct host1x *host,
+                                            unsigned int id)
 {
        host1x_sync_writel(host, BIT_MASK(id),
                HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
@@ -127,8 +132,13 @@ static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
 
 static int _host1x_free_syncpt_irq(struct host1x *host)
 {
+       unsigned int i;
+
        devm_free_irq(host->dev, host->intr_syncpt_irq, host);
-       flush_workqueue(host->intr_wq);
+
+       for (i = 0; i < host->info->nb_pts; i++)
+               cancel_work_sync(&host->syncpt[i].intr.work);
+
        return 0;
 }
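
Dropping the private workqueue means teardown can no longer rely on
flush_workqueue(); each syncpoint's work item is cancelled individually
instead. The general shape of the system-workqueue pattern, sketched with a
hypothetical object:

        INIT_WORK(&obj->work, obj_work_fn);     /* setup */

        schedule_work(&obj->work);              /* e.g. from the ISR */

        cancel_work_sync(&obj->work);           /* teardown: also waits for a
                                                   running instance to finish */
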
 
index 56e85395ac24112faf17b4b06a1aa950e671df0b..c93f74fcce72cf4a22ce98fc1a277a7e480d937f 100644 (file)
@@ -26,8 +26,9 @@
  */
 static void syncpt_restore(struct host1x_syncpt *sp)
 {
+       u32 min = host1x_syncpt_read_min(sp);
        struct host1x *host = sp->host;
-       int min = host1x_syncpt_read_min(sp);
+
        host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
 }
 
@@ -37,6 +38,7 @@ static void syncpt_restore(struct host1x_syncpt *sp)
 static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
 {
        struct host1x *host = sp->host;
+
        host1x_sync_writel(host, sp->base_val,
                           HOST1X_SYNC_SYNCPT_BASE(sp->id));
 }
@@ -47,6 +49,7 @@ static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
 static void syncpt_read_wait_base(struct host1x_syncpt *sp)
 {
        struct host1x *host = sp->host;
+
        sp->base_val =
                host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
 }
@@ -85,6 +88,7 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
        if (!host1x_syncpt_client_managed(sp) &&
            host1x_syncpt_idle(sp))
                return -EINVAL;
+
        host1x_sync_writel(host, BIT_MASK(sp->id),
                           HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
        wmb();
@@ -95,10 +99,10 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
 /* remove a wait pointed to by patch_addr */
 static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
 {
-       u32 override = host1x_class_host_wait_syncpt(
-               HOST1X_SYNCPT_RESERVED, 0);
+       u32 override = host1x_class_host_wait_syncpt(HOST1X_SYNCPT_RESERVED, 0);
 
        *((u32 *)patch_addr) = override;
+
        return 0;
 }
 
index 2491bf82e30cafa05f5fc10721fd4a13993c6477..8b4fad0ab35d24de11380ec00f7e8e6802db5c3a 100644 (file)
@@ -122,18 +122,20 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
 static void action_wakeup(struct host1x_waitlist *waiter)
 {
        wait_queue_head_t *wq = waiter->data;
+
        wake_up(wq);
 }
 
 static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
 {
        wait_queue_head_t *wq = waiter->data;
+
        wake_up_interruptible(wq);
 }
 
 typedef void (*action_handler)(struct host1x_waitlist *waiter);
 
-static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
+static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_wakeup,
        action_wakeup_interruptible,
@@ -209,7 +211,7 @@ static void syncpt_thresh_work(struct work_struct *work)
                                host1x_syncpt_load(host->syncpt + id));
 }
 
-int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
                           enum host1x_intr_action action, void *data,
                           struct host1x_waitlist *waiter, void **ref)
 {
@@ -254,7 +256,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
        return 0;
 }
 
-void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
 {
        struct host1x_waitlist *waiter = ref;
        struct host1x_syncpt *syncpt;
@@ -277,9 +279,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
 
        mutex_init(&host->intr_mutex);
        host->intr_syncpt_irq = irq_sync;
-       host->intr_wq = create_workqueue("host_syncpt");
-       if (!host->intr_wq)
-               return -ENOMEM;
 
        for (id = 0; id < nb_pts; ++id) {
                struct host1x_syncpt *syncpt = host->syncpt + id;
@@ -288,7 +287,7 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
                INIT_LIST_HEAD(&syncpt->intr.wait_head);
                snprintf(syncpt->intr.thresh_irq_name,
                         sizeof(syncpt->intr.thresh_irq_name),
-                        "host1x_sp_%02d", id);
+                        "host1x_sp_%02u", id);
        }
 
        host1x_intr_start(host);
@@ -299,7 +298,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
 void host1x_intr_deinit(struct host1x *host)
 {
        host1x_intr_stop(host);
-       destroy_workqueue(host->intr_wq);
 }
 
 void host1x_intr_start(struct host1x *host)
@@ -342,7 +340,7 @@ void host1x_intr_stop(struct host1x *host)
                if (!list_empty(&syncpt[id].intr.wait_head)) {
                        /* output diagnostics */
                        mutex_unlock(&host->intr_mutex);
-                       pr_warn("%s cannot stop syncpt intr id=%d\n",
+                       pr_warn("%s cannot stop syncpt intr id=%u\n",
                                __func__, id);
                        return;
                }
index 2b8adf016a05b0fe3cbec0cc9cdb49e2da88ea3b..1370c2bb75b810ff5e275cfbb3b8ef8edb459183 100644 (file)
@@ -75,7 +75,7 @@ struct host1x_waitlist {
  *
  * This is a non-blocking API.
  */
-int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
+int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
        enum host1x_intr_action action, void *data,
        struct host1x_waitlist *waiter, void **ref);
 
@@ -84,7 +84,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
  * You must call this if you passed non-NULL as ref.
  * @ref the ref returned from host1x_intr_add_action()
  */
-void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref);
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref);
 
 /* Initialize host1x sync point interrupt */
 int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
index b4515d54403967dad522594c19dd2052f9c875c0..a91b7c4a6110f8b0bfe74589e32a226fefe7f341 100644 (file)
@@ -161,7 +161,7 @@ static int do_waitchks(struct host1x_job *job, struct host1x *host,
 
                if (host1x_syncpt_is_expired(sp, wait->thresh)) {
                        dev_dbg(host->dev,
-                               "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
+                               "drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
                                wait->syncpt_id, sp->name, wait->thresh,
                                host1x_syncpt_read_min(sp));
 
@@ -464,6 +464,7 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
 
        for (i = 0; i < job->num_gathers; i++) {
                struct host1x_job_gather *g = &job->gathers[i];
+
                size += g->words * sizeof(u32);
        }
 
@@ -514,6 +515,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
        bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
        for (i = 0; i < job->num_waitchk; i++) {
                u32 syncpt_id = job->waitchk[i].syncpt_id;
+
                if (syncpt_id < host1x_syncpt_nb_pts(host))
                        set_bit(syncpt_id, waitchk_mask);
        }
@@ -571,14 +573,16 @@ void host1x_job_unpin(struct host1x_job *job)
 
        for (i = 0; i < job->num_unpins; i++) {
                struct host1x_job_unpin_data *unpin = &job->unpins[i];
+
                host1x_bo_unpin(unpin->bo, unpin->sgt);
                host1x_bo_put(unpin->bo);
        }
+
        job->num_unpins = 0;
 
        if (job->gather_copy_size)
                dma_free_wc(job->channel->dev, job->gather_copy_size,
-                           job->gather_copy_mapped, job->gather_copy);
+                           job->gather_copy_mapped, job->gather_copy);
 }
 EXPORT_SYMBOL(host1x_job_unpin);
 
index 6b7fdc1e2ed078c337c8fa235633fcc105c4ccde..95589328ad52a27a7ba345f097a5740cca67d34f 100644 (file)
@@ -73,7 +73,7 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
                        return NULL;
        }
 
-       name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
+       name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
                        dev ? dev_name(dev) : NULL);
        if (!name)
                return NULL;
@@ -110,12 +110,14 @@ EXPORT_SYMBOL(host1x_syncpt_incr_max);
 void host1x_syncpt_restore(struct host1x *host)
 {
        struct host1x_syncpt *sp_base = host->syncpt;
-       u32 i;
+       unsigned int i;
 
        for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
                host1x_hw_syncpt_restore(host, sp_base + i);
+
        for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
                host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
+
        wmb();
 }
 
@@ -126,7 +128,7 @@ void host1x_syncpt_restore(struct host1x *host)
 void host1x_syncpt_save(struct host1x *host)
 {
        struct host1x_syncpt *sp_base = host->syncpt;
-       u32 i;
+       unsigned int i;
 
        for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
                if (host1x_syncpt_client_managed(sp_base + i))
@@ -146,6 +148,7 @@ void host1x_syncpt_save(struct host1x *host)
 u32 host1x_syncpt_load(struct host1x_syncpt *sp)
 {
        u32 val;
+
        val = host1x_hw_syncpt_load(sp->host, sp);
        trace_host1x_syncpt_load_min(sp->id, val);
 
@@ -157,10 +160,9 @@ u32 host1x_syncpt_load(struct host1x_syncpt *sp)
  */
 u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
 {
-       u32 val;
        host1x_hw_syncpt_load_wait_base(sp->host, sp);
-       val = sp->base_val;
-       return val;
+
+       return sp->base_val;
 }
 
 /*
@@ -179,6 +181,7 @@ EXPORT_SYMBOL(host1x_syncpt_incr);
 static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
 {
        host1x_hw_syncpt_load(sp->host, sp);
+
        return host1x_syncpt_is_expired(sp, thresh);
 }
 
@@ -186,7 +189,7 @@ static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
  * Main entrypoint for syncpoint value waits.
  */
 int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
-                       u32 *value)
+                      u32 *value)
 {
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
@@ -201,6 +204,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
        if (host1x_syncpt_is_expired(sp, thresh)) {
                if (value)
                        *value = host1x_syncpt_load(sp);
+
                return 0;
        }
 
@@ -209,6 +213,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
        if (host1x_syncpt_is_expired(sp, thresh)) {
                if (value)
                        *value = val;
+
                goto done;
        }
 
@@ -239,32 +244,42 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
        /* wait for the syncpoint, or timeout, or signal */
        while (timeout) {
                long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
-               int remain = wait_event_interruptible_timeout(wq,
+               int remain;
+
+               remain = wait_event_interruptible_timeout(wq,
                                syncpt_load_min_is_expired(sp, thresh),
                                check);
                if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
                        if (value)
                                *value = host1x_syncpt_load(sp);
+
                        err = 0;
+
                        break;
                }
+
                if (remain < 0) {
                        err = remain;
                        break;
                }
+
                timeout -= check;
+
                if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
                        dev_warn(sp->host->dev,
-                               "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
+                               "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
                                 current->comm, sp->id, sp->name,
                                 thresh, timeout);
 
                        host1x_debug_dump_syncpts(sp->host);
+
                        if (check_count == MAX_STUCK_CHECK_COUNT)
                                host1x_debug_dump(sp->host);
+
                        check_count++;
                }
        }
+
        host1x_intr_put_ref(sp->host, sp->id, ref);
 
 done:
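
For context, the loop above waits in SYNCPT_CHECK_PERIOD slices so that a
wedged client produces periodic diagnostics instead of a silent hang. A
condensed sketch of that shape, where done() and warn_stuck() stand in for the
real expiry check and dump:

        while (timeout) {
                long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
                long left = wait_event_interruptible_timeout(wq, done(), check);

                if (left > 0)
                        break;          /* condition met */
                if (left < 0)
                        return left;    /* interrupted by a signal */

                timeout -= check;
                warn_stuck();           /* periodic "stuck waiting" report */
        }
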
@@ -279,7 +294,9 @@ bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
 {
        u32 current_val;
        u32 future_val;
+
        smp_rmb();
+
        current_val = (u32)atomic_read(&sp->min_val);
        future_val = (u32)atomic_read(&sp->max_val);
 
@@ -341,14 +358,14 @@ int host1x_syncpt_init(struct host1x *host)
 {
        struct host1x_syncpt_base *bases;
        struct host1x_syncpt *syncpt;
-       int i;
+       unsigned int i;
 
-       syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
+       syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
                              GFP_KERNEL);
        if (!syncpt)
                return -ENOMEM;
 
-       bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases,
+       bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
                             GFP_KERNEL);
        if (!bases)
                return -ENOMEM;
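
devm_kcalloc() checks the n * size multiplication for overflow, which the
open-coded sizeof(*p) * n form passed to devm_kzalloc() does not. The idiom in
general:

        p = devm_kcalloc(dev, n, sizeof(*p), GFP_KERNEL);  /* zeroed, overflow-checked */
        if (!p)
                return -ENOMEM;
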
@@ -378,6 +395,7 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
                                            unsigned long flags)
 {
        struct host1x *host = dev_get_drvdata(dev->parent);
+
        return host1x_syncpt_alloc(host, dev, flags);
 }
 EXPORT_SYMBOL(host1x_syncpt_request);
@@ -398,8 +416,9 @@ EXPORT_SYMBOL(host1x_syncpt_free);
 
 void host1x_syncpt_deinit(struct host1x *host)
 {
-       int i;
        struct host1x_syncpt *sp = host->syncpt;
+       unsigned int i;
+
        for (i = 0; i < host->info->nb_pts; i++, sp++)
                kfree(sp->name);
 }
@@ -407,10 +426,11 @@ void host1x_syncpt_deinit(struct host1x *host)
 /*
  * Read max. It indicates how many operations there are in the queue, either
  * in a channel or in a software thread.
- * */
+ */
 u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
 {
        smp_rmb();
+
        return (u32)atomic_read(&sp->max_val);
 }
 EXPORT_SYMBOL(host1x_syncpt_read_max);
@@ -421,6 +441,7 @@ EXPORT_SYMBOL(host1x_syncpt_read_max);
 u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
 {
        smp_rmb();
+
        return (u32)atomic_read(&sp->min_val);
 }
 EXPORT_SYMBOL(host1x_syncpt_read_min);
@@ -431,25 +452,26 @@ u32 host1x_syncpt_read(struct host1x_syncpt *sp)
 }
 EXPORT_SYMBOL(host1x_syncpt_read);
 
-int host1x_syncpt_nb_pts(struct host1x *host)
+unsigned int host1x_syncpt_nb_pts(struct host1x *host)
 {
        return host->info->nb_pts;
 }
 
-int host1x_syncpt_nb_bases(struct host1x *host)
+unsigned int host1x_syncpt_nb_bases(struct host1x *host)
 {
        return host->info->nb_bases;
 }
 
-int host1x_syncpt_nb_mlocks(struct host1x *host)
+unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
 {
        return host->info->nb_mlocks;
 }
 
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
 {
        if (host->info->nb_pts < id)
                return NULL;
+
        return host->syncpt + id;
 }
 EXPORT_SYMBOL(host1x_syncpt_get);
index 9056465ecd3f1ea88963c397b6f2fdd0ae5f407a..f719205105ac19192bd8b7881d17ab87810696eb 100644 (file)
@@ -37,7 +37,7 @@ struct host1x_syncpt_base {
 };
 
 struct host1x_syncpt {
-       int id;
+       unsigned int id;
        atomic_t min_val;
        atomic_t max_val;
        u32 base_val;
@@ -58,13 +58,13 @@ int host1x_syncpt_init(struct host1x *host);
 void host1x_syncpt_deinit(struct host1x *host);
 
 /* Return number of sync point supported. */
-int host1x_syncpt_nb_pts(struct host1x *host);
+unsigned int host1x_syncpt_nb_pts(struct host1x *host);
 
 /* Return number of wait bases supported. */
-int host1x_syncpt_nb_bases(struct host1x *host);
+unsigned int host1x_syncpt_nb_bases(struct host1x *host);
 
 /* Return number of mlocks supported. */
-int host1x_syncpt_nb_mlocks(struct host1x *host);
+unsigned int host1x_syncpt_nb_mlocks(struct host1x *host);
 
 /*
  * Check sync point sanity. If max is larger than min, there are too many
index 2f29780e7c6863f7b3e9c82b0b9b5e7c0cc6be80..659475c1e44abea70ad9834d09c26e3c51d8e14f 100644 (file)
@@ -150,6 +150,9 @@ static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
 static int ipu_bus_format_to_map(u32 fmt)
 {
        switch (fmt) {
+       default:
+               WARN_ON(1);
+               /* fall-through */
        case MEDIA_BUS_FMT_RGB888_1X24:
                return IPU_DC_MAP_RGB24;
        case MEDIA_BUS_FMT_RGB565_1X16:
@@ -162,8 +165,6 @@ static int ipu_bus_format_to_map(u32 fmt)
                return IPU_DC_MAP_LVDS666;
        case MEDIA_BUS_FMT_BGR888_1X24:
                return IPU_DC_MAP_BGR24;
-       default:
-               return -EINVAL;
        }
 }
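
Moving default: to the top of the switch with an explicit fall-through makes
the mapping total: unknown bus formats now warn once and degrade to RGB24
instead of returning -EINVAL, which is what lets the error plumbing in
ipu_dc_init_sync() (next hunk) go away. The idiom in isolation:

        switch (fmt) {
        default:
                WARN_ON(1);
                /* fall-through: treat unknown formats as RGB24 */
        case MEDIA_BUS_FMT_RGB888_1X24:
                return IPU_DC_MAP_RGB24;
        /* ... remaining known formats ... */
        }
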
 
@@ -178,10 +179,6 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
        dc->di = ipu_di_get_num(di);
 
        map = ipu_bus_format_to_map(bus_format);
-       if (map < 0) {
-               dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
-               return map;
-       }
 
        /*
         * In interlaced mode we need more counters to create the asymmetric
index 359268e3a166851ba94303ee418f306817ba19ad..a8d87ddd8a1769e3a9b348f2f45b1272f99702df 100644 (file)
@@ -572,9 +572,6 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
        dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
                di->id, sig->mode.hactive, sig->mode.vactive);
 
-       if ((sig->mode.vsync_len == 0) || (sig->mode.hsync_len == 0))
-               return -EINVAL;
-
        dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
                clk_get_rate(di->clk_ipu),
                clk_get_rate(di->clk_di),
index 837b1ec228005ebf0b36a865d4f992588229dc0a..42705bb5aaa36b6e42d495032df6758ef9a0ddcd 100644 (file)
 #define DMFC_DP_CHAN_6B_24             16
 #define DMFC_DP_CHAN_6F_29             24
 
-#define DMFC_FIFO_SIZE_64              (3 << 3)
-#define DMFC_FIFO_SIZE_128             (2 << 3)
-#define DMFC_FIFO_SIZE_256             (1 << 3)
-#define DMFC_FIFO_SIZE_512             (0 << 3)
-
-#define DMFC_SEGMENT(x)                        ((x & 0x7) << 0)
-#define DMFC_BURSTSIZE_128             (0 << 6)
-#define DMFC_BURSTSIZE_64              (1 << 6)
-#define DMFC_BURSTSIZE_32              (2 << 6)
-#define DMFC_BURSTSIZE_16              (3 << 6)
-
 struct dmfc_channel_data {
        int             ipu_channel;
        unsigned long   channel_reg;
@@ -104,9 +93,6 @@ struct ipu_dmfc_priv;
 
 struct dmfc_channel {
        unsigned                        slots;
-       unsigned                        slotmask;
-       unsigned                        segment;
-       int                             burstsize;
        struct ipu_soc                  *ipu;
        struct ipu_dmfc_priv            *priv;
        const struct dmfc_channel_data  *data;
@@ -117,7 +103,6 @@ struct ipu_dmfc_priv {
        struct device *dev;
        struct dmfc_channel channels[DMFC_NUM_CHANNELS];
        struct mutex mutex;
-       unsigned long bandwidth_per_slot;
        void __iomem *base;
        int use_count;
 };
@@ -172,184 +157,6 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
 
-static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots,
-               int segment, int burstsize)
-{
-       struct ipu_dmfc_priv *priv = dmfc->priv;
-       u32 val, field;
-
-       dev_dbg(priv->dev,
-                       "dmfc: using %d slots starting from segment %d for IPU channel %d\n",
-                       slots, segment, dmfc->data->ipu_channel);
-
-       switch (slots) {
-       case 1:
-               field = DMFC_FIFO_SIZE_64;
-               break;
-       case 2:
-               field = DMFC_FIFO_SIZE_128;
-               break;
-       case 4:
-               field = DMFC_FIFO_SIZE_256;
-               break;
-       case 8:
-               field = DMFC_FIFO_SIZE_512;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       switch (burstsize) {
-       case 16:
-               field |= DMFC_BURSTSIZE_16;
-               break;
-       case 32:
-               field |= DMFC_BURSTSIZE_32;
-               break;
-       case 64:
-               field |= DMFC_BURSTSIZE_64;
-               break;
-       case 128:
-               field |= DMFC_BURSTSIZE_128;
-               break;
-       }
-
-       field |= DMFC_SEGMENT(segment);
-
-       val = readl(priv->base + dmfc->data->channel_reg);
-
-       val &= ~(0xff << dmfc->data->shift);
-       val |= field << dmfc->data->shift;
-
-       writel(val, priv->base + dmfc->data->channel_reg);
-
-       dmfc->slots = slots;
-       dmfc->segment = segment;
-       dmfc->burstsize = burstsize;
-       dmfc->slotmask = ((1 << slots) - 1) << segment;
-
-       return 0;
-}
-
-static int dmfc_bandwidth_to_slots(struct ipu_dmfc_priv *priv,
-               unsigned long bandwidth)
-{
-       int slots = 1;
-
-       while (slots * priv->bandwidth_per_slot < bandwidth)
-               slots *= 2;
-
-       return slots;
-}
-
-static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots)
-{
-       unsigned slotmask_need, slotmask_used = 0;
-       int i, segment = 0;
-
-       slotmask_need = (1 << slots) - 1;
-
-       for (i = 0; i < DMFC_NUM_CHANNELS; i++)
-               slotmask_used |= priv->channels[i].slotmask;
-
-       while (slotmask_need <= 0xff) {
-               if (!(slotmask_used & slotmask_need))
-                       return segment;
-
-               slotmask_need <<= 1;
-               segment++;
-       }
-
-       return -EBUSY;
-}
-
-void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc)
-{
-       struct ipu_dmfc_priv *priv = dmfc->priv;
-       int i;
-
-       dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n",
-                       dmfc->slots, dmfc->segment);
-
-       mutex_lock(&priv->mutex);
-
-       if (!dmfc->slots)
-               goto out;
-
-       dmfc->slotmask = 0;
-       dmfc->slots = 0;
-       dmfc->segment = 0;
-
-       for (i = 0; i < DMFC_NUM_CHANNELS; i++)
-               priv->channels[i].slotmask = 0;
-
-       for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
-               if (priv->channels[i].slots > 0) {
-                       priv->channels[i].segment =
-                               dmfc_find_slots(priv, priv->channels[i].slots);
-                       priv->channels[i].slotmask =
-                               ((1 << priv->channels[i].slots) - 1) <<
-                               priv->channels[i].segment;
-               }
-       }
-
-       for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
-               if (priv->channels[i].slots > 0)
-                       ipu_dmfc_setup_channel(&priv->channels[i],
-                                       priv->channels[i].slots,
-                                       priv->channels[i].segment,
-                                       priv->channels[i].burstsize);
-       }
-out:
-       mutex_unlock(&priv->mutex);
-}
-EXPORT_SYMBOL_GPL(ipu_dmfc_free_bandwidth);
-
-int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
-               unsigned long bandwidth_pixel_per_second, int burstsize)
-{
-       struct ipu_dmfc_priv *priv = dmfc->priv;
-       int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second);
-       int segment = -1, ret = 0;
-
-       dev_dbg(priv->dev, "dmfc: trying to allocate %ldMpixel/s for IPU channel %d\n",
-                       bandwidth_pixel_per_second / 1000000,
-                       dmfc->data->ipu_channel);
-
-       ipu_dmfc_free_bandwidth(dmfc);
-
-       mutex_lock(&priv->mutex);
-
-       if (slots > 8) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       /* For the MEM_BG channel, first try to allocate twice the slots */
-       if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC)
-               segment = dmfc_find_slots(priv, slots * 2);
-       else if (slots < 2)
-               /* Always allocate at least 128*4 bytes (2 slots) */
-               slots = 2;
-
-       if (segment >= 0)
-               slots *= 2;
-       else
-               segment = dmfc_find_slots(priv, slots);
-       if (segment < 0) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize);
-
-out:
-       mutex_unlock(&priv->mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
-
 void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
 {
        struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -384,7 +191,6 @@ EXPORT_SYMBOL_GPL(ipu_dmfc_get);
 
 void ipu_dmfc_put(struct dmfc_channel *dmfc)
 {
-       ipu_dmfc_free_bandwidth(dmfc);
 }
 EXPORT_SYMBOL_GPL(ipu_dmfc_put);
 
@@ -412,20 +218,15 @@ int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
                priv->channels[i].priv = priv;
                priv->channels[i].ipu = ipu;
                priv->channels[i].data = &dmfcdata[i];
-       }
-
-       writel(0x0, priv->base + DMFC_WR_CHAN);
-       writel(0x0, priv->base + DMFC_DP_CHAN);
 
-       /*
-        * We have a total bandwidth of clkrate * 4pixel divided
-        * into 8 slots.
-        */
-       priv->bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8;
-
-       dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n",
-                       priv->bandwidth_per_slot / 1000000);
+               if (dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC ||
+                   dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_FG_SYNC ||
+                   dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_DC_SYNC)
+                       priv->channels[i].slots = 2;
+       }
 
+       writel(0x00000050, priv->base + DMFC_WR_CHAN);
+       writel(0x00005654, priv->base + DMFC_DP_CHAN);
        writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
        writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
        writel(0x00000003, priv->base + DMFC_GENERAL1);
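
For context on the allocator this hunk replaces: the deleted comment sized eight DMFC slots from the IPU clock via bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8. A worked example of that arithmetic, assuming a 264 MHz IPU clock chosen purely for illustration (the rate is not taken from this patch):

    unsigned long clkrate = 264000000UL;        /* illustrative IPU clock */
    unsigned long per_slot = clkrate * 4 / 8;   /* = 132000000 pixel/s per slot */
    /* A 1080p60 scanout needs ~148.5 Mpixel/s, i.e. more than one slot,
     * so the removed code's "at least 2 slots" floor would apply. */
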
index cbd7c986d926e072a234b248762e185fda1eebe4..5f962bfcb43c731c54ad8a6d4c54b564a9acd6f8 100644 (file)
@@ -30,6 +30,7 @@
 
 #define pr_fmt(fmt) "vga_switcheroo: " fmt
 
+#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/debugfs.h>
 #include <linux/fb.h>
@@ -51,9 +52,9 @@
  *
  * * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs.
  * * muxless: Dual GPUs but only one of them is connected to outputs.
- *     The other one is merely used to offload rendering, its results
- *     are copied over PCIe into the framebuffer. On Linux this is
- *     supported with DRI PRIME.
+ *   The other one is merely used to offload rendering, its results
+ *   are copied over PCIe into the framebuffer. On Linux this is
+ *   supported with DRI PRIME.
  *
  * Hybrid graphics started to appear in the late Naughties and were initially
  * all muxed. Newer laptops moved to a muxless architecture for cost reasons.
@@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev,
  *
  * Register vga client (GPU). Enable vga_switcheroo if another GPU and a
  * handler have already registered. The power state of the client is assumed
- * to be ON.
+ * to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called
+ * to ensure that all prerequisites are met.
  *
  * Return: 0 on success, -ENOMEM on memory allocation error.
  */
@@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
  * @id: client identifier
  *
  * Register audio client (audio device on a GPU). The power state of the
- * client is assumed to be ON.
+ * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer()
+ * shall be called to ensure that all prerequisites are met.
  *
  * Return: 0 on success, -ENOMEM on memory allocation error.
  */
@@ -375,6 +378,33 @@ find_active_client(struct list_head *head)
        return NULL;
 }
 
+/**
+ * vga_switcheroo_client_probe_defer() - whether to defer probing a given client
+ * @pdev: client pci device
+ *
+ * Determine whether any prerequisites are not fulfilled to probe a given
+ * client. Drivers shall invoke this early on in their ->probe callback
+ * and return %-EPROBE_DEFER if it evaluates to %true. Thou shalt not
+ * register the client ere thou hast called this.
+ *
+ * Return: %true if probing should be deferred, otherwise %false.
+ */
+bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
+{
+       if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+               /*
+                * apple-gmux is needed on pre-retina MacBook Pro
+                * to probe the panel if pdev is the inactive GPU.
+                */
+               if (apple_gmux_present() && pdev != vga_default_device() &&
+                   !vgasr_priv.handler_flags)
+                       return true;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL(vga_switcheroo_client_probe_defer);
+
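
The kernel-doc above asks GPU drivers to consult this helper before registering a client. A minimal sketch of the intended call site in a driver's ->probe (the mydrv_* names and ops struct are illustrative, not part of this patch):

    static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            if (vga_switcheroo_client_probe_defer(pdev))
                    return -EPROBE_DEFER;   /* e.g. wait for apple-gmux */

            /* ... bring up the device ... */
            return vga_switcheroo_register_client(pdev, &mydrv_client_ops,
                                                  false);
    }
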
 /**
  * vga_switcheroo_get_client_state() - obtain power state of a given client
  * @pdev: client pci device
@@ -530,21 +560,21 @@ EXPORT_SYMBOL(vga_switcheroo_unlock_ddc);
  * * OFF: Power off the device not in use.
  * * ON: Power on the device not in use.
  * * IGD: Switch to the integrated graphics device.
- *     Power on the integrated GPU if necessary, power off the discrete GPU.
- *     Prerequisite is that no user space processes (e.g. Xorg, alsactl)
- *     have opened device files of the GPUs or the audio client. If the
- *     switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
- *     and /dev/snd/controlC1 to identify processes blocking the switch.
+ *   Power on the integrated GPU if necessary, power off the discrete GPU.
+ *   Prerequisite is that no user space processes (e.g. Xorg, alsactl)
+ *   have opened device files of the GPUs or the audio client. If the
+ *   switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
+ *   and /dev/snd/controlC1 to identify processes blocking the switch.
  * * DIS: Switch to the discrete graphics device.
  * * DIGD: Delayed switch to the integrated graphics device.
- *     This will perform the switch once the last user space process has
- *     closed the device files of the GPUs and the audio client.
+ *   This will perform the switch once the last user space process has
+ *   closed the device files of the GPUs and the audio client.
  * * DDIS: Delayed switch to the discrete graphics device.
  * * MIGD: Mux-only switch to the integrated graphics device.
- *     Does not remap console or change the power state of either gpu.
- *     If the integrated GPU is currently off, the screen will turn black.
- *     If it is on, the screen will show whatever happens to be in VRAM.
- *     Either way, the user has to blindly enter the command to switch back.
+ *   Does not remap console or change the power state of either gpu.
+ *   If the integrated GPU is currently off, the screen will turn black.
+ *   If it is on, the screen will show whatever happens to be in VRAM.
+ *   Either way, the user has to blindly enter the command to switch back.
  * * MDIS: Mux-only switch to the discrete graphics device.
  *
  * For GPUs whose power state is controlled by the driver's runtime pm,
index aad8c162a825dc60516700b1bd8caf8efeec4244..0cd4f72162394b5652fc529681cfbc8774b50a36 100644 (file)
@@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev)
        struct elo_priv *priv = hid_get_drvdata(hdev);
 
        hid_hw_stop(hdev);
-       flush_workqueue(wq);
+       cancel_delayed_work_sync(&priv->work);
        kfree(priv);
 }
 
index c741f5e50a6681cd0b6267fc04c5ca11f1b0a1b5..fb6f1f4472795700c1f12889c405b9350b4ebcb0 100644 (file)
@@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
 #define MT_QUIRK_ALWAYS_VALID          (1 << 4)
 #define MT_QUIRK_VALID_IS_INRANGE      (1 << 5)
 #define MT_QUIRK_VALID_IS_CONFIDENCE   (1 << 6)
+#define MT_QUIRK_CONFIDENCE            (1 << 7)
 #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE   (1 << 8)
 #define MT_QUIRK_NO_AREA               (1 << 9)
 #define MT_QUIRK_IGNORE_DUPLICATES     (1 << 10)
@@ -78,6 +79,7 @@ struct mt_slot {
        __s32 contactid;        /* the device ContactID assigned to this slot */
        bool touch_state;       /* is the touch valid? */
        bool inrange_state;     /* is the finger in proximity of the sensor? */
+       bool confidence_state;  /* is the touch made by a finger? */
 };
 
 struct mt_class {
@@ -503,10 +505,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        return 1;
                case HID_DG_CONFIDENCE:
                        if (cls->name == MT_CLS_WIN_8 &&
-                               field->application == HID_DG_TOUCHPAD) {
-                               cls->quirks &= ~MT_QUIRK_ALWAYS_VALID;
-                               cls->quirks |= MT_QUIRK_VALID_IS_CONFIDENCE;
-                       }
+                               field->application == HID_DG_TOUCHPAD)
+                               cls->quirks |= MT_QUIRK_CONFIDENCE;
                        mt_store_field(usage, td, hi);
                        return 1;
                case HID_DG_TIPSWITCH:
@@ -619,6 +619,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
                return;
 
        if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
+               int active;
                int slotnum = mt_compute_slot(td, input);
                struct mt_slot *s = &td->curdata;
                struct input_mt *mt = input->mt;
@@ -633,10 +634,14 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
                                return;
                }
 
+               if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
+                       s->confidence_state = 1;
+               active = (s->touch_state || s->inrange_state) &&
+                                                       s->confidence_state;
+
                input_mt_slot(input, slotnum);
-               input_mt_report_slot_state(input, MT_TOOL_FINGER,
-                       s->touch_state || s->inrange_state);
-               if (s->touch_state || s->inrange_state) {
+               input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
+               if (active) {
                        /* this finger is in proximity of the sensor */
                        int wide = (s->w > s->h);
                        /* divided by two to match visual scale of touch */
@@ -701,6 +706,8 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
                        td->curdata.touch_state = value;
                        break;
                case HID_DG_CONFIDENCE:
+                       if (quirks & MT_QUIRK_CONFIDENCE)
+                               td->curdata.confidence_state = value;
                        if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
                                td->curvalid = value;
                        break;
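
The net effect of the MT_QUIRK_CONFIDENCE handling introduced above, summarized as a truth table (a sketch derived from these hunks, not datasheet text):

    /* touch  inrange  confidence -> active
     *   1       x         1          1
     *   0       1         1          1
     *   x       x         0          0    (e.g. a hovering palm is dropped)
     * Without the quirk, confidence_state is forced to 1, preserving the
     * old (touch_state || inrange_state) behaviour.
     */
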
@@ -1401,6 +1408,11 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
                        USB_DEVICE_ID_NOVATEK_PCT) },
 
+       /* Ntrig Panel */
+       { .driver_data = MT_CLS_NSMU,
+               HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+                       USB_VENDOR_ID_NTRIG, 0x1b05) },
+
        /* PixArt optical touch screen */
        { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
                MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
index 2f1ddca6f2e0a3a4712bce434d5b29e1556e216c..700145b1508894f30a018aef278d15cfc458ef3a 100644 (file)
@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
                                        goto inval;
                        } else if (uref->usage_index >= field->report_count)
                                goto inval;
-
-                       else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
-                                (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
-                                 uref->usage_index + uref_multi->num_values > field->report_count))
-                               goto inval;
                }
 
+               if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+                   (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+                    uref->usage_index + uref_multi->num_values > field->report_count))
+                       goto inval;
+
                switch (cmd) {
                case HIDIOCGUSAGE:
                        uref->value = field->value[uref->usage_index];
index c43318d3416e20a4d027a32402b1cf751718b8c6..2ac87d553e22f67ebaca32d62fb76375e3d22bdc 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
 
 #include <linux/i8k.h>
 
 
 static DEFINE_MUTEX(i8k_mutex);
 static char bios_version[4];
+static char bios_machineid[16];
 static struct device *i8k_hwmon_dev;
 static u32 i8k_hwmon_flags;
 static uint i8k_fan_mult = I8K_FAN_MULT;
 static uint i8k_pwm_mult;
 static uint i8k_fan_max = I8K_FAN_HIGH;
+static bool disallow_fan_type_call;
 
 #define I8K_HWMON_HAVE_TEMP1   (1 << 0)
 #define I8K_HWMON_HAVE_TEMP2   (1 << 1)
@@ -94,13 +97,13 @@ module_param(ignore_dmi, bool, 0);
 MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
 
 #if IS_ENABLED(CONFIG_I8K)
-static bool restricted;
+static bool restricted = true;
 module_param(restricted, bool, 0);
-MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
+MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
 
 static bool power_status;
 module_param(power_status, bool, 0600);
-MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
+MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
 #endif
 
 static uint fan_mult;
@@ -235,14 +238,28 @@ static int i8k_get_fan_speed(int fan)
 /*
  * Read the fan type.
  */
-static int i8k_get_fan_type(int fan)
+static int _i8k_get_fan_type(int fan)
 {
        struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
 
+       if (disallow_fan_type_call)
+               return -EINVAL;
+
        regs.ebx = fan & 0xff;
        return i8k_smm(&regs) ? : regs.eax & 0xff;
 }
 
+static int i8k_get_fan_type(int fan)
+{
+       /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
+       static int types[2] = { INT_MIN, INT_MIN };
+
+       if (types[fan] == INT_MIN)
+               types[fan] = _i8k_get_fan_type(fan);
+
+       return types[fan];
+}
+
 /*
  * Read the fan nominal rpm for specific fan speed.
  */
@@ -387,14 +404,20 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
 
        switch (cmd) {
        case I8K_BIOS_VERSION:
+               if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) ||
+                   !isdigit(bios_version[2]))
+                       return -EINVAL;
+
                val = (bios_version[0] << 16) |
                                (bios_version[1] << 8) | bios_version[2];
                break;
 
        case I8K_MACHINE_ID:
-               memset(buff, 0, 16);
-               strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
-                       sizeof(buff));
+               if (restricted && !capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               memset(buff, 0, sizeof(buff));
+               strlcpy(buff, bios_machineid, sizeof(buff));
                break;
 
        case I8K_FN_STATUS:
@@ -511,7 +534,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
        seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
                   I8K_PROC_FMT,
                   bios_version,
-                  i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+                  (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
                   cpu_temp,
                   left_fan, right_fan, left_speed, right_speed,
                   ac_power, fn_key);
@@ -718,6 +741,9 @@ static struct attribute *i8k_attrs[] = {
 static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
                              int index)
 {
+       if (disallow_fan_type_call &&
+           (index == 9 || index == 12))
+               return 0;
        if (index >= 0 && index <= 1 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
                return 0;
@@ -767,13 +793,17 @@ static int __init i8k_init_hwmon(void)
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
 
-       /* First fan attributes, if fan type is OK */
-       err = i8k_get_fan_type(0);
+       /* First fan attributes, if fan status or type is OK */
+       err = i8k_get_fan_status(0);
+       if (err < 0)
+               err = i8k_get_fan_type(0);
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
 
-       /* Second fan attributes, if fan type is OK */
-       err = i8k_get_fan_type(1);
+       /* Second fan attributes, if fan status or type is OK */
+       err = i8k_get_fan_status(1);
+       if (err < 0)
+               err = i8k_get_fan_type(1);
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
 
@@ -929,12 +959,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
-static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+/*
+ * On some machines, once I8K_SMM_GET_FAN_TYPE is issued the CPU fan speed
+ * randomly goes up and down due to a bug in the Dell SMM or BIOS. Here is the
+ * blacklist of affected Dell machines for which we disallow the
+ * I8K_SMM_GET_FAN_TYPE call.
+ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
+ */
+static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
        {
-               /*
-                * CPU fan speed going up and down on Dell Studio XPS 8000
-                * for unknown reasons.
-                */
                .ident = "Dell Studio XPS 8000",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -942,16 +974,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
                },
        },
        {
-               /*
-                * CPU fan speed going up and down on Dell Studio XPS 8100
-                * for unknown reasons.
-                */
                .ident = "Dell Studio XPS 8100",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
                },
        },
+       {
+               .ident = "Dell Inspiron 580",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
+               },
+       },
        { }
 };
 
@@ -966,8 +1001,7 @@ static int __init i8k_probe(void)
        /*
         * Get DMI information
         */
-       if (!dmi_check_system(i8k_dmi_table) ||
-           dmi_check_system(i8k_blacklist_dmi_table)) {
+       if (!dmi_check_system(i8k_dmi_table)) {
                if (!ignore_dmi && !force)
                        return -ENODEV;
 
@@ -978,8 +1012,13 @@ static int __init i8k_probe(void)
                        i8k_get_dmi_data(DMI_BIOS_VERSION));
        }
 
+       if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
+               disallow_fan_type_call = true;
+
        strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
                sizeof(bios_version));
+       strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+               sizeof(bios_machineid));
 
        /*
         * Get SMM Dell signature
index eb97a9241d171433aa5e4ce60395718fec97af06..15aa49d082c45752fd2751f9229d16bc93196868 100644 (file)
@@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data)
  */
 static int read_registers(struct fam15h_power_data *data)
 {
-       int this_cpu, ret, cpu;
        int core, this_core;
        cpumask_var_t mask;
+       int ret, cpu;
 
        ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
        if (!ret)
@@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data)
        memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
 
        get_online_cpus();
-       this_cpu = smp_processor_id();
 
        /*
         * Choose the first online core of each compute unit, and then
@@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data)
                cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
        }
 
-       if (cpumask_test_cpu(this_cpu, mask))
-               do_read_registers_on_cu(data);
+       on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
 
-       smp_call_function_many(mask, do_read_registers_on_cu, data, true);
        put_online_cpus();
-
        free_cpumask_var(mask);
 
        return 0;
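
The simplification above relies on the semantics of on_each_cpu_mask(): unlike smp_call_function_many(), which skips the calling CPU, it also invokes the function locally when the current CPU is in the mask, so the separate cpumask_test_cpu() branch becomes redundant:

    /* Runs func(info) on every CPU set in mask - including the caller's
     * CPU if it is in the mask - and, with wait == true, returns only
     * after all invocations have completed. */
    on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
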
index c9ff08dbe10cefcf8731c1360370edd037307d43..e30a5939dc0d5566ae76ef31e790a578d8b0877c 100644 (file)
@@ -375,7 +375,7 @@ struct lm90_data {
        int kind;
        u32 flags;
 
-       int update_interval;    /* in milliseconds */
+       unsigned int update_interval; /* in milliseconds */
 
        u8 config_orig;         /* Original configuration register value */
        u8 convrate_orig;       /* Original conversion rate register value */
index 847d1b5f2c13b997f46d5a565bfba542fdaaee4e..688be9e060fc547f57a259bae61a49579edfa8e8 100644 (file)
@@ -300,13 +300,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
        if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
                /*
                 * The trace run will continue with the same allocated trace
-                * buffer. As such zero-out the buffer so that we don't end
-                * up with stale data.
-                *
-                * Since the tracer is still enabled drvdata::buf
-                * can't be NULL.
+                * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
+                * so we don't have to explicitly clear it. Also, since the
+                * tracer is still enabled drvdata::buf can't be NULL.
                 */
-               memset(drvdata->buf, 0, drvdata->size);
                tmc_etr_enable_hw(drvdata);
        } else {
                /*
@@ -315,7 +312,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
                 */
                vaddr = drvdata->vaddr;
                paddr = drvdata->paddr;
-               drvdata->buf = NULL;
+               drvdata->buf = drvdata->vaddr = NULL;
        }
 
        drvdata->reading = false;
index 5443d03a1eec4b09feedd4be794d7ef5d2c2dca4..d08d1ab9bba5d7e1c0772d48b6d43802057e323d 100644 (file)
@@ -385,7 +385,6 @@ static int _coresight_build_path(struct coresight_device *csdev,
        int i;
        bool found = false;
        struct coresight_node *node;
-       struct coresight_connection *conn;
 
        /* An activated sink has been found.  Enqueue the element */
        if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -394,8 +393,9 @@ static int _coresight_build_path(struct coresight_device *csdev,
 
        /* Not a sink - recursively explore each port found on this element */
        for (i = 0; i < csdev->nr_outport; i++) {
-               conn = &csdev->conns[i];
-               if (_coresight_build_path(conn->child_dev, path) == 0) {
+               struct coresight_device *child_dev = csdev->conns[i].child_dev;
+
+               if (child_dev && _coresight_build_path(child_dev, path) == 0) {
                        found = true;
                        break;
                }
@@ -425,6 +425,7 @@ out:
 struct list_head *coresight_build_path(struct coresight_device *csdev)
 {
        struct list_head *path;
+       int rc;
 
        path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
        if (!path)
@@ -432,9 +433,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev)
 
        INIT_LIST_HEAD(path);
 
-       if (_coresight_build_path(csdev, path)) {
+       rc = _coresight_build_path(csdev, path);
+       if (rc) {
                kfree(path);
-               path = NULL;
+               return ERR_PTR(rc);
        }
 
        return path;
@@ -507,8 +509,9 @@ int coresight_enable(struct coresight_device *csdev)
                goto out;
 
        path = coresight_build_path(csdev);
-       if (!path) {
+       if (IS_ERR(path)) {
                pr_err("building path(s) failed\n");
+               ret = PTR_ERR(path);
                goto out;
        }
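
This moves coresight_build_path() to the kernel's ERR_PTR convention (linux/err.h) so callers can propagate the actual errno rather than guessing from a NULL return. A generic sketch of the idiom, using a hypothetical struct foo:

    static struct foo *foo_create(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return ERR_PTR(-ENOMEM);  /* errno rides in the pointer */
            return f;
    }

    /* caller side */
    struct foo *f = foo_create();
    if (IS_ERR(f))
            return PTR_ERR(f);
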
 
index 64b1208bca5e15af2295e9b26b9581af67f474c4..4a60ad2147479081e2fd8fd1b29f35f4dbb58cd6 100644 (file)
@@ -245,6 +245,13 @@ struct i801_priv {
        struct platform_device *mux_pdev;
 #endif
        struct platform_device *tco_pdev;
+
+       /*
+        * If set to true the host controller registers are reserved for
+        * ACPI AML use. Protected by acpi_lock.
+        */
+       bool acpi_reserved;
+       struct mutex acpi_lock;
 };
 
 #define FEATURE_SMBUS_PEC      (1 << 0)
@@ -718,6 +725,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
        int ret = 0, xact = 0;
        struct i801_priv *priv = i2c_get_adapdata(adap);
 
+       mutex_lock(&priv->acpi_lock);
+       if (priv->acpi_reserved) {
+               mutex_unlock(&priv->acpi_lock);
+               return -EBUSY;
+       }
+
        pm_runtime_get_sync(&priv->pci_dev->dev);
 
        hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
@@ -820,6 +833,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
 out:
        pm_runtime_mark_last_busy(&priv->pci_dev->dev);
        pm_runtime_put_autosuspend(&priv->pci_dev->dev);
+       mutex_unlock(&priv->acpi_lock);
        return ret;
 }
 
@@ -1257,6 +1271,83 @@ static void i801_add_tco(struct i801_priv *priv)
        priv->tco_pdev = pdev;
 }
 
+#ifdef CONFIG_ACPI
+static acpi_status
+i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+                    u64 *value, void *handler_context, void *region_context)
+{
+       struct i801_priv *priv = handler_context;
+       struct pci_dev *pdev = priv->pci_dev;
+       acpi_status status;
+
+       /*
+        * Once BIOS AML code touches the OpRegion we warn and inhibit any
+        * further access from the driver itself. This device is now owned
+        * by the system firmware.
+        */
+       mutex_lock(&priv->acpi_lock);
+
+       if (!priv->acpi_reserved) {
+               priv->acpi_reserved = true;
+
+               dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
+               dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
+
+               /*
+                * BIOS is accessing the host controller so prevent it from
+                * suspending automatically from now on.
+                */
+               pm_runtime_get_sync(&pdev->dev);
+       }
+
+       if ((function & ACPI_IO_MASK) == ACPI_READ)
+               status = acpi_os_read_port(address, (u32 *)value, bits);
+       else
+               status = acpi_os_write_port(address, (u32)*value, bits);
+
+       mutex_unlock(&priv->acpi_lock);
+
+       return status;
+}
+
+static int i801_acpi_probe(struct i801_priv *priv)
+{
+       struct acpi_device *adev;
+       acpi_status status;
+
+       adev = ACPI_COMPANION(&priv->pci_dev->dev);
+       if (adev) {
+               status = acpi_install_address_space_handler(adev->handle,
+                               ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
+                               NULL, priv);
+               if (ACPI_SUCCESS(status))
+                       return 0;
+       }
+
+       return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
+}
+
+static void i801_acpi_remove(struct i801_priv *priv)
+{
+       struct acpi_device *adev;
+
+       adev = ACPI_COMPANION(&priv->pci_dev->dev);
+       if (!adev)
+               return;
+
+       acpi_remove_address_space_handler(adev->handle,
+               ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
+
+       mutex_lock(&priv->acpi_lock);
+       if (priv->acpi_reserved)
+               pm_runtime_put(&priv->pci_dev->dev);
+       mutex_unlock(&priv->acpi_lock);
+}
+#else
+static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
+static inline void i801_acpi_remove(struct i801_priv *priv) { }
+#endif
+
 static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        unsigned char temp;
@@ -1274,6 +1365,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        priv->adapter.dev.parent = &dev->dev;
        ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
        priv->adapter.retries = 3;
+       mutex_init(&priv->acpi_lock);
 
        priv->pci_dev = dev;
        switch (dev->device) {
@@ -1336,10 +1428,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
                return -ENODEV;
        }
 
-       err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
-       if (err) {
+       if (i801_acpi_probe(priv))
                return -ENODEV;
-       }
 
        err = pcim_iomap_regions(dev, 1 << SMBBAR,
                                 dev_driver_string(&dev->dev));
@@ -1348,6 +1438,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        "Failed to request SMBus region 0x%lx-0x%Lx\n",
                        priv->smba,
                        (unsigned long long)pci_resource_end(dev, SMBBAR));
+               i801_acpi_remove(priv);
                return err;
        }
 
@@ -1412,6 +1503,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        err = i2c_add_adapter(&priv->adapter);
        if (err) {
                dev_err(&dev->dev, "Failed to add SMBus adapter\n");
+               i801_acpi_remove(priv);
                return err;
        }
 
@@ -1438,6 +1530,7 @@ static void i801_remove(struct pci_dev *dev)
 
        i801_del_mux(priv);
        i2c_del_adapter(&priv->adapter);
+       i801_acpi_remove(priv);
        pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
 
        platform_device_unregister(priv->tco_pdev);
index aa5f01efd826693720a19d662058b953a8b2fbeb..30ae35146723a466afc20767d5f11853e040faf6 100644 (file)
@@ -934,8 +934,15 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
                return result;
 
        for (i = 0; i < length; i++) {
-               /* for the last byte TWSI_CTL_AAK must not be set */
-               if (i + 1 == length)
+               /*
+                * For the last byte to be received, TWSI_CTL_AAK must not be set.
+                *
+                * A special case is I2C_M_RECV_LEN where we don't know the
+                * additional length yet. If recv_len is set we assume we're
+                * not reading the final byte and therefore need to set
+                * TWSI_CTL_AAK.
+                */
+               if ((i + 1 == length) && !(recv_len && i == 0))
                        final_read = true;
 
                /* clear iflg to allow next event */
@@ -950,12 +957,8 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
 
                data[i] = octeon_i2c_data_read(i2c);
                if (recv_len && i == 0) {
-                       if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) {
-                               dev_err(i2c->dev,
-                                       "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
-                                       __func__, data[i]);
+                       if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
                                return -EPROTO;
-                       }
                        length += data[i];
                }
 
index cc6439ab3f714145f4d86c49c3d3c79716a058b1..041050edd80991713d9bb5a776c9a1d402f23bf5 100644 (file)
@@ -1268,6 +1268,8 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
                }
        }
 
+       idx = 0;
+
        do {
                if (msgs[idx].len == 0) {
                        ret = -EINVAL;
index 445398c314a3303a41ba51313bbcbfd3a40c421c..b126dbaa47e37014acaf78ba528a6e177bffa9d8 100644 (file)
@@ -912,7 +912,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        ret = tegra_i2c_init(i2c_dev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to initialize i2c controller");
-               goto unprepare_div_clk;
+               goto disable_div_clk;
        }
 
        ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
index e33022e2d459f18b57027337c9c9de9243488c07..6e5fac6a5262a0824d69ebe534b18fd855d35922 100644 (file)
@@ -56,9 +56,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
  * The board info passed can safely be __initdata, but be careful of embedded
  * pointers (for platform_data, functions, etc) since that won't be copied.
  */
-int __init
-i2c_register_board_info(int busnum,
-       struct i2c_board_info const *info, unsigned len)
+int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
 {
        int status;
 
index 6773cadf7c9fdd09248bae67e5d0197e16e97496..c6a90b4a9c626dcf4fdf65e51e2f01b5b6ac4968 100644 (file)
@@ -145,7 +145,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
                mux->data.idle_in_use = true;
 
        /* map address from "reg" if exists */
-       if (of_address_to_resource(np, 0, &res)) {
+       if (of_address_to_resource(np, 0, &res) == 0) {
                mux->data.reg_size = resource_size(&res);
                mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
                if (IS_ERR(mux->data.reg))
@@ -260,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = {
        .remove = i2c_mux_reg_remove,
        .driver = {
                .name   = "i2c-mux-reg",
+               .of_match_table = of_match_ptr(i2c_mux_reg_of_match),
        },
 };
 
index 923f56598d4b51a84ee7b5b322c2c9bfefe4f0a5..3a9f106787d28b2f85248402b68628fd02604460 100644 (file)
@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
 
        mutex_lock(&st->buf_lock);
        ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
-       if (ret)
+       if (ret < 0)
                goto error_ret;
        st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
        st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
                break;
        case IIO_CHAN_INFO_SCALE:
                ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
-               if (ret)
+               if (ret < 0)
                        goto error_ret;
                *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
                ret = IIO_VAL_INT_PLUS_MICRO;
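
Both hunks above correct the same pattern: spi_w8r8() returns the byte read (0-255) on success and a negative errno on failure, so the old `if (ret)` rejected every nonzero register value. The fixed check, in isolation:

    ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
    if (ret < 0)
            goto error_ret;         /* genuine error */
    /* ret now holds the register contents, 0x00..0xff */
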
index a1e642ee13d6a4f8af5eb0f6a9c92bafe51460d9..7fddc137e91ed7942e91a42a70f19f6320cfc0df 100644 (file)
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
 
 int st_accel_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
 }
 
index dc73f2d85e6d102ef719557e4db2b08c68901ab5..4d95bfc4786cafc6de2fcc659950860596193877 100644 (file)
@@ -741,6 +741,7 @@ static const struct iio_info accel_info = {
 static const struct iio_trigger_ops st_accel_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
 #else
index 21e19b60e2b98305b209a648690e8f18f64b40f9..2123f0ac2e2a47b608eedc6095f1681802edfcd9 100644 (file)
@@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
 
        st = iio_priv(indio_dev);
 
-       st->reg = devm_regulator_get(&spi->dev, "vref");
-       if (!IS_ERR_OR_NULL(st->reg)) {
+       st->reg = devm_regulator_get_optional(&spi->dev, "vref");
+       if (!IS_ERR(st->reg)) {
                ret = regulator_enable(st->reg);
                if (ret)
                        return ret;
@@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
 
                st->vref_mv = ret / 1000;
        } else {
+               /* Any other error indicates that the regulator does exist */
+               if (PTR_ERR(st->reg) != -ENODEV)
+                       return PTR_ERR(st->reg);
                /* Use internal reference */
                st->vref_mv = 2500;
        }
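
The hunk above adopts the devm_regulator_get_optional() idiom: only -ENODEV means the supply is genuinely absent; any other error, notably -EPROBE_DEFER, must be returned to the caller. Condensed:

    st->reg = devm_regulator_get_optional(&spi->dev, "vref");
    if (IS_ERR(st->reg)) {
            if (PTR_ERR(st->reg) != -ENODEV)
                    return PTR_ERR(st->reg);  /* includes -EPROBE_DEFER */
            st->vref_mv = 2500;               /* internal reference */
    } else {
            /* enable the external supply and read its voltage */
    }
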
index c55898543a47d9a2db596e89f59c5416b19bb0b7..f1693dbebb8ac23359035786ef26b6607191058d 100644 (file)
@@ -57,31 +57,20 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct st_sensor_data *sdata = iio_priv(indio_dev);
+       s64 timestamp;
 
-       /* If we have a status register, check if this IRQ came from us */
-       if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
-               u8 status;
-
-               len = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-                          sdata->sensor_settings->drdy_irq.addr_stat_drdy,
-                          &status);
-               if (len < 0)
-                       dev_err(sdata->dev, "could not read channel status\n");
-
-               /*
-                * If this was not caused by any channels on this sensor,
-                * return IRQ_NONE
-                */
-               if (!(status & (u8)indio_dev->active_scan_mask[0]))
-                       return IRQ_NONE;
-       }
+       /* If we do timestamping here, do it before reading the values */
+       if (sdata->hw_irq_trigger)
+               timestamp = sdata->hw_timestamp;
+       else
+               timestamp = iio_get_time_ns();
 
        len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
        if (len < 0)
                goto st_sensors_get_buffer_element_error;
 
        iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
-               pf->timestamp);
+                                          timestamp);
 
 st_sensors_get_buffer_element_error:
        iio_trigger_notify_done(indio_dev->trig);
index dffe006921694840c24356f44bd400299e33b8f3..9e59c90f6a8d7baa1ee091ea588b75a39414032c 100644 (file)
@@ -363,6 +363,11 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
        if (err < 0)
                return err;
 
+       /* Disable DRDY, this might still be enabled after reboot. */
+       err = st_sensors_set_dataready_irq(indio_dev, false);
+       if (err < 0)
+               return err;
+
        if (sdata->current_fullscale) {
                err = st_sensors_set_fullscale(indio_dev,
                                                sdata->current_fullscale->num);
@@ -424,6 +429,9 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
        else
                drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
 
+       /* Flag to the poll function that the hardware trigger is in use */
+       sdata->hw_irq_trigger = enable;
+
        /* Enable/Disable the interrupt generator for data ready. */
        err = st_sensors_write_data_with_mask(indio_dev,
                                        sdata->sensor_settings->drdy_irq.addr,
index da72279fcf99cbd6eac0f48c8965837566a8cdc1..296e4ff19ae8d9cb4d40a0a852ecdecefb2e127f 100644 (file)
 #include <linux/iio/common/st_sensors.h>
 #include "st_sensors_core.h"
 
+/**
+ * st_sensors_irq_handler() - top half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_handler(int irq, void *p)
+{
+       struct iio_trigger *trig = p;
+       struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+       struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+       /* Get the time stamp as close in time as possible */
+       sdata->hw_timestamp = iio_get_time_ns();
+       return IRQ_WAKE_THREAD;
+}
+
+/**
+ * st_sensors_irq_thread() - bottom half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_thread(int irq, void *p)
+{
+       struct iio_trigger *trig = p;
+       struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+       struct st_sensor_data *sdata = iio_priv(indio_dev);
+       int ret;
+
+       /*
+        * If this trigger is backed by a hardware interrupt and we have a
+        * status register, check if this IRQ came from us
+        */
+       if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
+               u8 status;
+
+               ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+                          sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+                          &status);
+               if (ret < 0) {
+                       dev_err(sdata->dev, "could not read channel status\n");
+                       goto out_poll;
+               }
+               /*
+                * The lower bits of .active_scan_mask[0] are directly mapped
+                * to the channels on the sensor: either bit 0 for
+                * one-dimensional sensors, or e.g. x,y,z for accelerometers,
+                * gyroscopes or magnetometers. No sensor uses more than 3
+                * channels, so cut the other status bits here.
+                */
+               status &= 0x07;
+
+               /*
+                * If this was not caused by any channels on this sensor,
+                * return IRQ_NONE
+                */
+               if (!indio_dev->active_scan_mask)
+                       return IRQ_NONE;
+               if (!(status & (u8)indio_dev->active_scan_mask[0]))
+                       return IRQ_NONE;
+       }
+
+out_poll:
+       /* It's our IRQ: proceed to handle the register polling */
+       iio_trigger_poll_chained(p);
+       return IRQ_HANDLED;
+}
+
 int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                                const struct iio_trigger_ops *trigger_ops)
 {
@@ -30,6 +97,10 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                return -ENOMEM;
        }
 
+       iio_trigger_set_drvdata(sdata->trig, indio_dev);
+       sdata->trig->ops = trigger_ops;
+       sdata->trig->dev.parent = sdata->dev;
+
        irq = sdata->get_irq_data_ready(indio_dev);
        irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
        /*
@@ -77,9 +148,12 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
            sdata->sensor_settings->drdy_irq.addr_stat_drdy)
                irq_trig |= IRQF_SHARED;
 
-       err = request_threaded_irq(irq,
-                       iio_trigger_generic_data_rdy_poll,
-                       NULL,
+       /* Let's create an interrupt thread masking the hard IRQ here */
+       irq_trig |= IRQF_ONESHOT;
+
+       err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+                       st_sensors_irq_handler,
+                       st_sensors_irq_thread,
                        irq_trig,
                        sdata->trig->name,
                        sdata->trig);
@@ -88,10 +162,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                goto iio_trigger_free;
        }
 
-       iio_trigger_set_drvdata(sdata->trig, indio_dev);
-       sdata->trig->ops = trigger_ops;
-       sdata->trig->dev.parent = sdata->dev;
-
        err = iio_trigger_register(sdata->trig);
        if (err < 0) {
                dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
@@ -119,6 +189,18 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
 }
 EXPORT_SYMBOL(st_sensors_deallocate_trigger);
 
+int st_sensors_validate_device(struct iio_trigger *trig,
+                              struct iio_dev *indio_dev)
+{
+       struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+
+       if (indio != indio_dev)
+               return -EINVAL;
+
+       return 0;
+}
+EXPORT_SYMBOL(st_sensors_validate_device);
+
 MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
 MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
 MODULE_LICENSE("GPL v2");
index e63b957c985f4ebecbeaca10ac68867419e6900c..f7c71da42f15e28127492c8894a856a3a04da0e3 100644 (file)
@@ -247,7 +247,7 @@ config MCP4922
 
 config STX104
        tristate "Apex Embedded Systems STX104 DAC driver"
-       depends on X86 && ISA
+       depends on X86 && ISA_BUS_API
        help
          Say yes here to build support for the 2-channel DAC on the Apex
          Embedded Systems STX104 integrated analog PC/104 card. The base port
index 948f600e7059fb8837662a6aa01f3e72a3583100..69bde59098542daf480561c1bb113919736ccb2f 100644 (file)
@@ -525,7 +525,7 @@ static int ad5592r_alloc_channels(struct ad5592r_state *st)
 
        device_for_each_child_node(st->dev, child) {
                ret = fwnode_property_read_u32(child, "reg", &reg);
-               if (ret || reg > ARRAY_SIZE(st->channel_modes))
+               if (ret || reg >= ARRAY_SIZE(st->channel_modes))
                        continue;
 
                ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
index d67b17b6a7aab1abd0077e297c5e0f9769eee893..a5377044e42f0ccf128f1fd9bfb0ce02bba0a0a8 100644 (file)
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
 
 int st_gyro_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
 }
 
index 52a3c87c375ca62c027f66aa3de89d2c01c6fae7..a8012955a1f6d47130f703ced0df6a5e6c424407 100644 (file)
@@ -409,6 +409,7 @@ static const struct iio_info gyro_info = {
 static const struct iio_trigger_ops st_gyro_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
 #else
index 3be6d209a1595961ddd19dbcf8fc240f07617a77..11535911a5c697fa359aec0a4626f7a80b3d1e12 100644 (file)
@@ -165,10 +165,8 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
        struct am2315_sensor_data sensor_data;
 
        ret = am2315_read_data(data, &sensor_data);
-       if (ret < 0) {
-               mutex_unlock(&data->lock);
+       if (ret < 0)
                goto err;
-       }
 
        mutex_lock(&data->lock);
        if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
index fa47676131733744859f25b1bfa0f2183ac8f61d..a03832a5fc95aafd81b075710d32d57acf9f2677 100644 (file)
@@ -55,7 +55,7 @@ static const struct {
        },
        { /* IIO_HUMIDITYRELATIVE channel */
                .shift = 8,
-               .mask = 2,
+               .mask = 3,
        },
 };
 
@@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
                dev_err(&client->dev, "cannot read high byte measurement");
                return ret;
        }
-       val = ret << 6;
+       val = ret << 8;
 
        ret = i2c_smbus_read_byte(client);
        if (ret < 0) {
                dev_err(&client->dev, "cannot read low byte measurement");
                return ret;
        }
-       val |= ret >> 2;
+       val |= ret;
 
        return val;
 }
@@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
                return IIO_VAL_INT_PLUS_MICRO;
        case IIO_CHAN_INFO_SCALE:
                if (chan->type == IIO_TEMP) {
-                       *val = 165;
-                       *val2 = 65536 >> 2;
+                       *val = 165000;
+                       *val2 = 65536;
                        return IIO_VAL_FRACTIONAL;
                } else {
-                       *val = 0;
-                       *val2 = 10000;
-                       return IIO_VAL_INT_PLUS_MICRO;
+                       *val = 100;
+                       *val2 = 65536;
+                       return IIO_VAL_FRACTIONAL;
                }
                break;
        case IIO_CHAN_INFO_OFFSET:
-               *val = -3971;
-               *val2 = 879096;
+               *val = -15887;
+               *val2 = 515151;
                return IIO_VAL_INT_PLUS_MICRO;
        default:
                return -EINVAL;
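
The new constants follow from the HDC100x conversion formulas the hunk encodes, T(°C) = raw / 2^16 * 165 - 40 and RH(%) = raw / 2^16 * 100, once the raw word is no longer pre-shifted. The temperature offset, worked through:

    /* scale  = 165000 / 65536 m°C per LSB          (IIO_VAL_FRACTIONAL)
     * offset = -40000 m°C expressed in LSB units:
     *        = -40000 / (165000 / 65536)
     *        = -40000 * 65536 / 165000
     *        ~ -15887.515151  ->  *val = -15887, *val2 = 515151
     */
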
index 0bf92b06d7d87a1cfb2ff85ce9662abb5ec07fd3..b8a290ec984e33f79aaa8b546f543709e579b7b3 100644 (file)
@@ -209,11 +209,11 @@ static const struct  bmi160_scale_item bmi160_scale_table[] = {
 };
 
 static const struct bmi160_odr bmi160_accel_odr[] = {
-       {0x01, 0, 78125},
-       {0x02, 1, 5625},
-       {0x03, 3, 125},
-       {0x04, 6, 25},
-       {0x05, 12, 5},
+       {0x01, 0, 781250},
+       {0x02, 1, 562500},
+       {0x03, 3, 125000},
+       {0x04, 6, 250000},
+       {0x05, 12, 500000},
        {0x06, 25, 0},
        {0x07, 50, 0},
        {0x08, 100, 0},
@@ -229,7 +229,7 @@ static const struct bmi160_odr bmi160_gyro_odr[] = {
        {0x08, 100, 0},
        {0x09, 200, 0},
        {0x0A, 400, 0},
-       {0x0B, 8000, 0},
+       {0x0B, 800, 0},
        {0x0C, 1600, 0},
        {0x0D, 3200, 0},
 };
@@ -364,8 +364,8 @@ int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
 
        return regmap_update_bits(data->regmap,
                                  bmi160_regs[t].config,
-                                 bmi160_odr_table[t].tbl[i].bits,
-                                 bmi160_regs[t].config_odr_mask);
+                                 bmi160_regs[t].config_odr_mask,
+                                 bmi160_odr_table[t].tbl[i].bits);
 }
 
 static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
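
The corrected entries encode each output data rate as {register bits, Hz, µHz}. The low BMI160 accelerometer rates are binary fractions of 25 Hz, which makes the fractional parts easy to verify:

    /* 0x01 -> 25/32 Hz = 0.781250 Hz -> {0x01, 0, 781250}
     * 0x02 -> 25/16 Hz = 1.562500 Hz -> {0x02, 1, 562500}
     * The old values dropped trailing zeros (78125 uHz would be
     * 0.078125 Hz); the gyro entry 0x0B is likewise 800 Hz, not 8000.
     */
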
index f62b8bd9ad7ef0fb440e8d28a55061db4ffb3496..dd6fc6d21f9d0c6057b6b5029800eef2fa82dacb 100644 (file)
@@ -56,6 +56,7 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
        int i;
        acpi_status status;
        union acpi_object *cpm;
+       int ret;
 
        status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer);
        if (ACPI_FAILURE(status))
@@ -82,10 +83,10 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
                        }
                }
        }
-
+       ret = cpm->package.count;
        kfree(buffer.pointer);
 
-       return cpm->package.count;
+       return ret;
 }
 
 static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data)
index ae2806aafb7267c474f54ffac8bc33232739c469..0c52dfe649771db4a4a23e7e09365dbbc33932e1 100644 (file)
@@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
 
        /* Prevent the module from being removed whilst attached to a trigger */
        __module_get(pf->indio_dev->info->driver_module);
+
+       /* Get irq number */
        pf->irq = iio_trigger_get_irq(trig);
+       if (pf->irq < 0)
+               goto out_put_module;
+
+       /* Request irq */
        ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
                                   pf->type, pf->name,
                                   pf);
-       if (ret < 0) {
-               module_put(pf->indio_dev->info->driver_module);
-               return ret;
-       }
+       if (ret < 0)
+               goto out_put_irq;
 
+       /* Enable trigger in driver */
        if (trig->ops && trig->ops->set_trigger_state && notinuse) {
                ret = trig->ops->set_trigger_state(trig, true);
                if (ret < 0)
-                       module_put(pf->indio_dev->info->driver_module);
+                       goto out_free_irq;
        }
 
        return ret;
+
+out_free_irq:
+       free_irq(pf->irq, pf);
+out_put_irq:
+       iio_trigger_put_irq(trig, pf->irq);
+out_put_module:
+       module_put(pf->indio_dev->info->driver_module);
+       return ret;
 }
 
 static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
index b4dbb39129773467dad31c97eedeb33482a31ecf..651d57b8abbf910b3e94ad9e628e312b958cbb50 100644 (file)
@@ -1011,6 +1011,7 @@ static int apds9960_probe(struct i2c_client *client,
 
        iio_device_attach_buffer(indio_dev, buffer);
 
+       indio_dev->dev.parent = &client->dev;
        indio_dev->info = &apds9960_info;
        indio_dev->name = APDS9960_DRV_NAME;
        indio_dev->channels = apds9960_channels;
index 72b364e4aa727f5908fdd6ccf76902fcfb0ffe66..b54dcba05a8294d988d6bf5421cca21f01224159 100644 (file)
@@ -84,7 +84,7 @@ static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev,
        int ret;
 
        if (!readval)
-               bh1780_write(bh1780, (u8)reg, (u8)writeval);
+               return bh1780_write(bh1780, (u8)reg, (u8)writeval);
 
        ret = bh1780_read(bh1780, (u8)reg);
        if (ret < 0)
@@ -187,7 +187,7 @@ static int bh1780_probe(struct i2c_client *client,
 
        indio_dev->dev.parent = &client->dev;
        indio_dev->info = &bh1780_info;
-       indio_dev->name = id->name;
+       indio_dev->name = "bh1780";
        indio_dev->channels = bh1780_channels;
        indio_dev->num_channels = ARRAY_SIZE(bh1780_channels);
        indio_dev->modes = INDIO_DIRECT_MODE;
@@ -226,7 +226,8 @@ static int bh1780_remove(struct i2c_client *client)
 static int bh1780_runtime_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct bh1780_data *bh1780 = iio_priv(indio_dev);
        int ret;
 
        ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
@@ -241,7 +242,8 @@ static int bh1780_runtime_suspend(struct device *dev)
 static int bh1780_runtime_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct bh1780_data *bh1780 = iio_priv(indio_dev);
        int ret;
 
        ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
index e01e58a9bd1419397dda85160e7cc625ab7fd6e7..f17cb2ea18f59c9ea4f368e26488e50d114b05ec 100644 (file)
@@ -147,7 +147,6 @@ static const struct iio_chan_spec max44000_channels[] = {
        {
                .type = IIO_PROXIMITY,
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .scan_index = MAX44000_SCAN_INDEX_PRX,
                .scan_type = {
                        .sign           = 'u',
index ecd3bd0a97693420c13bfae8fc583ff74854edf8..0a9e8fadfa9de8a66dd3d1fee8d82f81cc6b10a4 100644 (file)
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
 
 int st_magn_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
 }
 
index 62036d2a9956aa426cdde5ce71906e4e82663510..8250fc322c56754aa23ea4c877ff3414b1f58d81 100644 (file)
@@ -572,6 +572,7 @@ static const struct iio_info magn_info = {
 static const struct iio_trigger_ops st_magn_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops)
 #else
index 2f1498e12bb28978ca8bea9356bfac9338b407fe..724452d61846c67155120876af55921f56e54c6a 100644 (file)
@@ -879,8 +879,8 @@ static int bmp280_probe(struct i2c_client *client,
        if (ret < 0)
                return ret;
        if (chip_id != id->driver_data) {
-               dev_err(&client->dev, "bad chip id.  expected %x got %x\n",
-                       BMP280_CHIP_ID, chip_id);
+               dev_err(&client->dev, "bad chip id.  expected %lx got %x\n",
+                       id->driver_data, chip_id);
                return -EINVAL;
        }
 
index 2ff53f222352ca66d4057ff054732e6a0a581db6..99468d0a64e74ce3f5ab0701f6d38e9f77c8c49c 100644 (file)
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = {
 
 int st_press_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_press_buffer_setup_ops);
 }
 
index 9e9b72a8f18f78fe7d7084710b4854604b7fb0b8..92a118c3c4acf3bc1529c4350dde5154b41c4abb 100644 (file)
 #include <linux/iio/common/st_sensors.h>
 #include "st_pressure.h"
 
+#define MCELSIUS_PER_CELSIUS                   1000
+
+/* Default pressure sensitivity */
 #define ST_PRESS_LSB_PER_MBAR                  4096UL
 #define ST_PRESS_KPASCAL_NANO_SCALE            (100000000UL / \
                                                 ST_PRESS_LSB_PER_MBAR)
+
+/* Default temperature sensitivity */
 #define ST_PRESS_LSB_PER_CELSIUS               480UL
-#define ST_PRESS_CELSIUS_NANO_SCALE            (1000000000UL / \
-                                                ST_PRESS_LSB_PER_CELSIUS)
+#define ST_PRESS_MILLI_CELSIUS_OFFSET          42500UL
+
 #define ST_PRESS_NUMBER_DATA_CHANNELS          1
 
 /* FULLSCALE */
+#define ST_PRESS_FS_AVL_1100MB                 1100
 #define ST_PRESS_FS_AVL_1260MB                 1260
 
 #define ST_PRESS_1_OUT_XL_ADDR                 0x28
@@ -54,9 +60,6 @@
 #define ST_PRESS_LPS331AP_PW_MASK              0x80
 #define ST_PRESS_LPS331AP_FS_ADDR              0x23
 #define ST_PRESS_LPS331AP_FS_MASK              0x30
-#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL      0x00
-#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN     ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN     ST_PRESS_CELSIUS_NANO_SCALE
 #define ST_PRESS_LPS331AP_BDU_ADDR             0x20
 #define ST_PRESS_LPS331AP_BDU_MASK             0x04
 #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR                0x22
 #define ST_PRESS_LPS331AP_OD_IRQ_ADDR          0x22
 #define ST_PRESS_LPS331AP_OD_IRQ_MASK          0x40
 #define ST_PRESS_LPS331AP_MULTIREAD_BIT                true
-#define ST_PRESS_LPS331AP_TEMP_OFFSET          42500
 
 /* CUSTOM VALUES FOR LPS001WP SENSOR */
+
+/* LPS001WP pressure resolution */
+#define ST_PRESS_LPS001WP_LSB_PER_MBAR         16UL
+/* LPS001WP temperature resolution */
+#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS      64UL
+
 #define ST_PRESS_LPS001WP_WAI_EXP              0xba
 #define ST_PRESS_LPS001WP_ODR_ADDR             0x20
 #define ST_PRESS_LPS001WP_ODR_MASK             0x30
@@ -78,6 +86,8 @@
 #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL     0x03
 #define ST_PRESS_LPS001WP_PW_ADDR              0x20
 #define ST_PRESS_LPS001WP_PW_MASK              0x40
+#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
+       (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
 #define ST_PRESS_LPS001WP_BDU_ADDR             0x20
 #define ST_PRESS_LPS001WP_BDU_MASK             0x04
 #define ST_PRESS_LPS001WP_MULTIREAD_BIT                true
 #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL       0x04
 #define ST_PRESS_LPS25H_PW_ADDR                        0x20
 #define ST_PRESS_LPS25H_PW_MASK                        0x80
-#define ST_PRESS_LPS25H_FS_ADDR                        0x00
-#define ST_PRESS_LPS25H_FS_MASK                        0x00
-#define ST_PRESS_LPS25H_FS_AVL_1260_VAL                0x00
-#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN       ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN       ST_PRESS_CELSIUS_NANO_SCALE
 #define ST_PRESS_LPS25H_BDU_ADDR               0x20
 #define ST_PRESS_LPS25H_BDU_MASK               0x04
 #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR          0x23
 #define ST_PRESS_LPS25H_OD_IRQ_ADDR            0x22
 #define ST_PRESS_LPS25H_OD_IRQ_MASK            0x40
 #define ST_PRESS_LPS25H_MULTIREAD_BIT          true
-#define ST_PRESS_LPS25H_TEMP_OFFSET            42500
 #define ST_PRESS_LPS25H_OUT_XL_ADDR            0x28
 #define ST_TEMP_LPS25H_OUT_L_ADDR              0x2b
 
@@ -161,7 +165,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
                        .storagebits = 16,
                        .endianness = IIO_LE,
                },
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+               .info_mask_separate =
+                       BIT(IIO_CHAN_INFO_RAW) |
+                       BIT(IIO_CHAN_INFO_SCALE),
                .modified = 0,
        },
        {
@@ -177,7 +183,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
                },
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) |
-                       BIT(IIO_CHAN_INFO_OFFSET),
+                       BIT(IIO_CHAN_INFO_SCALE),
                .modified = 0,
        },
        IIO_CHAN_SOFT_TIMESTAMP(1)
@@ -212,11 +218,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .addr = ST_PRESS_LPS331AP_FS_ADDR,
                        .mask = ST_PRESS_LPS331AP_FS_MASK,
                        .fs_avl = {
+                               /*
+                                * Pressure and temperature sensitivity values
+                                * as defined in table 3 of the LPS331AP
+                                * datasheet.
+                                */
                                [0] = {
                                        .num = ST_PRESS_FS_AVL_1260MB,
-                                       .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
-                                       .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
-                                       .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
+                                       .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+                                       .gain2 = ST_PRESS_LSB_PER_CELSIUS,
                                },
                        },
                },
@@ -261,7 +270,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
                },
                .fs = {
-                       .addr = 0,
+                       .fs_avl = {
+                               /*
+                                * Pressure and temperature resolution values
+                                * as defined in table 3 of the LPS001WP
+                                * datasheet.
+                                */
+                               [0] = {
+                                       .num = ST_PRESS_FS_AVL_1100MB,
+                                       .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
+                                       .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
+                               },
+                       },
                },
                .bdu = {
                        .addr = ST_PRESS_LPS001WP_BDU_ADDR,
@@ -298,14 +317,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
                },
                .fs = {
-                       .addr = ST_PRESS_LPS25H_FS_ADDR,
-                       .mask = ST_PRESS_LPS25H_FS_MASK,
                        .fs_avl = {
+                               /*
+                                * Pressure and temperature sensitivity values
+                                * as defined in table 3 of the LPS25H
+                                * datasheet.
+                                */
                                [0] = {
                                        .num = ST_PRESS_FS_AVL_1260MB,
-                                       .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
-                                       .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
-                                       .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
+                                       .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+                                       .gain2 = ST_PRESS_LSB_PER_CELSIUS,
                                },
                        },
                },
@@ -364,26 +384,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
 
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_SCALE:
-               *val = 0;
-
                switch (ch->type) {
                case IIO_PRESSURE:
+                       *val = 0;
                        *val2 = press_data->current_fullscale->gain;
-                       break;
+                       return IIO_VAL_INT_PLUS_NANO;
                case IIO_TEMP:
+                       *val = MCELSIUS_PER_CELSIUS;
                        *val2 = press_data->current_fullscale->gain2;
-                       break;
+                       return IIO_VAL_FRACTIONAL;
                default:
                        err = -EINVAL;
                        goto read_error;
                }
 
-               return IIO_VAL_INT_PLUS_NANO;
        case IIO_CHAN_INFO_OFFSET:
                switch (ch->type) {
                case IIO_TEMP:
-                       *val = 425;
-                       *val2 = 10;
+                       *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
+                              press_data->current_fullscale->gain2;
+                       *val2 = MCELSIUS_PER_CELSIUS;
                        break;
                default:
                        err = -EINVAL;
@@ -425,6 +445,7 @@ static const struct iio_info press_info = {
 static const struct iio_trigger_ops st_press_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops)
 #else
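
The IIO_VAL_FRACTIONAL rework keeps the reported temperature numerically
identical to the old hard-coded value; a worked check with the default
gain2 = ST_PRESS_LSB_PER_CELSIUS = 480:

        /*
         * Userspace computes:  temp = (raw + offset) * scale
         *   scale  = 1000 / 480              (~2.083 milli-degC per LSB)
         *   offset = 42500 * 480 / 1000      (= 20400 LSB)
         * so at raw = 0:
         *   temp   = 20400 * 1000 / 480      = 42500 milli-degC = 42.5 degC,
         * the same 42.5 the old code expressed as *val = 425, *val2 = 10.
         */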
index f4d29d5dbd5f88d6e5edffef142e86db30076d44..e2f926cdcad2acdbdd8aec50f0f479f52d2a6584 100644 (file)
@@ -64,6 +64,7 @@ struct as3935_state {
        struct delayed_work work;
 
        u32 tune_cap;
+       u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
        u8 buf[2] ____cacheline_aligned;
 };
 
@@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
                .type           = IIO_PROXIMITY,
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) |
-                       BIT(IIO_CHAN_INFO_PROCESSED),
+                       BIT(IIO_CHAN_INFO_PROCESSED) |
+                       BIT(IIO_CHAN_INFO_SCALE),
                .scan_index     = 0,
                .scan_type = {
                        .sign           = 'u',
@@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
                /* storm out of range */
                if (*val == AS3935_DATA_MASK)
                        return -EINVAL;
-               *val *= 1000;
+
+               if (m == IIO_CHAN_INFO_PROCESSED)
+                       *val *= 1000;
+               break;
+       case IIO_CHAN_INFO_SCALE:
+               *val = 1000;
                break;
        default:
                return -EINVAL;
@@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
        ret = as3935_read(st, AS3935_DATA, &val);
        if (ret)
                goto err_read;
-       val &= AS3935_DATA_MASK;
-       val *= 1000;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
+       st->buffer[0] = val & AS3935_DATA_MASK;
+       iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
+                                          pf->timestamp);
 err_read:
        iio_trigger_notify_done(indio_dev->trig);
 
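iio_push_to_buffers_with_timestamp() copies a full scan_bytes-sized scan and,
when the timestamp channel is enabled, writes the s64 timestamp at an
8-byte-aligned offset at its end; pushing the bare 32-bit val therefore
over-read the stack. The 16-byte buffer matches the layout the core expects,
roughly (a sketch for illustration, not a struct the driver defines):

        struct as3935_scan {
                u8  chan;               /* 8 valid bits (AS3935_DATA_MASK) */
                u8  pad[7];             /* aligns the timestamp to offset 8 */
                s64 timestamp;          /* filled in by the push helper */
        } __packed;
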
index c2e257d97effb9813ef22b58ab580507dbf4134d..1a2984c28b9589410b381a6a0277aff188395907 100644 (file)
@@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
 {
        int ret = 0;
        struct net_device *old_net_dev;
+       enum ib_gid_type old_gid_type;
 
 /* in rdma_cap_roce_gid_table, this function should be protected by a
         * sleep-able lock.
@@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
        }
 
        old_net_dev = table->data_vec[ix].attr.ndev;
+       old_gid_type = table->data_vec[ix].attr.gid_type;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* if modify_gid failed, just delete the old gid */
@@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }
-       if (default_gid)
-               table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+
        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
+       if (default_gid) {
+               table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+               if (action == GID_TABLE_WRITE_ACTION_DEL)
+                       table->data_vec[ix].attr.gid_type = old_gid_type;
+       }
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);
@@ -405,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 
        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
-                       if (!del_gid(ib_dev, port, table, ix, false))
+                       if (!del_gid(ib_dev, port, table, ix,
+                                    !!(table->data_vec[ix].props &
+                                       GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;
 
        write_unlock_irq(&table->rwlock);
index 1d92e091e22ef0bdb64ee7433eb2cf5fbba9600c..c99525512b3434d24a7882d4f869f6e675da44b2 100644 (file)
@@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
 
        /* Check if the device started its remove_one */
-       spin_lock_irq(&cm.lock);
+       spin_lock_irqsave(&cm.lock, flags);
        if (!cm_dev->going_down) {
                queue_delayed_work(cm.wq, &work->work, 0);
        } else {
                kfree(work);
                ret = -ENODEV;
        }
-       spin_unlock_irq(&cm.lock);
+       spin_unlock_irqrestore(&cm.lock, flags);
 
 out:
        return ret;
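
cm_establish() can be reached with interrupts already disabled by its caller,
and spin_unlock_irq() re-enables them unconditionally; the irqsave/irqrestore
pair preserves whatever state the caller had. The difference in miniature
(`lock` and `flags` as stand-ins):

        unsigned long flags;

        spin_lock_irq(&lock);                   /* unlock forces IRQs back on */
        spin_unlock_irq(&lock);

        spin_lock_irqsave(&lock, flags);        /* records the IRQ state ...   */
        spin_unlock_irqrestore(&lock, flags);   /* ... and restores it exactly */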
index f0c91ba3178a12030a8cc4e38db7a651633759b8..ad1b1adcf6f01894f1fc6f3f35eed308d55e269c 100644 (file)
@@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
                complete(&id_priv->comp);
 }
 
-static int cma_disable_callback(struct rdma_id_private *id_priv,
-                               enum rdma_cm_state state)
-{
-       mutex_lock(&id_priv->handler_mutex);
-       if (id_priv->state != state) {
-               mutex_unlock(&id_priv->handler_mutex);
-               return -EINVAL;
-       }
-       return 0;
-}
-
 struct rdma_cm_id *rdma_create_id(struct net *net,
                                  rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps,
@@ -1671,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        struct rdma_cm_event event;
        int ret = 0;
 
+       mutex_lock(&id_priv->handler_mutex);
        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-               cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
+            id_priv->state != RDMA_CM_CONNECT) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-               cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
-               return 0;
+            id_priv->state != RDMA_CM_DISCONNECT))
+               goto out;
 
        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
@@ -1870,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e
 
 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
-       struct rdma_id_private *listen_id, *conn_id;
+       struct rdma_id_private *listen_id, *conn_id = NULL;
        struct rdma_cm_event event;
        struct net_device *net_dev;
        int offset, ret;
@@ -1884,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                goto net_dev_put;
        }
 
-       if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
+       mutex_lock(&listen_id->handler_mutex);
+       if (listen_id->state != RDMA_CM_LISTEN) {
                ret = -ECONNABORTED;
-               goto net_dev_put;
+               goto err1;
        }
 
        memset(&event, 0, sizeof event);
@@ -1976,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
        struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
-       if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
-               return 0;
+       mutex_lock(&id_priv->handler_mutex);
+       if (id_priv->state != RDMA_CM_CONNECT)
+               goto out;
 
        memset(&event, 0, sizeof event);
        switch (iw_event->event) {
@@ -2029,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
                return ret;
        }
 
+out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
 }
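
cma_disable_callback() returned with handler_mutex held on success but dropped
it on failure, hiding the lock ownership from readers and static checkers.
Open-coded, every handler now follows one visible shape:

        mutex_lock(&id_priv->handler_mutex);
        if (id_priv->state != RDMA_CM_CONNECT)  /* state this event expects */
                goto out;
        /* ... handle the event with the mutex held ... */
out:
        mutex_unlock(&id_priv->handler_mutex);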
@@ -2039,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
        struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
        struct rdma_cm_event event;
-       int ret;
+       int ret = -ECONNABORTED;
        struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
        listen_id = cm_id->context;
-       if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
-               return -ECONNABORTED;
+
+       mutex_lock(&listen_id->handler_mutex);
+       if (listen_id->state != RDMA_CM_LISTEN)
+               goto out;
 
        /* Create a new RDMA id for the new IW CM ID */
        new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
@@ -3216,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
        struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
        int ret = 0;
 
-       if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
-               return 0;
+       mutex_lock(&id_priv->handler_mutex);
+       if (id_priv->state != RDMA_CM_CONNECT)
+               goto out;
 
        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
@@ -3673,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
        struct rdma_id_private *id_priv;
        struct cma_multicast *mc = multicast->context;
        struct rdma_cm_event event;
-       int ret;
+       int ret = 0;
 
        id_priv = mc->id_priv;
-       if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
-           cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
-               return 0;
+       mutex_lock(&id_priv->handler_mutex);
+       if (id_priv->state != RDMA_CM_ADDR_BOUND &&
+           id_priv->state != RDMA_CM_ADDR_RESOLVED)
+               goto out;
 
        if (!status)
                status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
@@ -3720,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
                return 0;
        }
 
+out:
        mutex_unlock(&id_priv->handler_mutex);
        return 0;
 }
@@ -3878,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
        gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
                   rdma_start_port(id_priv->cma_dev->device)];
        if (addr->sa_family == AF_INET) {
-               if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+               if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+                       mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
                        err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
                                            true);
-               if (!err) {
-                       mc->igmp_joined = true;
-                       mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+                       if (!err)
+                               mc->igmp_joined = true;
                }
        } else {
                if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
index 5516fb0703442cafc0c917d3115b8b25e7c71b27..5c155fa91eec8380a463ad687a6aa977c10bcefd 100644 (file)
@@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device,
        if (err || port_attr->subnet_prefix)
                return err;
 
+       if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+               return 0;
+
        err = ib_query_gid(device, port_num, 0, &gid, NULL);
        if (err)
                return err;
@@ -1024,7 +1027,8 @@ static int __init ib_core_init(void)
                goto err_mad;
        }
 
-       if (ib_add_ibnl_clients()) {
+       ret = ib_add_ibnl_clients();
+       if (ret) {
                pr_warn("Couldn't register ibnl clients\n");
                goto err_sa;
        }
index 43e3fa27102b8cd40fb39dc6e4747deed82c0f75..1c41b95cefec0a9e84a7cb151c2765c4d016fd9a 100644 (file)
@@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
        if (!nlmsg_request) {
                pr_info("%s: Could not find a matching request (seq = %u)\n",
                                 __func__, msg_seq);
-                       return -EINVAL;
+               return -EINVAL;
        }
        pm_msg = nlmsg_request->req_buffer;
        local_sockaddr = (struct sockaddr_storage *)
index 82fb511112da745e3fdbf30fe35bcebfceee7491..2d49228f28b2b18ac537b1bb982127cf66138356 100644 (file)
@@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
                /* Now, check to see if there are any methods still in use */
                if (!check_method_table(method)) {
                        /* If not, release management method table */
-                        kfree(method);
-                        class->method_table[mgmt_class] = NULL;
-                        /* Any management classes left ? */
+                       kfree(method);
+                       class->method_table[mgmt_class] = NULL;
+                       /* Any management classes left ? */
                        if (!check_class_table(class)) {
                                /* If not, release management class table */
                                kfree(class);
index 5e573bb18660d68f9e5c051ce3c7859ce08dea03..60df4f8e81bed10ac8e9b6149ec4c3ef5c6abf9f 100644 (file)
@@ -530,6 +530,7 @@ static PORT_PMA_ATTR(port_xmit_data             , 12, 32, 192);
 static PORT_PMA_ATTR(port_rcv_data                 , 13, 32, 224);
 static PORT_PMA_ATTR(port_xmit_packets             , 14, 32, 256);
 static PORT_PMA_ATTR(port_rcv_packets              , 15, 32, 288);
+static PORT_PMA_ATTR(port_xmit_wait                ,  0, 32, 320);
 
 /*
  * Counters added by extended set
@@ -560,6 +561,7 @@ static struct attribute *pma_attrs[] = {
        &port_pma_attr_port_rcv_data.attr.attr,
        &port_pma_attr_port_xmit_packets.attr.attr,
        &port_pma_attr_port_rcv_packets.attr.attr,
+       &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
 };
 
@@ -579,6 +581,7 @@ static struct attribute *pma_attrs_ext[] = {
        &port_pma_attr_ext_port_xmit_data.attr.attr,
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
+       &port_pma_attr_port_xmit_wait.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_xmit_packets.attr.attr,
@@ -604,6 +607,7 @@ static struct attribute *pma_attrs_noietf[] = {
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
+       &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
 };
 
@@ -889,9 +893,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
 static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                           u8 port_num)
 {
-       struct attribute_group *hsag = NULL;
+       struct attribute_group *hsag;
        struct rdma_hw_stats *stats;
-       int i = 0, ret;
+       int i, ret;
 
        stats = device->alloc_hw_stats(device, port_num);
 
@@ -899,19 +903,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                return;
 
        if (!stats->names || stats->num_counters <= 0)
-               goto err;
+               goto err_free_stats;
 
+       /*
+        * Two extra attribute elements here, one for the lifespan entry and
+        * one to NULL terminate the list for the sysfs core code
+        */
        hsag = kzalloc(sizeof(*hsag) +
-                      // 1 extra for the lifespan config entry
-                      sizeof(void *) * (stats->num_counters + 1),
+                      sizeof(void *) * (stats->num_counters + 2),
                       GFP_KERNEL);
        if (!hsag)
-               return;
+               goto err_free_stats;
 
        ret = device->get_hw_stats(device, stats, port_num,
                                   stats->num_counters);
        if (ret != stats->num_counters)
-               goto err;
+               goto err_free_hsag;
 
        stats->timestamp = jiffies;
 
@@ -922,10 +929,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
                hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
                if (!hsag->attrs[i])
                        goto err;
+               sysfs_attr_init(hsag->attrs[i]);
        }
 
        /* treat an error here as non-fatal */
        hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
+       if (hsag->attrs[i])
+               sysfs_attr_init(hsag->attrs[i]);
 
        if (port) {
                struct kobject *kobj = &port->kobj;
@@ -946,10 +956,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
        return;
 
 err:
-       kfree(stats);
        for (; i >= 0; i--)
                kfree(hsag->attrs[i]);
+err_free_hsag:
        kfree(hsag);
+err_free_stats:
+       kfree(stats);
        return;
 }
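
setup_hw_stats() packs the attribute_group and its attrs[] pointer array into
a single allocation, and sysfs walks attrs[] until it finds NULL, hence one
slot per counter plus one for the lifespan entry plus the terminator. The
layout, assuming the hsag->attrs assignment that follows the kzalloc in this
function:

        /*
         * one kzalloc, two regions:
         *   [ struct attribute_group | attrs[0..num_counters-1]  counters
         *                              attrs[num_counters]       lifespan
         *                              attrs[num_counters + 1]   NULL     ]
         */
        hsag->attrs = (void *)hsag + sizeof(*hsag);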
 
index 1a8babb8ee3c4d328af7e08063cef8f147086a10..825021d1008b9a13ee5a3b02ed0a1dd7ce80d0ee 100644 (file)
@@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file,
        struct ib_srq                   *srq = NULL;
        struct ib_qp                    *qp;
        char                            *buf;
-       struct ib_qp_init_attr          attr;
+       struct ib_qp_init_attr          attr = {};
        struct ib_uverbs_ex_create_qp_resp resp;
        int                             ret;
 
index 1d7d4cf442e3c9646b6824e943e7e649c873d104..6298f54b413756a5bf0f19891a080739ec6300ef 100644 (file)
@@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
                ah_attr->grh.dgid = sgid;
 
                if (!rdma_cap_eth_ah(device, port_num)) {
-                       ret = ib_find_cached_gid_by_port(device, &dgid,
-                                                        IB_GID_TYPE_IB,
-                                                        port_num, NULL,
-                                                        &gid_index);
-                       if (ret)
-                               return ret;
+                       if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
+                               ret = ib_find_cached_gid_by_port(device, &dgid,
+                                                                IB_GID_TYPE_IB,
+                                                                port_num, NULL,
+                                                                &gid_index);
+                               if (ret)
+                                       return ret;
+                       } else {
+                               gid_index = 0;
+                       }
                }
 
                ah_attr->grh.sgid_index = (u8) gid_index;
index 6e7050ab9e16049039406b8cbb207cdfced18109..14d7eeb09be6545f5f21144f0f11ea41c53203a4 100644 (file)
@@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
        const struct cpumask *node_mask,
                *proc_mask = tsk_cpus_allowed(current);
        struct cpu_mask_set *set = &dd->affinity->proc;
-       char buf[1024];
 
        /*
         * check whether process/context affinity has already
         * been set
         */
        if (cpumask_weight(proc_mask) == 1) {
-               scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
-               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
-                         current->pid, current->comm, buf);
+               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+                         current->pid, current->comm,
+                         cpumask_pr_args(proc_mask));
                /*
                 * Mark the pre-set CPU as used. This is atomic so we don't
                 * need the lock
@@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
                cpumask_set_cpu(cpu, &set->used);
                goto done;
        } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
-               scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
-               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
-                         current->pid, current->comm, buf);
+               hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+                         current->pid, current->comm,
+                         cpumask_pr_args(proc_mask));
                goto done;
        }
 
@@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
        cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
                                  &dd->affinity->rcv_intr.mask :
                                  &dd->affinity->rcv_intr.used));
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
-       hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
+       hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
+                 cpumask_pr_args(intrs));
 
        /*
         * If we don't have a NUMA node requested, preference is towards
@@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
        if (node == -1)
                node = dd->node;
        node_mask = cpumask_of_node(node);
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
-       hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
+       hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
+                 cpumask_pr_args(node_mask));
 
        /* diff will hold all unused cpus */
        cpumask_andnot(diff, &set->mask, &set->used);
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
-       hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
+       hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
 
        /* get cpumask of available CPUs on preferred NUMA */
        cpumask_and(mask, diff, node_mask);
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
-       hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
+       hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
 
        /*
         * At first, we don't want to place processes on the same
@@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
                cpumask_andnot(diff, &set->mask, &set->used);
                cpumask_andnot(mask, diff, node_mask);
        }
-       scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
-       hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
+       hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
+                 cpumask_pr_args(mask));
 
        cpu = cpumask_first(mask);
        if (cpu >= nr_cpu_ids) /* empty */
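
%*pbl is the printk extension that renders a bitmap as a CPU-list string; it
consumes a field width (the bit count) and the bitmap pointer, which is
exactly the pair cpumask_pr_args() expands to, so the 1 KiB stack buffer and
the scnprintf() round-trips disappear:

        /* include/linux/cpumask.h */
        #define cpumask_pr_args(maskp)  nr_cpu_ids, cpumask_bits(maskp)

        pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));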
index 3b876da745a1dc60d2b589b5fbe756cdc33419aa..dad4d0ebbdffb45e2c667cc95b86aabf39b53a87 100644 (file)
@@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *);
 static void dc_start(struct hfi1_devdata *);
 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
                           unsigned int *np);
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
 
 /*
  * Error interrupt table entry.  This is used as input to the interrupt
@@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work)
        }
 
        reset_neighbor_info(ppd);
-       if (ppd->mgmt_allowed)
-               remove_full_mgmt_pkey(ppd);
 
        /* disable the port */
        clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -7070,12 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
                            __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
        ppd->pkeys[2] = FULL_MGMT_P_KEY;
        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+       hfi1_event_pkey_change(ppd->dd, ppd->port);
 }
 
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
 {
-       ppd->pkeys[2] = 0;
-       (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+       if (ppd->pkeys[2] != 0) {
+               ppd->pkeys[2] = 0;
+               (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+               hfi1_event_pkey_change(ppd->dd, ppd->port);
+       }
 }
 
 /*
@@ -7832,8 +7834,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
                         * save first 2 flits in the packet that caused
                         * the error
                         */
-                        dd->err_info_rcvport.packet_flit1 = hdr0;
-                        dd->err_info_rcvport.packet_flit2 = hdr1;
+                       dd->err_info_rcvport.packet_flit1 = hdr0;
+                       dd->err_info_rcvport.packet_flit2 = hdr1;
                }
                switch (info) {
                case 1:
@@ -9168,6 +9170,13 @@ int start_link(struct hfi1_pportdata *ppd)
                return 0;
        }
 
+       /*
+        * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
+        * pkey table can be configured properly if the HFI unit is connected
+        * to a switch port with MgmtAllowed=NO
+        */
+       clear_full_mgmt_pkey(ppd);
+
        return set_link_state(ppd, HLS_DN_POLL);
 }
 
@@ -9777,7 +9786,7 @@ static void set_send_length(struct hfi1_pportdata *ppd)
        u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
                              & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
                SEND_LEN_CHECK1_LEN_VL15_SHIFT;
-       int i;
+       int i, j;
        u32 thres;
 
        for (i = 0; i < ppd->vls_supported; i++) {
@@ -9801,7 +9810,10 @@ static void set_send_length(struct hfi1_pportdata *ppd)
                            sc_mtu_to_threshold(dd->vld[i].sc,
                                                dd->vld[i].mtu,
                                                dd->rcd[0]->rcvhdrqentsize));
-               sc_set_cr_threshold(dd->vld[i].sc, thres);
+               for (j = 0; j < INIT_SC_PER_VL; j++)
+                       sc_set_cr_threshold(
+                                       pio_select_send_context_vl(dd, j, i),
+                                           thres);
        }
        thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
                    sc_mtu_to_threshold(dd->vld[15].sc,
@@ -11906,7 +11918,7 @@ static void update_synth_timer(unsigned long opaque)
                hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
        }
 
-mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+       mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
 }
 
 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
@@ -14101,8 +14113,14 @@ static int init_asic_data(struct hfi1_devdata *dd)
 {
        unsigned long flags;
        struct hfi1_devdata *tmp, *peer = NULL;
+       struct hfi1_asic_data *asic_data;
        int ret = 0;
 
+       /* pre-allocate the asic structure in case we are the first device */
+       asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
+       if (!asic_data)
+               return -ENOMEM;
+
        spin_lock_irqsave(&hfi1_devs_lock, flags);
        /* Find our peer device */
        list_for_each_entry(tmp, &hfi1_dev_list, list) {
@@ -14114,18 +14132,14 @@ static int init_asic_data(struct hfi1_devdata *dd)
        }
 
        if (peer) {
+               /* use already allocated structure */
                dd->asic_data = peer->asic_data;
+               kfree(asic_data);
        } else {
-               dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
-               if (!dd->asic_data) {
-                       ret = -ENOMEM;
-                       goto done;
-               }
+               dd->asic_data = asic_data;
                mutex_init(&dd->asic_data->asic_resource_mutex);
        }
        dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
-
-done:
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        return ret;
 }
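
kzalloc(GFP_KERNEL) may sleep and so cannot run under the hfi1_devs_lock
spinlock; allocating first and discarding the buffer when a peer already owns
the shared structure trades one possibly wasted allocation for a correct
atomic section. The general shape (names are placeholders):

        new = kzalloc(sizeof(*new), GFP_KERNEL);        /* sleeping alloc first */
        if (!new)
                return -ENOMEM;

        spin_lock_irqsave(&lock, flags);                /* atomic from here on */
        if (peer) {
                shared = peer->data;    /* lost the race ... */
                kfree(new);             /* ... drop our copy  */
        } else {
                shared = new;
        }
        spin_unlock_irqrestore(&lock, flags);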
index 7a5b0e676cc7a4b1c1d9a65c76d66ba2f23a1f32..c702a009608f27a62b06b7ad4eb2d54914fab03b 100644 (file)
@@ -203,6 +203,9 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 
        switch (cmd) {
        case HFI1_IOCTL_ASSIGN_CTXT:
+               if (uctxt)
+                       return -EINVAL;
+
                if (copy_from_user(&uinfo,
                                   (struct hfi1_user_info __user *)arg,
                                   sizeof(uinfo)))
index 5cc492e5776d8d87f3833136ed6d33b05f700da0..eed971ccd2a1e88e7180f2eae5aba92da5448c96 100644 (file)
@@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
                dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
                                  (void *)dd->rcvhdrtail_dummy_kvaddr,
                                  dd->rcvhdrtail_dummy_physaddr);
-                                 dd->rcvhdrtail_dummy_kvaddr = NULL;
+               dd->rcvhdrtail_dummy_kvaddr = NULL;
        }
 
        for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
@@ -1383,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        int ret = 0, j, pidx, initfail;
-       struct hfi1_devdata *dd = NULL;
+       struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
        struct hfi1_pportdata *ppd;
 
        /* First, lock the non-writable module parameters */
index 219029576ba0b746646b0bfc9402279d40649c9a..fca07a1d6c284b90238543777e85462cc2249651 100644 (file)
@@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp)
        memset(data, 0, size);
 }
 
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+{
+       struct ib_event event;
+
+       event.event = IB_EVENT_PKEY_CHANGE;
+       event.device = &dd->verbs_dev.rdi.ibdev;
+       event.element.port_num = port;
+       ib_dispatch_event(&event);
+}
+
 static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
 {
        struct ib_mad_send_buf *send_buf;
@@ -1418,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
        }
 
        if (changed) {
-               struct ib_event event;
-
                (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
-
-               event.event = IB_EVENT_PKEY_CHANGE;
-               event.device = &dd->verbs_dev.rdi.ibdev;
-               event.element.port_num = port;
-               ib_dispatch_event(&event);
+               hfi1_event_pkey_change(dd, port);
        }
+
        return 0;
 }
 
index 55ee0867533324bd9b325ffbac1562abc22767db..8b734aaae88adf48be52a1a2e3d17ca4f800b7ce 100644 (file)
@@ -434,4 +434,6 @@ struct sc2vlnt {
                    COUNTER_MASK(1, 3) | \
                    COUNTER_MASK(1, 4))
 
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+
 #endif                         /* _HFI1_MAD_H */
index d5edb1afbb8f8b8ed98135bfc9b961e00d535b4e..d4022450b73f7a9c6d8d528fbe44b3dd2fa0e00a 100644 (file)
@@ -995,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
                /* counter is reset if occupancy count changes */
                if (reg != reg_prev)
                        loop = 0;
-               if (loop > 500) {
+               if (loop > 50000) {
                        /* timed out - bounce the link */
                        dd_dev_err(dd,
                                   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
@@ -1797,6 +1797,21 @@ static void pio_map_rcu_callback(struct rcu_head *list)
        pio_map_free(m);
 }
 
+/*
+ * Set credit return threshold for the kernel send context
+ */
+static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
+{
+       u32 thres;
+
+       thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
+                                           50),
+                   sc_mtu_to_threshold(dd->kernel_send_context[scontext],
+                                       dd->vld[i].mtu,
+                                       dd->rcd[0]->rcvhdrqentsize));
+       sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
+}
+
 /*
  * pio_map_init - called when #vls change
  * @dd: hfi1_devdata
@@ -1872,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
                        if (!newmap->map[i])
                                goto bail;
                        newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
-                       /* assign send contexts */
+                       /*
+                        * assign send contexts and
+                        * adjust credit return threshold
+                        */
                        for (j = 0; j < sz; j++) {
-                               if (dd->kernel_send_context[scontext])
+                               if (dd->kernel_send_context[scontext]) {
                                        newmap->map[i]->ksc[j] =
                                        dd->kernel_send_context[scontext];
+                                       set_threshold(dd, scontext, i);
+                               }
                                if (++scontext >= first_scontext +
                                                  vl_scontexts[i])
                                        /* wrap back to first send context */
index 2441669f0817dbec51c7fbcdc847c0bff9f48bfb..9fb561682c661b92e2a09beca2b53daf9f8c5b5e 100644 (file)
@@ -579,7 +579,8 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
 
        if (ppd->qsfp_info.cache_valid) {
                if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
-                       sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
+                       snprintf(lenstr, sizeof(lenstr), "%dM ",
+                                cache[QSFP_MOD_LEN_OFFS]);
 
                power_byte = cache[QSFP_MOD_PWR_OFFS];
                sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
index 79b2952c0dfb31a50f0f08e4a74d3f60b754e861..4cfb13771897ca35b91e476c17afabed48a9e5ce 100644 (file)
@@ -214,19 +214,6 @@ const char *print_u32_array(
        return ret;
 }
 
-const char *print_u64_array(
-       struct trace_seq *p,
-       u64 *arr, int len)
-{
-       int i;
-       const char *ret = trace_seq_buffer_ptr(p);
-
-       for (i = 0; i < len; i++)
-               trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
-       trace_seq_putc(p, 0);
-       return ret;
-}
-
 __hfi1_trace_fn(PKT);
 __hfi1_trace_fn(PROC);
 __hfi1_trace_fn(SDMA);
index 1e503ad0bebb764ee1e05462604538e6324002f8..be91f6fa1c87b16fe4f31affd6c12848b1951792 100644 (file)
@@ -678,8 +678,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
        u32 tlen = packet->tlen;
        struct rvt_qp *qp = packet->qp;
        bool has_grh = rcv_flags & HFI1_HAS_GRH;
-       bool sc4_bit = has_sc4_bit(packet);
-       u8 sc;
+       u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
        u32 bth1;
        int is_mcast;
        struct ib_grh *grh = NULL;
@@ -697,10 +696,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
                 */
                struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                u32 lqpn =  be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
-               u8 sl, sc5;
+               u8 sl;
 
-               sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-               sc5 |= sc4_bit;
                sl = ibp->sc_to_sl[sc5];
 
                process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
@@ -717,10 +714,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
 
        if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
                u16 slid = be16_to_cpu(hdr->lrh[3]);
-               u8 sc5;
-
-               sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-               sc5 |= sc4_bit;
 
                return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
        }
@@ -745,10 +738,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
                if (qp->ibqp.qp_num > 1) {
                        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                        u16 slid;
-                       u8 sc5;
-
-                       sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-                       sc5 |= sc4_bit;
 
                        slid = be16_to_cpu(hdr->lrh[3]);
                        if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
@@ -790,10 +779,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
                /* Received on QP0, and so by definition, this is an SMP */
                struct opa_smp *smp = (struct opa_smp *)data;
                u16 slid = be16_to_cpu(hdr->lrh[3]);
-               u8 sc5;
-
-               sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-               sc5 |= sc4_bit;
 
                if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
                        goto drop;
@@ -890,9 +875,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
        }
 
        wc.slid = be16_to_cpu(hdr->lrh[3]);
-       sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-       sc |= sc4_bit;
-       wc.sl = ibp->sc_to_sl[sc];
+       wc.sl = ibp->sc_to_sl[sc5];
 
        /*
         * Save the LMC lower bits if the destination LID is a unicast LID.
index 29f4795f866cd19185c680d28f95302e206f62e4..47ffd273ecbd7745d25067f4f8268301d89dde25 100644 (file)
@@ -183,7 +183,7 @@ struct user_sdma_iovec {
        struct sdma_mmu_node *node;
 };
 
-#define SDMA_CACHE_NODE_EVICT BIT(0)
+#define SDMA_CACHE_NODE_EVICT 0
 
 struct sdma_mmu_node {
        struct mmu_rb_node rb;
@@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req,
                 */
                SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
                         req->tidoffset, req->tidoffset / req->omfactor,
-                        !!(req->omfactor - KDETH_OM_SMALL));
+                        req->omfactor != KDETH_OM_SMALL);
                KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
                          req->tidoffset / req->omfactor);
                KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
-                         !!(req->omfactor - KDETH_OM_SMALL));
+                         req->omfactor != KDETH_OM_SMALL);
        }
 done:
        trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
index bc95c4112c615eeec6fef748088f74dae0939fa1..d8fb056526f8ba079bc7f57feffa63fd0ffa3415 100644 (file)
@@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
 
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                struct rvt_qp *qp)
+       __must_hold(&qp->s_lock)
 {
        struct verbs_txreq *tx = ERR_PTR(-EBUSY);
-       unsigned long flags;
 
-       spin_lock_irqsave(&qp->s_lock, flags);
        write_seqlock(&dev->iowait_lock);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                struct hfi1_qp_priv *priv;
@@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
        }
 out:
        write_sequnlock(&dev->iowait_lock);
-       spin_unlock_irqrestore(&qp->s_lock, flags);
        return tx;
 }
 
index 1cf69b2fe4a52ef68f1e0dd1b9e954fe74db94dc..a1d6e0807f97161f5336120ab7e6d0ffdb175b4e 100644 (file)
@@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
                                            struct rvt_qp *qp)
+       __must_hold(&qp->s_lock)
 {
        struct verbs_txreq *tx;
        struct hfi1_qp_priv *priv = qp->priv;
index 8b95320345583802c9263cfd308a877af54bc014..b738acdb9b027705f11257c8ab25cbaeba652159 100644 (file)
 
 #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
 #define IW_CFG_FPM_QP_COUNT            32768
+#define I40IW_MAX_PAGES_PER_FMR                512
+#define I40IW_MIN_PAGES_PER_FMR                1
 
 #define I40IW_MTU_TO_MSS               40
 #define I40IW_DEFAULT_MSS              1460
index c963cad92f5a8eb061af74ae964e9222115e1815..6e9081380a276cbb78da8687820b9c7092f684af 100644 (file)
@@ -600,8 +600,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
        cqp_init_info.scratch_array = cqp->scratch_array;
        status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
        if (status) {
-               i40iw_pr_err("cqp init status %d maj_err %d min_err %d\n",
-                            status, maj_err, min_err);
+               i40iw_pr_err("cqp init status %d\n", status);
                goto exit;
        }
        status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
index 02a735b64208c83f35e00e3dd29b4be1bba5b068..283b64c942eebfea6378ee8f4ad5206e6f549876 100644 (file)
@@ -79,6 +79,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
        props->atomic_cap = IB_ATOMIC_NONE;
        props->max_map_per_fmr = 1;
+       props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
        return 0;
 }
 
@@ -1473,6 +1474,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr
        info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
        info->pd_id = iwpd->sc_pd.pd_id;
        info->total_len = iwmr->length;
+       info->remote_access = true;
        cqp_info->cqp_cmd = OP_ALLOC_STAG;
        cqp_info->post_sq = 1;
        cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
@@ -1527,7 +1529,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
        mutex_lock(&iwdev->pbl_mutex);
        status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
        mutex_unlock(&iwdev->pbl_mutex);
-       if (!status)
+       if (status)
                goto err1;
 
        if (palloc->level != I40IW_LEVEL_1)
@@ -2149,6 +2151,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
                        struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
                        struct i40iw_fast_reg_stag_info info;
 
+                       memset(&info, 0, sizeof(info));
                        info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
                        info.access_rights |= i40iw_get_user_access(flags);
                        info.stag_key = reg_wr(ib_wr)->key & 0xff;
@@ -2158,10 +2161,14 @@ static int i40iw_post_send(struct ib_qp *ibqp,
                        info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
                        info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
                        info.total_len = iwmr->ibmr.length;
+                       info.reg_addr_pa = *(u64 *)palloc->level1.addr;
                        info.first_pm_pbl_index = palloc->level1.idx;
                        info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
                        info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
 
+                       if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
+                               info.chunk_size = 1;
+
                        if (page_shift == 21)
                                info.page_size = 1; /* 2M page */
 
@@ -2327,13 +2334,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq,
 {
        struct i40iw_cq *iwcq;
        struct i40iw_cq_uk *ukcq;
-       enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+       unsigned long flags;
+       enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
 
        iwcq = (struct i40iw_cq *)ibcq;
        ukcq = &iwcq->sc_cq.cq_uk;
-       if (notify_flags == IB_CQ_NEXT_COMP)
-               cq_notify = IW_CQ_COMPL_EVENT;
+       if (notify_flags == IB_CQ_SOLICITED)
+               cq_notify = IW_CQ_COMPL_SOLICITED;
+       spin_lock_irqsave(&iwcq->lock, flags);
        ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+       spin_unlock_irqrestore(&iwcq->lock, flags);
        return 0;
 }
 
index 105246fba2e7c381958c2e5e74b3a4af7b1441a8..5fc62336273132a80eebe1e8f3045758edd12d72 100644 (file)
@@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 
        ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
        ah->av.ib.g_slid  = ah_attr->src_path_bits;
+       ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
        if (ah_attr->ah_flags & IB_AH_GRH) {
                ah->av.ib.g_slid   |= 0x80;
                ah->av.ib.gid_index = ah_attr->grh.sgid_index;
@@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
                       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
                        --ah->av.ib.stat_rate;
        }
-       ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 
        return &ah->ibah;
 }
index d68f506c1922e8d18af269a4fcde33fd0e3ae5d5..9c2e53d28f985740c3d22f9c13be776e58010567 100644 (file)
@@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
                tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
        spin_unlock(&tun_qp->tx_lock);
        if (ret)
-               goto out;
+               goto end;
 
        tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
        if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
        wr.wr.send_flags = IB_SEND_SIGNALED;
 
        ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
-       if (ret)
-               ib_destroy_ah(ah);
+       if (!ret)
+               return 0;
+ out:
+       spin_lock(&tun_qp->tx_lock);
+       tun_qp->tx_ix_tail++;
+       spin_unlock(&tun_qp->tx_lock);
+       tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+       ib_destroy_ah(ah);
        return ret;
 }
 
@@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
 
        ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+       if (!ret)
+               return 0;
+
+       spin_lock(&sqp->tx_lock);
+       sqp->tx_ix_tail++;
+       spin_unlock(&sqp->tx_lock);
+       sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-       if (ret)
-               ib_destroy_ah(ah);
+       ib_destroy_ah(ah);
        return ret;
 }
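
Both mlx4 send paths now unwind their ring-slot claim when ib_post_send()
fails: the slot was reserved by bumping tx_ix_head under tx_lock, so the error
path advances tx_ix_tail to return it and clears the stale ah pointer so the
completion handler cannot destroy the AH a second time. In outline (names
abbreviated from the code above):

        ret = ib_post_send(qp, &wr.wr, &bad_wr);
        if (!ret)
                return 0;               /* completion path recycles the slot */

        spin_lock(&tx_lock);
        tx_ix_tail++;                   /* hand the claimed slot back */
        spin_unlock(&tx_lock);
        tx_ring[tx_ix].ah = NULL;       /* completion handler must not free it */
        ib_destroy_ah(ah);
        return ret;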
 
index b01ef6eee6e838b550758c8e4865bab7c1fceb09..42a46078d7d52755109784e769f29c2e5532bbca 100644 (file)
@@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
                else
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
-       if (dev->steering_support ==  MLX4_STEERING_MODE_DEVICE_MANAGED)
-               props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
        }
+       if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+               props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
 
        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
 
@@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
        int is_bonded = mlx4_is_bonded(dev);
 
+       if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+               return ERR_PTR(-EINVAL);
+
        if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
            (flow_attr->type != IB_FLOW_ATTR_NORMAL))
                return ERR_PTR(-EOPNOTSUPP);
index 6c5ac5d8f32ffcfafbb4dd7a4b9202b4049b95b5..29acda249612dd444ee42c37494674cc4e482790 100644 (file)
@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
        u32                     max_pages;
        struct mlx4_mr          mmr;
        struct ib_umem         *umem;
-       void                    *pages_alloc;
+       size_t                  page_map_size;
 };
 
 struct mlx4_ib_mw {
index 631272172a0b9793c0074e6ab9c00a4b9f2e4665..5d73989d977135d9d560add27e074417ceaca258 100644 (file)
@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
 {
-       int size = max_pages * sizeof(u64);
-       int add_size;
        int ret;
 
-       add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+       /* Ensure that size is aligned to DMA cacheline
+        * requirements.
+        * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+        * so page_map_size will never cross PAGE_SIZE.
+        */
+       mr->page_map_size = roundup(max_pages * sizeof(u64),
+                                   MLX4_MR_PAGES_ALIGN);
 
-       mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-       if (!mr->pages_alloc)
+       /* Prevent cross page boundary allocation. */
+       mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+       if (!mr->pages)
                return -ENOMEM;
 
-       mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
        mr->page_map = dma_map_single(device->dma_device, mr->pages,
-                                     size, DMA_TO_DEVICE);
+                                     mr->page_map_size, DMA_TO_DEVICE);
 
        if (dma_mapping_error(device->dma_device, mr->page_map)) {
                ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
        }
 
        return 0;
-err:
-       kfree(mr->pages_alloc);
 
+err:
+       free_page((unsigned long)mr->pages);
        return ret;
 }
 
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;
-               int size = mr->max_pages * sizeof(u64);
 
                dma_unmap_single(device->dma_device, mr->page_map,
-                                size, DMA_TO_DEVICE);
-               kfree(mr->pages_alloc);
+                                mr->page_map_size, DMA_TO_DEVICE);
+               free_page((unsigned long)mr->pages);
                mr->pages = NULL;
        }
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
        mr->npages = 0;
 
        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-                                  sizeof(u64) * mr->max_pages,
-                                  DMA_TO_DEVICE);
+                                  mr->page_map_size, DMA_TO_DEVICE);
 
        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-                                     sizeof(u64) * mr->max_pages,
-                                     DMA_TO_DEVICE);
+                                     mr->page_map_size, DMA_TO_DEVICE);
 
        return rc;
 }
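
mlx4_alloc_priv_pages above now sizes the DMA mapping by rounding the page-list bytes up to MLX4_MR_PAGES_ALIGN and backs the list with one zeroed page, so it can never straddle a page boundary. A hedged sketch of the sizing arithmetic (the 64-byte alignment and the 511-page cap are assumptions standing in for the driver's constants):

    #include <stdio.h>
    #include <stdint.h>

    #define MR_PAGES_ALIGN 64              /* assumed alignment, bytes */
    #define ROUNDUP(x, a)  ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
            /* e.g. 511 max pages -> 4088 bytes of u64 entries */
            unsigned int max_pages = 511;
            size_t page_map_size = ROUNDUP(max_pages * sizeof(uint64_t),
                                           MR_PAGES_ALIGN);

            /* With max_pages capped (MLX4_MAX_FAST_REG_PAGES in the
             * driver), the rounded size stays within one 4 KiB page. */
            printf("page_map_size = %zu, fits in 4096: %s\n",
                   page_map_size, page_map_size <= 4096 ? "yes" : "no");
            return 0;
    }
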
index 81b0e1fbec1d96ddce580e4021f2560589cc22b2..8db8405c1e99e72a1e50206182bd17ad1219d47d 100644 (file)
@@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
                        sizeof (struct mlx4_wqe_raddr_seg);
        case MLX4_IB_QPT_RC:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
-                       sizeof (struct mlx4_wqe_atomic_seg) +
+                       sizeof (struct mlx4_wqe_masked_atomic_seg) +
                        sizeof (struct mlx4_wqe_raddr_seg);
        case MLX4_IB_QPT_SMI:
        case MLX4_IB_QPT_GSI:
@@ -1191,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
        {
                err = create_qp_common(to_mdev(pd->device), pd, init_attr,
                                       udata, 0, &qp, gfp);
-               if (err)
+               if (err) {
+                       kfree(qp);
                        return ERR_PTR(err);
+               }
 
                qp->ibqp.qp_num = qp->mqp.qpn;
                qp->xrcdn = xrcdn;
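
The _mlx4_ib_create_qp fix above frees the half-constructed qp before returning ERR_PTR(), closing a leak on the create_qp_common() failure path. A generic sketch of that error-path ownership rule (all names here are illustrative):

    #include <errno.h>
    #include <stdlib.h>

    struct qp { int num; };

    static int qp_init(struct qp *qp)
    {
            (void)qp;
            return -ENOMEM;        /* simulate create_qp_common() failing */
    }

    /* On failure the callee has not taken ownership, so the caller
     * must free what it allocated before propagating the error. */
    static struct qp *create_qp(int *err)
    {
            struct qp *qp = calloc(1, sizeof(*qp));

            if (!qp) {
                    *err = -ENOMEM;
                    return NULL;
            }
            *err = qp_init(qp);
            if (*err) {
                    free(qp);      /* the fix: no leak on this path */
                    return NULL;
            }
            return qp;
    }

    int main(void)
    {
            int err = 0;
            struct qp *qp = create_qp(&err);

            free(qp);              /* free(NULL) is a no-op */
            return (qp == NULL && err == -ENOMEM) ? 0 : 1;
    }
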
index dabcc65bd65e3b6a93adb47b9b53bbd67d08e15c..9c0e67bd2ba7326cf711e3eba0cf2e6bbd630d3e 100644 (file)
@@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
        int eqn;
        int err;
 
-       if (entries < 0)
+       if (entries < 0 ||
+           (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
                return ERR_PTR(-EINVAL);
 
        if (check_cq_create_flags(attr->flags))
@@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                return -ENOSYS;
        }
 
-       if (entries < 1)
+       if (entries < 1 ||
+           entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+               mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+                            entries,
+                            1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
                return -EINVAL;
+       }
 
        entries = roundup_pow_of_two(entries + 1);
-       if (entries >  (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+       if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
                return -EINVAL;
 
        if (entries == ibcq->cqe + 1)
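
The two hunks above make mlx5 reject an out-of-range CQ size up front instead of relying solely on the post-roundup check. A compact sketch of the two-stage validation (log_max is an invented capability value):

    #include <stdio.h>

    /* round up to the next power of two (v > 0, 32-bit) */
    static unsigned int roundup_pow_of_two(unsigned int v)
    {
            v--;
            v |= v >> 1; v |= v >> 2; v |= v >> 4;
            v |= v >> 8; v |= v >> 16;
            return v + 1;
    }

    int main(void)
    {
            unsigned int log_max = 16;           /* assumed capability */
            unsigned int max = 1u << log_max;
            int requests[] = { 4096, 70000 };

            for (int i = 0; i < 2; i++) {
                    int entries = requests[i];

                    /* stage 1: reject out-of-range requests up front */
                    if (entries < 1 || (unsigned int)entries > max) {
                            fprintf(stderr, "wrong entries number %d, max %u\n",
                                    entries, max);
                            continue;
                    }
                    /* stage 2: the rounded-up size must still fit */
                    entries = (int)roundup_pow_of_two((unsigned int)entries + 1);
                    if ((unsigned int)entries > max + 1)
                            continue;
                    printf("cq sized to %d entries\n", entries);
            }
            return 0;
    }
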
index 1534af1130588f03b5be1f4fae333d4277a02486..364aab9f3c9e23a1963aa22c082b864fb0125914 100644 (file)
@@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
        pma_cnt_ext->port_xmit_data =
                cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
                                         transmitted_ib_multicast.octets) >> 2);
-       pma_cnt_ext->port_xmit_data =
+       pma_cnt_ext->port_rcv_data =
                cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
                                         received_ib_multicast.octets) >> 2);
        pma_cnt_ext->port_xmit_packets =
index c72797cd9e4f199302ededac309b133d4ddd1b27..b48ad85315dc671da59f0453e8d1393a3c3d1a43 100644 (file)
@@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
            MLX5_CAP_ETH(dev->mdev, scatter_fcs))
                props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
 
+       if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+               props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
        props->vendor_part_id      = mdev->pdev->device;
        props->hw_ver              = mdev->pdev->revision;
 
@@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
        gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
-       resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+       if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+               resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
        resp.cache_line_size = L1_CACHE_BYTES;
        resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
        resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
@@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        if (field_avail(typeof(resp), cqe_version, udata->outlen))
                resp.response_length += sizeof(resp.cqe_version);
 
-       if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+       /*
+        * We don't want to expose information from the PCI bar that is located
+        * after 4096 bytes, so if the arch only supports larger pages, let's
+        * pretend we don't support reading the HCA's core clock. This is also
+        * forced by the mmap function.
+        */
+       if (PAGE_SIZE <= 4096 &&
+           field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
                resp.comp_mask |=
                        MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
                resp.hca_core_clock_offset =
@@ -1798,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
 {
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-       return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+       return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
                       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
@@ -1866,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
                break;
 
        case MLX5_DEV_EVENT_PORT_DOWN:
+       case MLX5_DEV_EVENT_PORT_INITIALIZED:
                ibev.event = IB_EVENT_PORT_ERR;
                port = (u8)param;
                break;
 
-       case MLX5_DEV_EVENT_PORT_INITIALIZED:
-               /* not used by ULPs */
-               return;
-
        case MLX5_DEV_EVENT_LID_CHANGE:
                ibev.event = IB_EVENT_LID_CHANGE;
                port = (u8)param;
index 504117657d41ffccd9d65467b748bede40fb1054..ce0a7ab35a227c569deae7f6b322b4c84eb5dcec 100644 (file)
@@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                qp->rq.max_gs = 0;
                qp->rq.wqe_cnt = 0;
                qp->rq.wqe_shift = 0;
+               cap->max_recv_wr = 0;
+               cap->max_recv_sge = 0;
        } else {
                if (ucmd) {
                        qp->rq.wqe_cnt = ucmd->rq_wqe_count;
@@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                         const struct ib_ah_attr *ah,
                         struct mlx5_qp_path *path, u8 port, int attr_mask,
-                        u32 path_flags, const struct ib_qp_attr *attr)
+                        u32 path_flags, const struct ib_qp_attr *attr,
+                        bool alt)
 {
        enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
        int err;
 
        if (attr_mask & IB_QP_PKEY_INDEX)
-               path->pkey_index = attr->pkey_index;
+               path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
+                                                    attr->pkey_index);
 
        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >=
@@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                                          ah->grh.sgid_index);
                path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
        } else {
-               path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
-               path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
-                                                                       0;
+               path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+               path->fl_free_ar |=
+                       (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
                path->rlid = cpu_to_be16(ah->dlid);
                path->grh_mlid = ah->src_path_bits & 0x7f;
                if (ah->ah_flags & IB_AH_GRH)
@@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        path->port = port;
 
        if (attr_mask & IB_QP_TIMEOUT)
-               path->ackto_lt = attr->timeout << 3;
+               path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
 
        if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
                return modify_raw_packet_eth_prio(dev->mdev,
@@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
 
        if (attr_mask & IB_QP_PKEY_INDEX)
-               context->pri_path.pkey_index = attr->pkey_index;
+               context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
 
        /* todo implement counter_index functionality */
 
@@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        if (attr_mask & IB_QP_AV) {
                err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
                                    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
-                                   attr_mask, 0, attr);
+                                   attr_mask, 0, attr, false);
                if (err)
                        goto out;
        }
@@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        if (attr_mask & IB_QP_ALT_PATH) {
                err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
                                    &context->alt_path,
-                                   attr->alt_port_num, attr_mask, 0, attr);
+                                   attr->alt_port_num,
+                                   attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+                                   0, attr, true);
                if (err)
                        goto out;
        }
@@ -3326,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
                        return MLX5_FENCE_MODE_SMALL_AND_FENCE;
                else
                        return fence;
-
-       } else {
-               return 0;
+       } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+               return MLX5_FENCE_MODE_FENCE;
        }
+
+       return 0;
 }
 
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
@@ -4013,11 +4020,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
                to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
                to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
-               qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
+               qp_attr->alt_pkey_index =
+                       be16_to_cpu(context->alt_path.pkey_index);
                qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
        }
 
-       qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
+       qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
        qp_attr->port_num = context->pri_path.port;
 
        /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
@@ -4079,17 +4087,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
 
        if (!ibqp->uobject) {
-               qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
+               qp_attr->cap.max_send_wr  = qp->sq.max_post;
                qp_attr->cap.max_send_sge = qp->sq.max_gs;
+               qp_init_attr->qp_context = ibqp->qp_context;
        } else {
                qp_attr->cap.max_send_wr  = 0;
                qp_attr->cap.max_send_sge = 0;
        }
 
-       /* We don't support inline sends for kernel QPs (yet), and we
-        * don't know what userspace's value should be.
-        */
-       qp_attr->cap.max_inline_data = 0;
+       qp_init_attr->qp_type = ibqp->qp_type;
+       qp_init_attr->recv_cq = ibqp->recv_cq;
+       qp_init_attr->send_cq = ibqp->send_cq;
+       qp_init_attr->srq = ibqp->srq;
+       qp_attr->cap.max_inline_data = qp->max_inline_data;
 
        qp_init_attr->cap            = qp_attr->cap;
 
index ff946d5f59e4071606ebc00fff43df5a56dde2f6..382466a90da7331e4492fab59d60bf904db36df1 100644 (file)
@@ -2178,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
 
        switch (cmd.type) {
        case QIB_CMD_ASSIGN_CTXT:
+               if (rcd) {
+                       ret = -EINVAL;
+                       goto bail;
+               }
+
                ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
                if (ret)
                        goto bail;
index 7209fbc03ccb90a7360f403fe99303b58c7932ee..a0b6ebee4d8a047e2fdffb8bbd831e5c36107fec 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
 #include <linux/iommu.h>
 #include <linux/workqueue.h>
 #include <linux/list.h>
@@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
        int i;
        int flags;
        dma_addr_t pa;
-       DEFINE_DMA_ATTRS(attrs);
-
-       if (dmasync)
-               dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
        if (!can_do_mlock())
                return -EPERM;
index 5fa4d4d81ee0cfcc5389897c904e343ff4903897..41ba7e9cadaab29895fec78c0284ddfed2558949 100644 (file)
@@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                        /* wrap to first map page, invert bit 0 */
                        offset = qpt->incr | ((offset & 1) ^ 1);
                }
-               /* there can be no bits at shift and below */
-               WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+               /* there can be no set bits in the low-order QoS bits */
+               WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
                qpn = mk_qpn(qpt, map, offset);
        }
 
@@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
  */
 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                  enum ib_qp_type type)
+       __releases(&qp->s_lock)
+       __releases(&qp->s_hlock)
+       __releases(&qp->r_lock)
+       __acquires(&qp->r_lock)
+       __acquires(&qp->s_hlock)
+       __acquires(&qp->s_lock)
 {
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
@@ -570,12 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
-       if (qp->s_ack_queue)
-               memset(
-                       qp->s_ack_queue,
-                       0,
-                       rvt_max_atomic(rdi) *
-                               sizeof(*qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
@@ -699,8 +699,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                 * initialization that is needed.
                 */
                priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
-               if (!priv)
+               if (IS_ERR(priv)) {
+                       ret = priv;
                        goto bail_qp;
+               }
                qp->priv = priv;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
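
Regarding the WARN_ON change in alloc_qpn above: the old mask, qos_shift - 1, is an off-by-one that misses most of the low-order QoS bits, while BIT(qos_shift) - 1 covers all of them. A tiny demonstration (the shift value is arbitrary):

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    int main(void)
    {
            unsigned long offset = 0x5;   /* QPN offset under test */
            unsigned int qos_shift = 3;   /* low 3 bits reserved for QoS */

            /* buggy mask: qos_shift - 1 = 0x2, misses bits 0 and 2 */
            printf("old check fires: %s\n",
                   (offset & (qos_shift - 1)) ? "yes" : "no");
            /* fixed mask: BIT(3) - 1 = 0x7, catches any low set bit */
            printf("new check fires: %s\n",
                   (offset & (BIT(qos_shift) - 1)) ? "yes" : "no");
            return 0;
    }
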
index e1cc2cc42f2550e1a2113b8c471f4456b182dd14..30c4fda7a05a6552e5a26d5dc1e52af200959a17 100644 (file)
@@ -501,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
                            !rdi->driver_f.quiesce_qp ||
                            !rdi->driver_f.notify_error_qp ||
                            !rdi->driver_f.mtu_from_qp ||
-                           !rdi->driver_f.mtu_to_path_mtu ||
-                           !rdi->driver_f.shut_down_port ||
-                           !rdi->driver_f.cap_mask_chg)
+                           !rdi->driver_f.mtu_to_path_mtu)
                                return -EINVAL;
                break;
 
index bab7db6fa9abf8c045b9e1b7e7a55f8996bf3745..4f7d9b48df643c7ef5f69b80f870a0f15c6efe87 100644 (file)
@@ -94,6 +94,7 @@ enum {
        IPOIB_NEIGH_TBL_FLUSH     = 12,
        IPOIB_FLAG_DEV_ADDR_SET   = 13,
        IPOIB_FLAG_DEV_ADDR_CTRL  = 14,
+       IPOIB_FLAG_GOING_DOWN     = 15,
 
        IPOIB_MAX_BACKOFF_SECONDS = 16,
 
index b2f42835d76d51cfed64266d6f884f46de3a11d1..951d9abcca8b283e652368c7c9c96f1fa25f3df4 100644 (file)
@@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 {
        struct net_device *dev = to_net_dev(d);
        int ret;
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+       if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
+               return -EPERM;
 
        if (!rtnl_trylock())
                return restart_syscall();
index 45c40a17d6a6ffc60cac0cab92bc24c689f75c83..dc6d241b9406e9661394c7e985fda4b0eacbfa6b 100644 (file)
@@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
        if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
                return false;
 
-       netif_addr_lock(priv->dev);
+       netif_addr_lock_bh(priv->dev);
 
        /* The subnet prefix may have changed, update it now so we won't have
         * to do it later
@@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
 
        search_gid.global.interface_id = priv->local_gid.global.interface_id;
 
-       netif_addr_unlock(priv->dev);
+       netif_addr_unlock_bh(priv->dev);
 
        err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
                          priv->dev, &port, &index);
 
-       netif_addr_lock(priv->dev);
+       netif_addr_lock_bh(priv->dev);
 
        if (search_gid.global.interface_id !=
            priv->local_gid.global.interface_id)
@@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
        }
 
 out:
-       netif_addr_unlock(priv->dev);
+       netif_addr_unlock_bh(priv->dev);
 
        return ret;
 }
index 2d7c1634664826a83f1f4efe42b1004c3b23d09f..5f58c41ef787d22692920855f397c1bbcb079e6e 100644 (file)
@@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
                                neigh = NULL;
                                goto out_unlock;
                        }
-                       neigh->alive = jiffies;
+
+                       if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+                               neigh->alive = jiffies;
                        goto out_unlock;
                }
        }
@@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
        struct ipoib_dev_priv *child_priv;
        struct net_device *netdev = priv->dev;
 
-       netif_addr_lock(netdev);
+       netif_addr_lock_bh(netdev);
 
        memcpy(&priv->local_gid.global.interface_id,
               &gid->global.interface_id,
@@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
        memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
        clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
 
-       netif_addr_unlock(netdev);
+       netif_addr_unlock_bh(netdev);
 
        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                down_read(&priv->vlan_rwsem);
@@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
        union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
        int ret = 0;
 
-       netif_addr_lock(dev);
+       netif_addr_lock_bh(dev);
 
        /* Make sure the QPN, reserved and subnet prefix match the current
         * lladdr, it also makes sure the lladdr is unicast.
@@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev,
            gid->global.interface_id == 0)
                ret = -EINVAL;
 
-       netif_addr_unlock(dev);
+       netif_addr_unlock_bh(dev);
 
        return ret;
 }
@@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
                ib_unregister_event_handler(&priv->event_handler);
                flush_workqueue(ipoib_workqueue);
 
+               /* mark interface in the middle of destruction */
+               set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+
                rtnl_lock();
                dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
                rtnl_unlock();
index 82fbc9442608f6e10fb0b851a08baf32cfacc5c8..d3394b6add24a0303dd51710d72f86087a36c7fe 100644 (file)
@@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
                return;
        }
        priv->local_lid = port_attr.lid;
-       netif_addr_lock(dev);
+       netif_addr_lock_bh(dev);
 
        if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
-               netif_addr_unlock(dev);
+               netif_addr_unlock_bh(dev);
                return;
        }
-       netif_addr_unlock(dev);
+       netif_addr_unlock_bh(dev);
 
        spin_lock_irq(&priv->lock);
        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
index 64a35595eab83783ade2451f928832548fcb7271..a2f9f29c6ab589e746170ac6463dd6c7781cd20e 100644 (file)
@@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
        ppriv = netdev_priv(pdev);
 
+       if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+               return -EPERM;
+
        snprintf(intf_name, sizeof intf_name, "%s.%04x",
                 ppriv->dev->name, pkey);
        priv = ipoib_intf_alloc(intf_name);
@@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
        ppriv = netdev_priv(pdev);
 
+       if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+               return -EPERM;
+
        if (!rtnl_trylock())
                return restart_syscall();
 
index 646de170ec12e44a9eb20661fa4614e54dd0afe2..3322ed750172ea78f2f57cc1ad08f6f9946eca63 100644 (file)
@@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
 {
        unsigned int sg_offset = 0;
 
-       state->desc = req->indirect_desc;
        state->fr.next = req->fr_list;
        state->fr.end = req->fr_list + ch->target->mr_per_cmd;
        state->sg = scat;
@@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
        struct scatterlist *sg;
        int i;
 
-       state->desc = req->indirect_desc;
        for_each_sg(scat, sg, count, i) {
                srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
                             ib_sg_dma_len(dev->dev, sg),
@@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
                                   target->indirect_size, DMA_TO_DEVICE);
 
        memset(&state, 0, sizeof(state));
+       state.desc = req->indirect_desc;
        if (dev->use_fast_reg)
                ret = srp_map_sg_fr(&state, ch, req, scat, count);
        else if (dev->use_fmr)
@@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device)
        int mr_page_shift, p;
        u64 max_pages_per_mr;
 
-       srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+       srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
        if (!srp_dev)
                return;
 
@@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device)
                                                   IB_ACCESS_REMOTE_WRITE);
                if (IS_ERR(srp_dev->global_mr))
                        goto err_pd;
-       } else {
-               srp_dev->global_mr = NULL;
        }
 
        for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
index e68b20cba70b344d7b33512a6d9c868fb3d2ff28..4a4155640d516252fba99c5728bb794e005218cf 100644 (file)
@@ -1638,8 +1638,7 @@ retry:
         */
        qp_init->cap.max_send_wr = srp_sq_size / 2;
        qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
-       qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd,
-                                       sdev->device->attrs.max_sge);
+       qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
        qp_init->port_num = ch->sport->port;
 
        ch->qp = ib_create_qp(sdev->pd, qp_init);
index fee6bfd7ca218f138e38f58318bbc56f12402f5b..389030487da7eecb521342e8a83859b39b3523a6 100644 (file)
@@ -106,6 +106,7 @@ enum {
        SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
 
        SRPT_DEF_SG_TABLESIZE = 128,
+       SRPT_DEF_SG_PER_WQE = 16,
 
        MIN_SRPT_SQ_SIZE = 16,
        DEF_SRPT_SQ_SIZE = 4096,
index 804dbcc37d3f70bc929d30bc63b13b9464f9aee0..a529a4535457217e761fd1e8f8b0748cb48a5498 100644 (file)
@@ -1031,17 +1031,17 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
 
        case XTYPE_XBOXONE:
                packet->data[0] = 0x09; /* activate rumble */
-               packet->data[1] = 0x08;
+               packet->data[1] = 0x00;
                packet->data[2] = xpad->odata_serial++;
-               packet->data[3] = 0x08; /* continuous effect */
-               packet->data[4] = 0x00; /* simple rumble mode */
-               packet->data[5] = 0x03; /* L and R actuator only */
-               packet->data[6] = 0x00; /* TODO: LT actuator */
-               packet->data[7] = 0x00; /* TODO: RT actuator */
+               packet->data[3] = 0x09;
+               packet->data[4] = 0x00;
+               packet->data[5] = 0x0F;
+               packet->data[6] = 0x00;
+               packet->data[7] = 0x00;
                packet->data[8] = strong / 512; /* left actuator */
                packet->data[9] = weak / 512;   /* right actuator */
-               packet->data[10] = 0x80;        /* length of pulse */
-               packet->data[11] = 0x00;        /* stop period of pulse */
+               packet->data[10] = 0xFF;
+               packet->data[11] = 0x00;
                packet->data[12] = 0x00;
                packet->len = 13;
                packet->pending = true;
@@ -1431,22 +1431,15 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
        int ep_irq_in_idx;
        int i, error;
 
+       if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+               return -ENODEV;
+
        for (i = 0; xpad_device[i].idVendor; i++) {
                if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
                    (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
                        break;
        }
 
-       if (xpad_device[i].xtype == XTYPE_XBOXONE &&
-           intf->cur_altsetting->desc.bInterfaceNumber != 0) {
-               /*
-                * The Xbox One controller lists three interfaces all with the
-                * same interface class, subclass and protocol. Differentiate by
-                * interface number.
-                */
-               return -ENODEV;
-       }
-
        xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
        if (!xpad)
                return -ENOMEM;
@@ -1478,6 +1471,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
                if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
                        if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
                                xpad->xtype = XTYPE_XBOX360W;
+                       else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208)
+                               xpad->xtype = XTYPE_XBOXONE;
                        else
                                xpad->xtype = XTYPE_XBOX360;
                } else {
@@ -1492,6 +1487,17 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
                        xpad->mapping |= MAP_STICKS_TO_NULL;
        }
 
+       if (xpad->xtype == XTYPE_XBOXONE &&
+           intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+               /*
+                * The Xbox One controller lists three interfaces all with the
+                * same interface class, subclass and protocol. Differentiate by
+                * interface number.
+                */
+               error = -ENODEV;
+               goto err_free_in_urb;
+       }
+
        error = xpad_init_output(intf, xpad);
        if (error)
                goto err_free_in_urb;
index 78f93cf68840d24328ebd0630f86c4e372d051bf..be5b399da5d3ec7b982fa49945bcc4d3be576b39 100644 (file)
@@ -1568,13 +1568,7 @@ static int elantech_set_properties(struct elantech_data *etd)
                case 5:
                        etd->hw_version = 3;
                        break;
-               case 6:
-               case 7:
-               case 8:
-               case 9:
-               case 10:
-               case 13:
-               case 14:
+               case 6 ... 14:
                        etd->hw_version = 4;
                        break;
                default:
index a3f0f5a47490e936e31b45594861d692503429b1..0f586780ceb4bee18a38428b889dba8d994ea4c7 100644 (file)
@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
                return -ENXIO;
        }
 
-       if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
-               psmouse_dbg(psmouse, "VMMouse port in use.\n");
-               return -EBUSY;
-       }
-
        /* Check if the device is present */
        response = ~VMMOUSE_PROTO_MAGIC;
        VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
-       if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
-               release_region(VMMOUSE_PROTO_PORT, 4);
+       if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
                return -ENXIO;
-       }
 
        if (set_properties) {
                psmouse->vendor = VMMOUSE_VENDOR;
@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
                psmouse->model = version;
        }
 
-       release_region(VMMOUSE_PROTO_PORT, 4);
-
        return 0;
 }
 
@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse)
        psmouse_reset(psmouse);
        input_unregister_device(priv->abs_dev);
        kfree(priv);
-       release_region(VMMOUSE_PROTO_PORT, 4);
 }
 
 /**
@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse)
        struct input_dev *rel_dev = psmouse->dev, *abs_dev;
        int error;
 
-       if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
-               psmouse_dbg(psmouse, "VMMouse port in use.\n");
-               return -EBUSY;
-       }
-
        psmouse_reset(psmouse);
        error = vmmouse_enable(psmouse);
        if (error)
-               goto release_region;
+               return error;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        abs_dev = input_allocate_device();
@@ -502,8 +487,5 @@ init_fail:
        kfree(priv);
        psmouse->private = NULL;
 
-release_region:
-       release_region(VMMOUSE_PROTO_PORT, 4);
-
        return error;
 }
index b368b0515c5a3aa26161dd7893a1c22c2678e4b2..253df96be4276cc9196a6c0ab25b04f838d061ac 100644 (file)
@@ -157,11 +157,11 @@ static int rmi_function_match(struct device *dev, struct device_driver *drv)
 static void rmi_function_of_probe(struct rmi_function *fn)
 {
        char of_name[9];
+       struct device_node *node = fn->rmi_dev->xport->dev->of_node;
 
        snprintf(of_name, sizeof(of_name), "rmi4-f%02x",
                fn->fd.function_number);
-       fn->dev.of_node = of_find_node_by_name(
-                               fn->rmi_dev->xport->dev->of_node, of_name);
+       fn->dev.of_node = of_get_child_by_name(node, of_name);
 }
 #else
 static inline void rmi_function_of_probe(struct rmi_function *fn)
index 8dd3fb5e1f9433f013f18c51728205c425a61560..88e91559c84e2dcad267a0e20fd4e65bc56238bb 100644 (file)
@@ -66,7 +66,7 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
        struct rmi_device *rmi_dev = fn->rmi_dev;
        int ret;
        int offset;
-       u8 buf[14];
+       u8 buf[15];
        int pitch_x = 0;
        int pitch_y = 0;
        int clip_x_low = 0;
@@ -86,9 +86,10 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
 
        offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
 
-       if (item->reg_size > 14) {
-               dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n",
-                       item->reg_size);
+       if (item->reg_size > sizeof(buf)) {
+               dev_err(&fn->dev,
+                       "F12 control8 should be no bigger than %zd bytes, not: %ld\n",
+                       sizeof(buf), item->reg_size);
                return -ENODEV;
        }
 
index 3c3dd78303be0e68f1373cb01d6609be6a1efbc7..fed73eeb47b3c6e263ca2e0415d9b385cc2f4854 100644 (file)
@@ -118,6 +118,13 @@ static int ts4800_parse_dt(struct platform_device *pdev,
                return -ENODEV;
        }
 
+       ts->regmap = syscon_node_to_regmap(syscon_np);
+       of_node_put(syscon_np);
+       if (IS_ERR(ts->regmap)) {
+               dev_err(dev, "cannot get parent's regmap\n");
+               return PTR_ERR(ts->regmap);
+       }
+
        error = of_property_read_u32_index(np, "syscon", 1, &reg);
        if (error < 0) {
                dev_err(dev, "no offset in syscon\n");
@@ -134,12 +141,6 @@ static int ts4800_parse_dt(struct platform_device *pdev,
 
        ts->bit = BIT(bit);
 
-       ts->regmap = syscon_node_to_regmap(syscon_np);
-       if (IS_ERR(ts->regmap)) {
-               dev_err(dev, "cannot get parent's regmap\n");
-               return PTR_ERR(ts->regmap);
-       }
-
        return 0;
 }
 
index 7295c198aa086be8630b3a77e202415649c6937b..6fe55d598facac3a05114e57e03b92b9d162eb87 100644 (file)
 #include <linux/regmap.h>
 #include "tsc200x-core.h"
 
+static const struct input_id tsc2004_input_id = {
+       .bustype = BUS_I2C,
+       .product = 2004,
+};
+
 static int tsc2004_cmd(struct device *dev, u8 cmd)
 {
        u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c,
                         const struct i2c_device_id *id)
 
 {
-       return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+       return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
                             devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
                             tsc2004_cmd);
 }
index b9f593dfd2ef8368223e5d7d7d9ca06856afcc8d..f2c5f0e47f77dd6ab177adec2bb102ed0da5dd08 100644 (file)
 #include <linux/regmap.h>
 #include "tsc200x-core.h"
 
+static const struct input_id tsc2005_input_id = {
+       .bustype = BUS_SPI,
+       .product = 2005,
+};
+
 static int tsc2005_cmd(struct device *dev, u8 cmd)
 {
        u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi)
        if (error)
                return error;
 
-       return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+       return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
                             devm_regmap_init_spi(spi, &tsc200x_regmap_config),
                             tsc2005_cmd);
 }
index 15240c1ee850abd4f5099db9a9e21a4b8ecf725b..dfa7f1c4f5453bd9d1544badc24548eab0851361 100644 (file)
@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input)
        mutex_unlock(&ts->mutex);
 }
 
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
                  struct regmap *regmap,
                  int (*tsc200x_cmd)(struct device *dev, u8 cmd))
 {
@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
        snprintf(ts->phys, sizeof(ts->phys),
                 "%s/input-ts", dev_name(dev));
 
-       input_dev->name = "TSC200X touchscreen";
+       if (tsc_id->product == 2004) {
+               input_dev->name = "TSC200X touchscreen";
+       } else {
+               input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+                                                "TSC%04d touchscreen",
+                                                tsc_id->product);
+               if (!input_dev->name)
+                       return -ENOMEM;
+       }
+
        input_dev->phys = ts->phys;
-       input_dev->id.bustype = bustype;
+       input_dev->id = *tsc_id;
        input_dev->dev.parent = dev;
        input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
        input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
index 7a482d1026148c692988172773af1c3f2ad9e57e..49a63a3c684094a331a7465dda370f1a2f0fcb21 100644 (file)
@@ -70,7 +70,7 @@
 extern const struct regmap_config tsc200x_regmap_config;
 extern const struct dev_pm_ops tsc200x_pm_ops;
 
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
                  struct regmap *regmap,
                  int (*tsc200x_cmd)(struct device *dev, u8 cmd));
 int tsc200x_remove(struct device *dev);
index bab3c6acf6a228f13fcb44756b43374293bf030f..b6fc4bde79ded75430725f97f72d7a9c3bae2c6b 100644 (file)
@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
-#define W8001_MAX_LENGTH       11
+#define W8001_MAX_LENGTH       13
 #define W8001_LEAD_MASK                0x80
 #define W8001_LEAD_BYTE                0x80
 #define W8001_TAB_MASK         0x40
@@ -155,6 +155,7 @@ static void parse_multi_touch(struct w8001 *w8001)
                bool touch = data[0] & (1 << i);
 
                input_mt_slot(dev, i);
+               input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
                if (touch) {
                        x = (data[6 * i + 1] << 7) | data[6 * i + 2];
                        y = (data[6 * i + 3] << 7) | data[6 * i + 4];
@@ -339,6 +340,15 @@ static irqreturn_t w8001_interrupt(struct serio *serio,
                w8001->idx = 0;
                parse_multi_touch(w8001);
                break;
+
+       default:
+               /*
+                * ThinkPad X60 Tablet PC (pen only device) sometimes
+                * ThinkPad X60 Tablet PC (pen-only device) sometimes
+                * W8001_PKTLEN_TPCPEN. Let's start over again.
+                */
+               if (!w8001->touch_dev && w8001->idx > W8001_PKTLEN_TPCPEN - 1)
+                       w8001->idx = 0;
        }
 
        return IRQ_HANDLED;
@@ -513,6 +523,8 @@ static int w8001_setup_touch(struct w8001 *w8001, char *basename,
                                        0, touch.x, 0, 0);
                input_set_abs_params(dev, ABS_MT_POSITION_Y,
                                        0, touch.y, 0, 0);
+               input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
+                                       0, MT_TOOL_MAX, 0, 0);
 
                strlcat(basename, " 2FG", basename_sz);
                if (w8001->max_pen_x && w8001->max_pen_y)
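
The new default case in w8001_interrupt resynchronizes the serial parser when a pen-only tablet emits packets longer than W8001_PKTLEN_TPCPEN. A condensed sketch of that resync rule on a byte-stream parser (the lead-bit layout and lengths are simplified assumptions, not the W8001 protocol):

    #include <stdio.h>

    #define LEAD_BYTE  0x80   /* high bit marks the start of a packet */
    #define PKTLEN_PEN 9      /* expected pen packet length */

    struct parser { unsigned char buf[16]; int idx; };

    static int pkt_len(unsigned char lead)
    {
            /* length implied by the lead byte; 0 means unknown/corrupt */
            switch (lead & 0x7f) {
            case 0x01: return PKTLEN_PEN;
            default:   return 0;
            }
    }

    static void feed(struct parser *p, unsigned char c)
    {
            if (c & LEAD_BYTE)
                    p->idx = 0;                    /* new packet starts */
            if (p->idx < (int)sizeof(p->buf))
                    p->buf[p->idx++] = c;

            if (p->idx == pkt_len(p->buf[0])) {
                    printf("packet complete (%d bytes)\n", p->idx);
                    p->idx = 0;
            } else if (p->idx > PKTLEN_PEN) {
                    /* oversized junk from a confused device: start
                     * over instead of overrunning (the w8001 fix) */
                    p->idx = 0;
            }
    }

    int main(void)
    {
            struct parser p = { .idx = 0 };

            feed(&p, 0x81);                 /* valid lead byte */
            for (int i = 0; i < 8; i++)
                    feed(&p, 0x01);         /* 8 data bytes -> complete */
            feed(&p, 0xff);                 /* corrupt lead byte */
            for (int i = 0; i < 12; i++)
                    feed(&p, 0x01);         /* junk stream -> resync */
            return 0;
    }
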
index 9e0034196e10468ea6e624b64737a1487dade7fa..59741ead7e158ca4e7b0d6506a0529c03c3fe412 100644 (file)
@@ -1107,13 +1107,13 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
                                break;
                        }
 
+                       devid = e->devid;
                        DUMP_printk("  DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
                                    hid, uid,
                                    PCI_BUS_NUM(devid),
                                    PCI_SLOT(devid),
                                    PCI_FUNC(devid));
 
-                       devid  = e->devid;
                        flags = e->flags;
 
                        ret = add_acpi_hid_device(hid, uid, &devid, false);
@@ -1568,13 +1568,23 @@ static int __init amd_iommu_init_pci(void)
                        break;
        }
 
+       /*
+        * Order is important here to make sure any unity map requirements are
+        * fulfilled. The unity mappings are created and written to the device
+        * table during the amd_iommu_init_api() call.
+        *
+        * After that we call init_device_table_dma() to make sure any
+        * uninitialized DTE will block DMA, and in the end we flush the caches
+        * of all IOMMUs to make sure the changes to the device table are
+        * active.
+        */
+       ret = amd_iommu_init_api();
+
        init_device_table_dma();
 
        for_each_iommu(iommu)
                iommu_flush_all_caches(iommu);
 
-       ret = amd_iommu_init_api();
-
        if (!ret)
                print_iommu_info();
 
index 94b68213c50dae945b9c0789afe8b6465dfd5411..5f6b3bcab0782be2d29d465390b1b4bd179bf433 100644 (file)
@@ -1941,6 +1941,7 @@ static struct iommu_ops arm_smmu_ops = {
        .attach_dev             = arm_smmu_attach_dev,
        .map                    = arm_smmu_map,
        .unmap                  = arm_smmu_unmap,
+       .map_sg                 = default_iommu_map_sg,
        .iova_to_phys           = arm_smmu_iova_to_phys,
        .add_device             = arm_smmu_add_device,
        .remove_device          = arm_smmu_remove_device,
index a644d0cec2d8275d202fd3d871673b7787dd258c..323dac9900ba3cc0a0c1351dfc39de86b4cb06ab 100644 (file)
@@ -3222,11 +3222,6 @@ static int __init init_dmars(void)
                        }
                }
 
-               iommu_flush_write_buffer(iommu);
-               iommu_set_root_entry(iommu);
-               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -3235,6 +3230,18 @@ static int __init init_dmars(void)
 #endif
        }
 
+       /*
+        * Now that qi is enabled on all iommus, set the root entry and flush
+        * caches. This is required on some Intel X58 chipsets, otherwise the
+        * flush_context function will loop forever and the boot hangs.
+        */
+       for_each_active_iommu(iommu, drhd) {
+               iommu_flush_write_buffer(iommu);
+               iommu_set_root_entry(iommu);
+               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+       }
+
        if (iommu_pass_through)
                iommu_identity_mapping |= IDENTMAP_ALL;
 
@@ -4595,13 +4602,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
        for (i = 0; i < g_num_of_iommus; i++) {
                struct intel_iommu *iommu = g_iommus[i];
                struct dmar_domain *domain;
-               u16 did;
+               int did;
 
                if (!iommu)
                        continue;
 
-               for (did = 0; did < 0xffff; did++) {
-                       domain = get_iommu_domain(iommu, did);
+               for (did = 0; did < cap_ndoms(iommu->cap); did++) {
+                       domain = get_iommu_domain(iommu, (u16)did);
 
                        if (!domain)
                                continue;
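
The free_all_cpu_cached_iovas() change above bounds the domain-id walk by cap_ndoms(iommu->cap) and switches the counter to int, instead of probing all 65535 possible ids with a u16. A sketch of the fixed loop shape (cap_ndoms is a stand-in value):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned int cap_ndoms = 256;   /* assumed domains supported */
            unsigned int visited = 0;

            /* Old shape: u16 did; did < 0xffff -- probes 65535 ids even
             * when the hardware implements only a few hundred (and a
             * u16 counter could never cover the full 0..0xffff range
             * with an inclusive bound without wrapping). */

            /* Fixed shape: int counter, bound taken from the capability
             * register, cast to the narrow type only at the lookup. */
            for (int did = 0; did < (int)cap_ndoms; did++) {
                    uint16_t id = (uint16_t)did;
                    (void)id;               /* get_iommu_domain(iommu, id) */
                    visited++;
            }
            printf("probed %u of 65535 possible ids\n", visited);
            return 0;
    }
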
index ba764a0835d3cd881981f0e049956e6f71a62788..e23001bfcfee996f8ab18f1695d1e3476bc6b9c2 100644 (file)
@@ -420,8 +420,10 @@ retry:
 
                /* Try replenishing IOVAs by flushing rcache. */
                flushed_rcache = true;
+               preempt_disable();
                for_each_online_cpu(cpu)
                        free_cpu_cached_iovas(cpu, iovad);
+               preempt_enable();
                goto retry;
        }
 
@@ -749,7 +751,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
        bool can_insert = false;
        unsigned long flags;
 
-       cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+       cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
        spin_lock_irqsave(&cpu_rcache->lock, flags);
 
        if (!iova_magazine_full(cpu_rcache->loaded)) {
@@ -779,6 +781,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
                iova_magazine_push(cpu_rcache->loaded, iova_pfn);
 
        spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+       put_cpu_ptr(rcache->cpu_rcaches);
 
        if (mag_to_free) {
                iova_magazine_free_pfns(mag_to_free, iovad);
@@ -812,7 +815,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
        bool has_pfn = false;
        unsigned long flags;
 
-       cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+       cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
        spin_lock_irqsave(&cpu_rcache->lock, flags);
 
        if (!iova_magazine_empty(cpu_rcache->loaded)) {
@@ -834,6 +837,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
                iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
 
        spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+       put_cpu_ptr(rcache->cpu_rcaches);
 
        return iova_pfn;
 }
index c7d6156ff5360873b1394c746ba62b05d06113f5..25b4627cb57fa18e66d32548f18d8d22669a5bcb 100644 (file)
@@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
        dte_addr = virt_to_phys(rk_domain->dt);
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
-               rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+               rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
        }
 
index 6bd881be24eafe2316f9cf137497c2f2dbef9109..5eb1f9e17a989ab1b392718df6f18eb49b4ca51f 100644 (file)
@@ -41,6 +41,7 @@
 
 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING          (1ULL << 0)
 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375      (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144      (1ULL << 2)
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING    (1 << 0)
 
@@ -82,6 +83,7 @@ struct its_node {
        u64                     flags;
        u32                     ite_size;
        u32                     device_ids;
+       int                     numa_node;
 };
 
 #define ITS_ITT_ALIGN          SZ_256
@@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d)
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
-       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       unsigned int cpu;
+       const struct cpumask *cpu_mask = cpu_online_mask;
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        struct its_collection *target_col;
        u32 id = its_get_event_id(d);
 
+       /* lpi cannot be routed to a redistributor that is on a foreign node */
+       if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+               if (its_dev->its->numa_node >= 0) {
+                       cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+                       if (!cpumask_intersects(mask_val, cpu_mask))
+                               return -EINVAL;
+               }
+       }
+
+       cpu = cpumask_any_and(mask_val, cpu_mask);
+
        if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
@@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void)
        list_for_each_entry(its, &its_nodes, entry) {
                u64 target;
 
+               /* avoid cross-node collections and ITS mappings */
+               if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+                       struct device_node *cpu_node;
+
+                       cpu_node = of_get_cpu_node(cpu, NULL);
+                       if (its->numa_node != NUMA_NO_NODE &&
+                               its->numa_node != of_node_to_nid(cpu_node))
+                               continue;
+               }
+
                /*
                 * We now have to bind each collection to its target
                 * redistributor.
@@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
 {
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
+       const struct cpumask *cpu_mask = cpu_online_mask;
+
+       /* get the cpu_mask of the local node */
+       if (its_dev->its->numa_node >= 0)
+               cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
        /* Bind the LPI to the first possible CPU */
-       its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+       its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
 
        /* Map the GIC IRQ and event to the device */
        its_send_mapvi(its_dev, d->hwirq, event);
@@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
        its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
 }
 
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+       struct its_node *its = data;
+
+       its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
        {
@@ -1451,6 +1487,14 @@ static const struct gic_quirk its_quirks[] = {
                .mask   = 0xffff0fff,
                .init   = its_enable_quirk_cavium_22375,
        },
+#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+       {
+               .desc   = "ITS: Cavium erratum 23144",
+               .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
+               .mask   = 0xffff0fff,
+               .init   = its_enable_quirk_cavium_23144,
+       },
 #endif
        {
        }
@@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node,
        its->base = its_base;
        its->phys_base = res.start;
        its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+       its->numa_node = of_node_to_nid(node);
 
        its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
        if (!its->cmd_base) {
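
The Cavium 23144 workaround above restricts an LPI's affinity to CPUs on the ITS's own NUMA node and fails the request when the caller's mask has no CPU there. The mask handling reduces to set intersection, sketched with plain bitmasks (the 8-CPU, two-node layout is invented):

    #include <stdio.h>
    #include <stdint.h>

    /* one bit per CPU: assume CPUs 0-3 on node 0, CPUs 4-7 on node 1 */
    static uint8_t node_mask(int node) { return node ? 0xf0 : 0x0f; }

    static int set_affinity(uint8_t user_mask, int its_node)
    {
            uint8_t allowed = node_mask(its_node);
            uint8_t both = user_mask & allowed;

            if (!both)
                    return -1;              /* -EINVAL in the driver */

            /* route to the first CPU in the intersection */
            printf("routed to cpu %d\n", __builtin_ctz(both));
            return 0;
    }

    int main(void)
    {
            set_affinity(0x30, 1);                  /* cpus 4,5: ok on node 1 */
            return set_affinity(0x03, 1) ? 0 : 1;   /* node 0 only: rejected */
    }
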
index fb042ba9a3dbbd239e6cbe6751be24978185eab8..2c5ba0e704bf149dc8416228a825a7a199629e8e 100644 (file)
@@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable)
 
        while (count--) {
                val = readl_relaxed(rbase + GICR_WAKER);
-               if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
+               if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
                        break;
                cpu_relax();
                udelay(1);
index 3b5e10aa48ab48443210edde5c0fc4350b54d294..70ed1d0151b8b271018d8fcf7465342843f38467 100644 (file)
@@ -718,7 +718,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 
        spin_lock_irqsave(&gic_lock, flags);
        gic_map_to_pin(intr, gic_cpu_pin);
-       gic_map_to_vpe(intr, vpe);
+       gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
        for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[vpe].pcpu_mask);
@@ -746,6 +746,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
                /* verify that it doesn't conflict with an IPI irq */
                if (test_bit(spec->hwirq, ipi_resrv))
                        return -EBUSY;
+
+               hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
+
+               return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+                                                    &gic_level_irq_controller,
+                                                    NULL);
        } else {
                base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
                if (base_hwirq == gic_shared_intrs) {
@@ -867,10 +873,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
                                                    &gic_level_irq_controller,
                                                    NULL);
                if (ret)
-                       return ret;
+                       goto error;
        }
 
        return 0;
+
+error:
+       irq_domain_free_irqs_parent(d, virq, nr_irqs);
+       return ret;
 }
 
 void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
@@ -949,7 +959,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
        switch (bus_token) {
        case DOMAIN_BUS_IPI:
                is_ipi = d->bus_token == bus_token;
-               return to_of_node(d->fwnode) == node && is_ipi;
+               return (!node || to_of_node(d->fwnode) == node) && is_ipi;
                break;
        default:
                return 0;
index e7155db01d55db0da6c69d25f2da068e48b1f798..73addb4b625b97fd2ce45b92d5fea6179f404bc7 100644 (file)
@@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data,
        /* set polarity for external interrupts only */
        for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
                if (priv->ext_irqs[i] == data->hwirq) {
-                       ret = pic32_set_ext_polarity(i + 1, flow_type);
+                       ret = pic32_set_ext_polarity(i, flow_type);
                        if (ret)
                                return ret;
                }
index 3495d5d6547f65a6fe507f063dce397bc424496f..3bce44893021365a999724a9aff0cf17d462a95f 100644 (file)
@@ -53,11 +53,12 @@ static void led_timer_function(unsigned long data)
 
        if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
                led_set_brightness_nosleep(led_cdev, LED_OFF);
+               led_cdev->flags &= ~LED_BLINK_SW;
                return;
        }
 
        if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
-               led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+               led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW);
                return;
        }
 
@@ -151,6 +152,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
                return;
        }
 
+       led_cdev->flags |= LED_BLINK_SW;
        mod_timer(&led_cdev->blink_timer, jiffies + 1);
 }
 
@@ -219,6 +221,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev)
        del_timer_sync(&led_cdev->blink_timer);
        led_cdev->blink_delay_on = 0;
        led_cdev->blink_delay_off = 0;
+       led_cdev->flags &= ~LED_BLINK_SW;
 }
 EXPORT_SYMBOL_GPL(led_stop_software_blink);
 
@@ -226,10 +229,10 @@ void led_set_brightness(struct led_classdev *led_cdev,
                        enum led_brightness brightness)
 {
        /*
-        * In case blinking is on delay brightness setting
+        * If software blink is active, delay brightness setting
         * until the next timer tick.
         */
-       if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
+       if (led_cdev->flags & LED_BLINK_SW) {
                /*
                 * If we need to disable soft blinking delegate this to the
                 * work queue task to avoid problems in case we are called
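
The led-core hunks above key brightness deferral on an explicit LED_BLINK_SW flag, set when the blink timer is armed and cleared when blinking stops, instead of inferring "blinking" from non-zero delays, which stay stale after a one-shot ends. A reduced state sketch (flag names and values are local to this example):

    #include <stdio.h>

    #define BLINK_SW      (1 << 0)
    #define ONESHOT_STOP  (1 << 1)

    struct led { unsigned int flags; int on_ms, off_ms; };

    static void start_blink(struct led *l, int on, int off)
    {
            l->on_ms = on;
            l->off_ms = off;
            l->flags |= BLINK_SW;                   /* timer armed */
    }

    static void timer_tick(struct led *l)
    {
            if (l->flags & ONESHOT_STOP)
                    l->flags &= ~(ONESHOT_STOP | BLINK_SW); /* blink over */
    }

    static void set_brightness(struct led *l, int b)
    {
            /* defer only while software blink is actually running */
            if (l->flags & BLINK_SW)
                    printf("deferred to next tick (b=%d)\n", b);
            else
                    printf("applied immediately (b=%d)\n", b);
    }

    int main(void)
    {
            struct led l = { 0 };

            start_blink(&l, 100, 100);
            l.flags |= ONESHOT_STOP;
            set_brightness(&l, 255);   /* deferred: blink still live */
            timer_tick(&l);            /* one-shot finishes */
            set_brightness(&l, 255);   /* applied: delays are stale but
                                          BLINK_SW is already clear */
            return 0;
    }
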
index 410c39c62dc7f1c3bdfa3b5dd305bbef031aef25..c9f386213e9ef16ae006e2270e2615e4bb60253e 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
+#include <linux/suspend.h>
 #include "../leds.h"
 
 static int panic_heartbeats;
@@ -154,6 +155,30 @@ static struct led_trigger heartbeat_led_trigger = {
        .deactivate = heartbeat_trig_deactivate,
 };
 
+static int heartbeat_pm_notifier(struct notifier_block *nb,
+                                unsigned long pm_event, void *unused)
+{
+       int rc;
+
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+       case PM_HIBERNATION_PREPARE:
+       case PM_RESTORE_PREPARE:
+               led_trigger_unregister(&heartbeat_led_trigger);
+               break;
+       case PM_POST_SUSPEND:
+       case PM_POST_HIBERNATION:
+       case PM_POST_RESTORE:
+               rc = led_trigger_register(&heartbeat_led_trigger);
+               if (rc)
+                       pr_err("could not re-register heartbeat trigger\n");
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
 static int heartbeat_reboot_notifier(struct notifier_block *nb,
                                     unsigned long code, void *unused)
 {
@@ -168,6 +193,10 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
+static struct notifier_block heartbeat_pm_nb = {
+       .notifier_call = heartbeat_pm_notifier,
+};
+
 static struct notifier_block heartbeat_reboot_nb = {
        .notifier_call = heartbeat_reboot_notifier,
 };
@@ -184,12 +213,14 @@ static int __init heartbeat_trig_init(void)
                atomic_notifier_chain_register(&panic_notifier_list,
                                               &heartbeat_panic_nb);
                register_reboot_notifier(&heartbeat_reboot_nb);
+               register_pm_notifier(&heartbeat_pm_nb);
        }
        return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
+       unregister_pm_notifier(&heartbeat_pm_nb);
        unregister_reboot_notifier(&heartbeat_reboot_nb);
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &heartbeat_panic_nb);
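
The heartbeat hunks unregister the trigger before suspend, hibernation, or restore and re-register it afterwards, so the trigger's timer stops generating LED events while the system is going down. A self-contained sketch of the same PM-notifier plumbing, with demo_* placeholder names:

    #include <linux/module.h>
    #include <linux/notifier.h>
    #include <linux/suspend.h>

    static int demo_pm_notifier(struct notifier_block *nb,
                                unsigned long pm_event, void *unused)
    {
            switch (pm_event) {
            case PM_SUSPEND_PREPARE:
            case PM_HIBERNATION_PREPARE:
            case PM_RESTORE_PREPARE:
                    pr_info("demo: tearing down before suspend\n");
                    break;
            case PM_POST_SUSPEND:
            case PM_POST_HIBERNATION:
            case PM_POST_RESTORE:
                    pr_info("demo: bringing back after resume\n");
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block demo_pm_nb = {
            .notifier_call = demo_pm_notifier,
    };

    static int __init demo_init(void)
    {
            return register_pm_notifier(&demo_pm_nb);
    }

    static void __exit demo_exit(void)
    {
            unregister_pm_notifier(&demo_pm_nb);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
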
index b73c6e7d28e4b9b0f337355aa75ce09d56c7e8f7..6f2c8522e14aaf74e0ed7eef7a1cdf60ee335b9b 100644 (file)
@@ -61,21 +61,36 @@ static int mcb_probe(struct device *dev)
        struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
        struct mcb_device *mdev = to_mcb_device(dev);
        const struct mcb_device_id *found_id;
+       struct module *carrier_mod;
+       int ret;
 
        found_id = mcb_match_id(mdrv->id_table, mdev);
        if (!found_id)
                return -ENODEV;
 
-       return mdrv->probe(mdev, found_id);
+       carrier_mod = mdev->dev.parent->driver->owner;
+       if (!try_module_get(carrier_mod))
+               return -EINVAL;
+
+       get_device(dev);
+       ret = mdrv->probe(mdev, found_id);
+       if (ret)
+               module_put(carrier_mod);
+
+       return ret;
 }
 
 static int mcb_remove(struct device *dev)
 {
        struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
        struct mcb_device *mdev = to_mcb_device(dev);
+       struct module *carrier_mod;
 
        mdrv->remove(mdev);
 
+       carrier_mod = mdev->dev.parent->driver->owner;
+       module_put(carrier_mod);
+
        put_device(&mdev->dev);
 
        return 0;
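
The mcb change pins the carrier (parent bus) driver's module for as long as a device on that bus is bound, so the carrier cannot be unloaded underneath an active child. A condensed sketch of the refcount pairing; demo_driver_probe()/demo_driver_remove() are hypothetical driver hooks, and unlike the hunk above the failure path here drops both references symmetrically:

    static int demo_bus_probe(struct device *dev)
    {
            /* the carrier driver that registered this child device */
            struct module *carrier = dev->parent->driver->owner;
            int ret;

            if (!try_module_get(carrier))
                    return -EINVAL;         /* carrier is unloading */

            get_device(dev);
            ret = demo_driver_probe(dev);
            if (ret) {
                    put_device(dev);
                    module_put(carrier);    /* undo both refs on failure */
            }
            return ret;
    }

    static int demo_bus_remove(struct device *dev)
    {
            struct module *carrier = dev->parent->driver->owner;

            demo_driver_remove(dev);
            module_put(carrier);
            put_device(dev);
            return 0;
    }
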
index beb2841ceae58bbb69d65d99bf3697ee6f5dc686..3f1ab4986cfc507d2cf70a451cbbf2962deac2b8 100644 (file)
@@ -779,11 +779,31 @@ static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = {
                        V4L2_DV_BT_CAP_CUSTOM)
 };
 
-static inline const struct v4l2_dv_timings_cap *
-adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd)
+/*
+ * Return the DV timings capabilities for the requested sink pad. As a special
+ * case, pad value -1 returns the capabilities for the currently selected input.
+ */
+static const struct v4l2_dv_timings_cap *
+adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd, int pad)
 {
-       return is_digital_input(sd) ? &adv76xx_timings_cap_digital :
-                                     &adv7604_timings_cap_analog;
+       if (pad == -1) {
+               struct adv76xx_state *state = to_state(sd);
+
+               pad = state->selected_input;
+       }
+
+       switch (pad) {
+       case ADV76XX_PAD_HDMI_PORT_A:
+       case ADV7604_PAD_HDMI_PORT_B:
+       case ADV7604_PAD_HDMI_PORT_C:
+       case ADV7604_PAD_HDMI_PORT_D:
+               return &adv76xx_timings_cap_digital;
+
+       case ADV7604_PAD_VGA_RGB:
+       case ADV7604_PAD_VGA_COMP:
+       default:
+               return &adv7604_timings_cap_analog;
+       }
 }
 
 
@@ -1329,7 +1349,7 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
                const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
 
                if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
-                                          adv76xx_get_dv_timings_cap(sd),
+                                          adv76xx_get_dv_timings_cap(sd, -1),
                                           adv76xx_check_dv_timings, NULL))
                        continue;
                if (vtotal(bt) != stdi->lcf + 1)
@@ -1430,18 +1450,22 @@ static int adv76xx_enum_dv_timings(struct v4l2_subdev *sd,
                return -EINVAL;
 
        return v4l2_enum_dv_timings_cap(timings,
-               adv76xx_get_dv_timings_cap(sd), adv76xx_check_dv_timings, NULL);
+               adv76xx_get_dv_timings_cap(sd, timings->pad),
+               adv76xx_check_dv_timings, NULL);
 }
 
 static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
                        struct v4l2_dv_timings_cap *cap)
 {
        struct adv76xx_state *state = to_state(sd);
+       unsigned int pad = cap->pad;
 
        if (cap->pad >= state->source_pad)
                return -EINVAL;
 
-       *cap = *adv76xx_get_dv_timings_cap(sd);
+       *cap = *adv76xx_get_dv_timings_cap(sd, pad);
+       cap->pad = pad;
+
        return 0;
 }
 
@@ -1450,9 +1474,9 @@ static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
 static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
                struct v4l2_dv_timings *timings)
 {
-       v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd),
-                       is_digital_input(sd) ? 250000 : 1000000,
-                       adv76xx_check_dv_timings, NULL);
+       v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd, -1),
+                                is_digital_input(sd) ? 250000 : 1000000,
+                                adv76xx_check_dv_timings, NULL);
 }
 
 static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
@@ -1620,7 +1644,7 @@ static int adv76xx_s_dv_timings(struct v4l2_subdev *sd,
 
        bt = &timings->bt;
 
-       if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd),
+       if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd, -1),
                                   adv76xx_check_dv_timings, NULL))
                return -ERANGE;
 
index 70c28d19ea04c8a7cfa452a6c68d0452094d6fcb..22cf60991df6bc049b4a3a36b34663ba4fdd7bc7 100644 (file)
@@ -45,7 +45,7 @@
 #include <media/v4l2-ioctl.h>
 
 #include <video/omapvrfb.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "omap_voutlib.h"
 #include "omap_voutdef.h"
index 9ccfe1f475a4dce9ec07bae935c3bbdce447f7de..94b5d65afb19b95da62b1862a9aad7f96169b133 100644 (file)
@@ -11,7 +11,7 @@
 #ifndef OMAP_VOUTDEF_H
 #define OMAP_VOUTDEF_H
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #define YUYV_BPP        2
index 80b0d88f125ce5a61b6da09473b1acb991faf4fc..58a25fdf0cce85190abf2266d063b997b689de88 100644 (file)
@@ -26,7 +26,7 @@
 
 #include <linux/dma-mapping.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "omap_voutlib.h"
 
index 87c12930416f565a118c145501e54df1a6b13cc7..92d9d4214c3adb326dc1a23d0c2c43840172611c 100644 (file)
@@ -1072,7 +1072,7 @@ static int airspy_probe(struct usb_interface *intf,
        if (ret) {
                dev_err(s->dev, "Failed to register as video device (%d)\n",
                                ret);
-               goto err_unregister_v4l2_dev;
+               goto err_free_controls;
        }
        dev_info(s->dev, "Registered as %s\n",
                        video_device_node_name(&s->vdev));
@@ -1081,7 +1081,6 @@ static int airspy_probe(struct usb_interface *intf,
 
 err_free_controls:
        v4l2_ctrl_handler_free(&s->hdl);
-err_unregister_v4l2_dev:
        v4l2_device_unregister(&s->v4l2_dev);
 err_free_mem:
        kfree(s);
index cded5ef52e24f45b01e0e7ee8d5ea0c31da452b8..05eed4be25df2ba22e68b016eb18a40cb8516a59 100644 (file)
@@ -1289,8 +1289,6 @@ struct uvc_xu_control_mapping32 {
 static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
                        const struct uvc_xu_control_mapping32 __user *up)
 {
-       struct uvc_menu_info __user *umenus;
-       struct uvc_menu_info __user *kmenus;
        compat_caddr_t p;
 
        if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1307,17 +1305,7 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 
        if (__get_user(p, &up->menu_info))
                return -EFAULT;
-       umenus = compat_ptr(p);
-       if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus)))
-               return -EFAULT;
-
-       kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus));
-       if (kmenus == NULL)
-               return -EFAULT;
-       kp->menu_info = kmenus;
-
-       if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus)))
-               return -EFAULT;
+       kp->menu_info = compat_ptr(p);
 
        return 0;
 }
@@ -1325,10 +1313,6 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
 static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
                        struct uvc_xu_control_mapping32 __user *up)
 {
-       struct uvc_menu_info __user *umenus;
-       struct uvc_menu_info __user *kmenus = kp->menu_info;
-       compat_caddr_t p;
-
        if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
            __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
            __put_user(kp->menu_count, &up->menu_count))
@@ -1337,16 +1321,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
        if (__clear_user(up->reserved, sizeof(up->reserved)))
                return -EFAULT;
 
-       if (kp->menu_count == 0)
-               return 0;
-
-       if (get_user(p, &up->menu_info))
-               return -EFAULT;
-       umenus = compat_ptr(p);
-
-       if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus)))
-               return -EFAULT;
-
        return 0;
 }
 
@@ -1361,8 +1335,6 @@ struct uvc_xu_control_query32 {
 static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
                        const struct uvc_xu_control_query32 __user *up)
 {
-       u8 __user *udata;
-       u8 __user *kdata;
        compat_caddr_t p;
 
        if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1376,17 +1348,7 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
 
        if (__get_user(p, &up->data))
                return -EFAULT;
-       udata = compat_ptr(p);
-       if (!access_ok(VERIFY_READ, udata, kp->size))
-               return -EFAULT;
-
-       kdata = compat_alloc_user_space(kp->size);
-       if (kdata == NULL)
-               return -EFAULT;
-       kp->data = kdata;
-
-       if (copy_in_user(kdata, udata, kp->size))
-               return -EFAULT;
+       kp->data = compat_ptr(p);
 
        return 0;
 }
@@ -1394,26 +1356,10 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
 static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
                        struct uvc_xu_control_query32 __user *up)
 {
-       u8 __user *udata;
-       u8 __user *kdata = kp->data;
-       compat_caddr_t p;
-
        if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
            __copy_to_user(up, kp, offsetof(typeof(*up), data)))
                return -EFAULT;
 
-       if (kp->size == 0)
-               return 0;
-
-       if (get_user(p, &up->data))
-               return -EFAULT;
-       udata = compat_ptr(p);
-       if (!access_ok(VERIFY_READ, udata, kp->size))
-               return -EFAULT;
-
-       if (copy_in_user(udata, kdata, kp->size))
-               return -EFAULT;
-
        return 0;
 }
 
@@ -1423,47 +1369,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
 static long uvc_v4l2_compat_ioctl32(struct file *file,
                     unsigned int cmd, unsigned long arg)
 {
+       struct uvc_fh *handle = file->private_data;
        union {
                struct uvc_xu_control_mapping xmap;
                struct uvc_xu_control_query xqry;
        } karg;
        void __user *up = compat_ptr(arg);
-       mm_segment_t old_fs;
        long ret;
 
        switch (cmd) {
        case UVCIOC_CTRL_MAP32:
-               cmd = UVCIOC_CTRL_MAP;
                ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
+               if (ret)
+                       return ret;
+               ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
+               if (ret)
+                       return ret;
+               ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
+               if (ret)
+                       return ret;
+
                break;
 
        case UVCIOC_CTRL_QUERY32:
-               cmd = UVCIOC_CTRL_QUERY;
                ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
+               if (ret)
+                       return ret;
+               ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
+               if (ret)
+                       return ret;
+               ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
+               if (ret)
+                       return ret;
                break;
 
        default:
                return -ENOIOCTLCMD;
        }
 
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       ret = video_ioctl2(file, cmd, (unsigned long)&karg);
-       set_fs(old_fs);
-
-       if (ret < 0)
-               return ret;
-
-       switch (cmd) {
-       case UVCIOC_CTRL_MAP:
-               ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
-               break;
-
-       case UVCIOC_CTRL_QUERY:
-               ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
-               break;
-       }
-
        return ret;
 }
 #endif
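
The uvcvideo rework retires the set_fs(KERNEL_DS) round trip through video_ioctl2(): the 32-bit layouts are copied in, their user pointers widened with compat_ptr(), and the native handlers invoked directly on the kernel copies. A stripped-down sketch of that compat-ioctl shape; the demo_* struct and handler are stand-ins, not uvcvideo symbols:

    #include <linux/compat.h>
    #include <linux/uaccess.h>

    struct demo_query32 {
            __u32 size;
            compat_caddr_t data;            /* 32-bit user pointer */
    };

    struct demo_query {
            __u32 size;
            void __user *data;
    };

    long demo_handle_query(struct file *file, struct demo_query *q);

    static long demo_compat_ioctl(struct file *file, unsigned long arg)
    {
            struct demo_query32 q32;
            struct demo_query q;

            if (copy_from_user(&q32, compat_ptr(arg), sizeof(q32)))
                    return -EFAULT;

            q.size = q32.size;
            q.data = compat_ptr(q32.data);  /* widen to a native __user ptr */

            return demo_handle_query(file, &q);
    }
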
index 28e5be2c2eef22f75a1a902131ee987bb927ad0d..528390f33b5336842491d039658a87f11b3a75f2 100644 (file)
@@ -2171,7 +2171,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
         * The determine_valid_ioctls() call already should ensure
         * that this can never happen, but just in case...
         */
-       if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_cropcap))
+       if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_g_selection))
                return -ENOTTY;
 
        if (ops->vidioc_cropcap)
index af4884ba6b7cafa6fe5b2eeda1bee00e59fdd2a3..15508df24e5d36fe1e6c7337de88a3fe9dd66797 100644 (file)
@@ -398,7 +398,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
        gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
                           GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
        gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
-                          GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
+                          GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
        gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
                           GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
                           p->cycle2cyclesamecsen);
index 199d261990be58c6c7ebaf9f783000d03d51098b..f32fbb8e8129e0b013b766c98457446d9588117d 100644 (file)
@@ -203,6 +203,7 @@ static int max77620_get_fps_period_reg_value(struct max77620_chip *chip,
                break;
        case MAX77620:
                fps_min_period = MAX77620_FPS_PERIOD_MIN_US;
+               break;
        default:
                return -EINVAL;
        }
@@ -236,6 +237,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
                break;
        case MAX77620:
                fps_max_period = MAX77620_FPS_PERIOD_MAX_US;
+               break;
        default:
                return -EINVAL;
        }
index eed254da63a8c79ebb95ff711aef5b4e8926f0d9..641c1a5666879b9715f5ee0924db447411a2e653 100644 (file)
@@ -730,7 +730,7 @@ static void mei_cl_wake_all(struct mei_cl *cl)
        /* synchronized under device mutex */
        if (waitqueue_active(&cl->wait)) {
                cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
-               wake_up_interruptible(&cl->wait);
+               wake_up(&cl->wait);
        }
 }
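
The one-word mei change matters because wake_up_interruptible() only wakes tasks sleeping in TASK_INTERRUPTIBLE, while wake_up() also reaches uninterruptible sleepers; a client blocked in an uninterruptible wait would otherwise never be woken. A sketch of the pairing, with demo names:

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static bool demo_done;

    /* sleeper side: TASK_UNINTERRUPTIBLE, so signals cannot end the wait */
    wait_event(demo_wq, demo_done);

    /* waker side: wake_up() reaches both interruptible and
     * uninterruptible sleepers; wake_up_interruptible() would skip
     * the wait_event() sleeper above */
    demo_done = true;
    wake_up(&demo_wq);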
 
index e62fde3ac431c111ec945d4aba9c877f6308d81d..c5472e3c923126097fd93f2abf31402a1717c228 100644 (file)
@@ -355,8 +355,10 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
                goto idata_err;
        }
 
-       if (!idata->buf_bytes)
+       if (!idata->buf_bytes) {
+               idata->buf = NULL;
                return idata;
+       }
 
        idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
@@ -1786,8 +1788,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
 
        packed_cmd_hdr = packed->cmd_hdr;
        memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
-       packed_cmd_hdr[0] = (packed->nr_entries << 16) |
-               (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+       packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+               (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
        hdr_blocks = mmc_large_sector(card) ? 8 : 1;
 
        /*
@@ -1801,14 +1803,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
                        ((brq->data.blocks * brq->data.blksz) >=
                         card->ext_csd.data_tag_unit_size);
                /* Argument of CMD23 */
-               packed_cmd_hdr[(i * 2)] =
+               packed_cmd_hdr[(i * 2)] = cpu_to_le32(
                        (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
                        (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
-                       blk_rq_sectors(prq);
+                       blk_rq_sectors(prq));
                /* Argument of CMD18 or CMD25 */
-               packed_cmd_hdr[((i * 2)) + 1] =
+               packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
                        mmc_card_blockaddr(card) ?
-                       blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+                       blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
                packed->blocks += blk_rq_sectors(prq);
                i++;
        }
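
The packed command header is consumed by the eMMC device in little-endian byte order, so every 32-bit word must pass through cpu_to_le32(); a bare u32 store would hand a big-endian host's card byte-swapped commands. A tiny sketch with invented field values:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static void demo_fill_packed_hdr(__le32 *hdr, u32 nr_entries)
    {
            const u32 wr = 2, version = 1;  /* illustrative values only */

            /* produces identical bytes in memory on LE and BE hosts */
            hdr[0] = cpu_to_le32((nr_entries << 16) | (wr << 8) | version);
    }
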
index c984321d1881d90ea8b7633abd0c55a605d78502..5d438ad3ee32c994eab11f33c267d7d0d62dbbae 100644 (file)
@@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card)
         * switch to HS200 mode if bus width is set successfully.
         */
        err = mmc_select_bus_width(card);
-       if (!err) {
+       if (err >= 0) {
                val = EXT_CSD_TIMING_HS200 |
                      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
                err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
        } else if (mmc_card_hs(card)) {
                /* Select the desired bus width optionally */
                err = mmc_select_bus_width(card);
-               if (!err) {
+               if (err >= 0) {
                        err = mmc_select_hs_ddr(card);
                        if (err)
                                goto free_card;
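
Both mmc.c hunks fix the same test: mmc_select_bus_width() returns the width it actually selected (a positive value) on success and 0 when only 1-bit transfers are available, reserving negative values for errors, so `!err` treated a successful wide-bus switch as a failure. The corrected check in isolation:

    err = mmc_select_bus_width(card);
    if (err >= 0) {
            /* width configured, or legitimately left at 1-bit:
             * continue with HS200 / HS-DDR selection */
    } else {
            /* a real error: bail out */
    }
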
index 86fac3e8683378acde59d1da87a0fad3878bf481..c763b404510f3a29d3864290297b22f596701cc3 100644 (file)
@@ -789,14 +789,16 @@ static int pxamci_probe(struct platform_device *pdev)
                gpio_direction_output(gpio_power,
                                      host->pdata->gpio_power_invert);
        }
-       if (gpio_is_valid(gpio_ro))
+       if (gpio_is_valid(gpio_ro)) {
                ret = mmc_gpio_request_ro(mmc, gpio_ro);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
-               goto out;
-       } else {
-               mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
-                       0 : MMC_CAP2_RO_ACTIVE_HIGH;
+               if (ret) {
+                       dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n",
+                               gpio_ro);
+                       goto out;
+               } else {
+                       mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+                               0 : MMC_CAP2_RO_ACTIVE_HIGH;
+               }
        }
 
        if (gpio_is_valid(gpio_cd))
index 7fc8b7aa83f029eb199f1accd3de9d47ea12b152..2ee4c21ec55eec871c13aadaaad5bb5f9903a581 100644 (file)
@@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
        [SDXC_CLK_400K]         = { .output = 180, .sample = 180 },
        [SDXC_CLK_25M]          = { .output = 180, .sample =  75 },
        [SDXC_CLK_50M]          = { .output = 150, .sample = 120 },
-       [SDXC_CLK_50M_DDR]      = { .output =  90, .sample = 120 },
-       [SDXC_CLK_50M_DDR_8BIT] = { .output =  90, .sample = 120 },
+       [SDXC_CLK_50M_DDR]      = { .output =  54, .sample =  36 },
+       [SDXC_CLK_50M_DDR_8BIT] = { .output =  72, .sample =  72 },
 };
 
 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
                                  MMC_CAP_1_8V_DDR |
                                  MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
 
-       /* TODO MMC DDR is not working on A80 */
-       if (of_device_is_compatible(pdev->dev.of_node,
-                                   "allwinner,sun9i-a80-mmc"))
-               mmc->caps &= ~MMC_CAP_1_8V_DDR;
-
        ret = mmc_of_parse(mmc);
        if (ret)
                goto error_free_dma;
index 08e158895635cddda0bbea4e6973cfb8dd727e74..a136da8df6fe897d4f908d2723d4a95d8b152933 100644 (file)
@@ -1657,8 +1657,11 @@ static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
 
        /* detect availability of ELM module. Won't be present pre-OMAP4 */
        info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
-       if (!info->elm_of_node)
-               dev_dbg(dev, "ti,elm-id not in DT\n");
+       if (!info->elm_of_node) {
+               info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
+               if (!info->elm_of_node)
+                       dev_dbg(dev, "ti,elm-id not in DT\n");
+       }
 
        /* select ecc-scheme for NAND */
        if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
index 16baeb51b2bdfe2b37d5179b9816ba6493622e99..ef3618299494f4973fd528ae72325490eb622b2c 100644 (file)
@@ -1147,11 +1147,17 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
  */
 static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
 {
-       struct kstat stat;
        int err, minor;
+       struct path path;
+       struct kstat stat;
 
        /* Probably this is an MTD character device node path */
-       err = vfs_stat(mtd_dev, &stat);
+       err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+       if (err)
+               return ERR_PTR(err);
+
+       err = vfs_getattr(&path, &stat);
+       path_put(&path);
        if (err)
                return ERR_PTR(err);
 
@@ -1160,6 +1166,7 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
                return ERR_PTR(-EINVAL);
 
        minor = MINOR(stat.rdev);
+
        if (minor & 1)
                /*
                 * Just do not think the "/dev/mtdrX" devices support is need,
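
vfs_stat() expects a __user pathname and is meant for syscall entry points; for a kernel-space string such as mtd_dev the lookup must go through kern_path() plus vfs_getattr(), which is exactly what both UBI hunks in this section switch to. A compact sketch using this kernel era's two-argument vfs_getattr():

    #include <linux/namei.h>
    #include <linux/fs.h>
    #include <linux/stat.h>

    static int demo_stat_kernel_path(const char *pathname, struct kstat *stat)
    {
            struct path path;
            int err;

            err = kern_path(pathname, LOOKUP_FOLLOW, &path);
            if (err)
                    return err;

            err = vfs_getattr(&path, stat);
            path_put(&path);
            return err;
    }
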
index 5780dd1ba79de96f21d4f08f7c12c5426b0d1c3b..ebf517271d2925452d911eb8df9a9d973b05926c 100644 (file)
@@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
        int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
        struct ubi_volume *vol = ubi->volumes[idx];
        struct ubi_vid_hdr *vid_hdr;
+       uint32_t crc;
 
        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
        if (!vid_hdr)
@@ -599,14 +600,8 @@ retry:
                goto out_put;
        }
 
-       vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-       err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
-       if (err) {
-               up_read(&ubi->fm_eba_sem);
-               goto write_error;
-       }
+       ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
 
-       data_size = offset + len;
        mutex_lock(&ubi->buf_mutex);
        memset(ubi->peb_buf + offset, 0xFF, len);
 
@@ -621,6 +616,19 @@ retry:
 
        memcpy(ubi->peb_buf + offset, buf, len);
 
+       data_size = offset + len;
+       crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+       vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+       vid_hdr->copy_flag = 1;
+       vid_hdr->data_size = cpu_to_be32(data_size);
+       vid_hdr->data_crc = cpu_to_be32(crc);
+       err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+       if (err) {
+               mutex_unlock(&ubi->buf_mutex);
+               up_read(&ubi->fm_eba_sem);
+               goto write_error;
+       }
+
        err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
        if (err) {
                mutex_unlock(&ubi->buf_mutex);
index 348dbbcbedc8fc3e3c93f9257cf1c4ccb7abddf8..a9e2cef7c95c24c0deb4d6500d7da7f00424db7b 100644 (file)
@@ -302,6 +302,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
 struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
 {
        int error, ubi_num, vol_id;
+       struct path path;
        struct kstat stat;
 
        dbg_gen("open volume %s, mode %d", pathname, mode);
@@ -309,7 +310,12 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
        if (!pathname || !*pathname)
                return ERR_PTR(-EINVAL);
 
-       error = vfs_stat(pathname, &stat);
+       error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+       if (error)
+               return ERR_PTR(error);
+
+       error = vfs_getattr(&path, &stat);
+       path_put(&path);
        if (error)
                return ERR_PTR(error);
 
index b9304a295f8648286c1d19e468c30b44084a4241..edc70ffad6607ac06d0a40b48316bef554c5f4c2 100644 (file)
@@ -101,11 +101,14 @@ enum ad_link_speed_type {
 #define MAC_ADDRESS_EQUAL(A, B)        \
        ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
 
-static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
+static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+       0, 0, 0, 0, 0, 0
+};
 static u16 ad_ticks_per_sec;
 static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
 
-static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+       MULTICAST_LACPDU_ADDR;
 
 /* ================= main 802.3ad protocol functions ================== */
 static int ad_lacpdu_send(struct port *port);
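
The two extra bytes on these arrays are not cosmetic: MAC_ADDRESS_EQUAL() expands to ether_addr_equal_64bits(), which may compare addresses as single 64-bit loads, so each operand needs two bytes of padding after the six address bytes to keep the load inside the object, plus suitable alignment. A sketch of a declaration that satisfies that contract, using the generic __aligned() attribute:

    #include <linux/etherdevice.h>

    /* 6 address bytes + 2 bytes of padding so an 8-byte load stays
     * in bounds; aligned to sizeof(long) like the bonding arrays */
    static const u8 demo_addr[ETH_ALEN + 2] __aligned(sizeof(long)) = {
            0x01, 0x80, 0xc2, 0x00, 0x00, 0x02,
    };

    static bool demo_match(const struct ethhdr *hdr)
    {
            /* h_dest is followed by h_source, so padding exists there */
            return ether_addr_equal_64bits(demo_addr, hdr->h_dest);
    }
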
@@ -657,6 +660,20 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
        }
 }
 
+static int __agg_active_ports(struct aggregator *agg)
+{
+       struct port *port;
+       int active = 0;
+
+       for (port = agg->lag_ports; port;
+            port = port->next_port_in_aggregator) {
+               if (port->is_enabled)
+                       active++;
+       }
+
+       return active;
+}
+
 /**
  * __get_agg_bandwidth - get the total bandwidth of an aggregator
  * @aggregator: the aggregator we're looking at
@@ -664,39 +681,40 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
  */
 static u32 __get_agg_bandwidth(struct aggregator *aggregator)
 {
+       int nports = __agg_active_ports(aggregator);
        u32 bandwidth = 0;
 
-       if (aggregator->num_of_ports) {
+       if (nports) {
                switch (__get_link_speed(aggregator->lag_ports)) {
                case AD_LINK_SPEED_1MBPS:
-                       bandwidth = aggregator->num_of_ports;
+                       bandwidth = nports;
                        break;
                case AD_LINK_SPEED_10MBPS:
-                       bandwidth = aggregator->num_of_ports * 10;
+                       bandwidth = nports * 10;
                        break;
                case AD_LINK_SPEED_100MBPS:
-                       bandwidth = aggregator->num_of_ports * 100;
+                       bandwidth = nports * 100;
                        break;
                case AD_LINK_SPEED_1000MBPS:
-                       bandwidth = aggregator->num_of_ports * 1000;
+                       bandwidth = nports * 1000;
                        break;
                case AD_LINK_SPEED_2500MBPS:
-                       bandwidth = aggregator->num_of_ports * 2500;
+                       bandwidth = nports * 2500;
                        break;
                case AD_LINK_SPEED_10000MBPS:
-                       bandwidth = aggregator->num_of_ports * 10000;
+                       bandwidth = nports * 10000;
                        break;
                case AD_LINK_SPEED_20000MBPS:
-                       bandwidth = aggregator->num_of_ports * 20000;
+                       bandwidth = nports * 20000;
                        break;
                case AD_LINK_SPEED_40000MBPS:
-                       bandwidth = aggregator->num_of_ports * 40000;
+                       bandwidth = nports * 40000;
                        break;
                case AD_LINK_SPEED_56000MBPS:
-                       bandwidth = aggregator->num_of_ports * 56000;
+                       bandwidth = nports * 56000;
                        break;
                case AD_LINK_SPEED_100000MBPS:
-                       bandwidth = aggregator->num_of_ports * 100000;
+                       bandwidth = nports * 100000;
                        break;
                default:
                        bandwidth = 0; /* to silence the compiler */
@@ -1530,10 +1548,10 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
 
        switch (__get_agg_selection_mode(curr->lag_ports)) {
        case BOND_AD_COUNT:
-               if (curr->num_of_ports > best->num_of_ports)
+               if (__agg_active_ports(curr) > __agg_active_ports(best))
                        return curr;
 
-               if (curr->num_of_ports < best->num_of_ports)
+               if (__agg_active_ports(curr) < __agg_active_ports(best))
                        return best;
 
                /*FALLTHROUGH*/
@@ -1561,8 +1579,14 @@ static int agg_device_up(const struct aggregator *agg)
        if (!port)
                return 0;
 
-       return netif_running(port->slave->dev) &&
-              netif_carrier_ok(port->slave->dev);
+       for (port = agg->lag_ports; port;
+            port = port->next_port_in_aggregator) {
+               if (netif_running(port->slave->dev) &&
+                   netif_carrier_ok(port->slave->dev))
+                       return 1;
+       }
+
+       return 0;
 }
 
 /**
@@ -1610,7 +1634,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
 
                agg->is_active = 0;
 
-               if (agg->num_of_ports && agg_device_up(agg))
+               if (__agg_active_ports(agg) && agg_device_up(agg))
                        best = ad_agg_selection_test(best, agg);
        }
 
@@ -1622,7 +1646,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
                 * answering partner.
                 */
                if (active && active->lag_ports &&
-                   active->lag_ports->is_enabled &&
+                   __agg_active_ports(active) &&
                    (__agg_has_partner(active) ||
                     (!__agg_has_partner(active) &&
                     !__agg_has_partner(best)))) {
@@ -1718,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
                aggregator->is_individual = false;
                aggregator->actor_admin_aggregator_key = 0;
                aggregator->actor_oper_aggregator_key = 0;
-               aggregator->partner_system = null_mac_addr;
+               eth_zero_addr(aggregator->partner_system.mac_addr_value);
                aggregator->partner_system_priority = 0;
                aggregator->partner_oper_aggregator_key = 0;
                aggregator->receive_state = 0;
@@ -1740,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
        if (aggregator) {
                ad_clear_agg(aggregator);
 
-               aggregator->aggregator_mac_address = null_mac_addr;
+               eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
                aggregator->aggregator_identifier = 0;
                aggregator->slave = NULL;
        }
@@ -2133,7 +2157,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                                else
                                        temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
                                temp_aggregator->num_of_ports--;
-                               if (temp_aggregator->num_of_ports == 0) {
+                               if (__agg_active_ports(temp_aggregator) == 0) {
                                        select_new_active_agg = temp_aggregator->is_active;
                                        ad_clear_agg(temp_aggregator);
                                        if (select_new_active_agg) {
@@ -2432,7 +2456,9 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
  */
 void bond_3ad_handle_link_change(struct slave *slave, char link)
 {
+       struct aggregator *agg;
        struct port *port;
+       bool dummy;
 
        port = &(SLAVE_AD_INFO(slave)->port);
 
@@ -2459,6 +2485,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
                port->is_enabled = false;
                ad_update_actor_keys(port, true);
        }
+       agg = __get_first_agg(port);
+       ad_agg_selection_logic(agg, &dummy);
+
        netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
                   port->actor_port_number,
                   link == BOND_LINK_UP ? "UP" : "DOWN");
@@ -2499,7 +2528,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
        active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
        if (active) {
                /* are enough slaves available to consider link up? */
-               if (active->num_of_ports < bond->params.min_links) {
+               if (__agg_active_ports(active) < bond->params.min_links) {
                        if (netif_carrier_ok(bond->dev)) {
                                netif_carrier_off(bond->dev);
                                goto out;
index c5ac160a8ae954d1104084c3d94795725cc261ae..551f0f8dead3945cf1516aeff01afe6c74117072 100644 (file)
 
 
 
-#ifndef __long_aligned
-#define __long_aligned __attribute__((aligned((sizeof(long)))))
-#endif
-static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 };
-static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
        0x33, 0x33, 0x00, 0x00, 0x00, 0x01
 };
 static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
index 941ec99cd3b69b6c9d3596e5b65b6aa89bc960c7..a2afa3be17a4bcc4e662a17e49f5c6bc31bce4d3 100644 (file)
@@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        /* check for initial state */
+       new_slave->link = BOND_LINK_NOCHANGE;
        if (bond->params.miimon) {
                if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
                        if (bond->params.updelay) {
index db760e84119fcb970b7b34f7c4fac92b1acfed52..b8df0f5e8c25ae35b1ce368b04536c40761a1c96 100644 (file)
@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
        if (err < 0)
                return err;
 
-       return register_netdevice(bond_dev);
+       err = register_netdevice(bond_dev);
+
+       netif_carrier_off(bond_dev);
+
+       return err;
 }
 
 static size_t bond_get_size(const struct net_device *bond_dev)
index 8b3275d7792acbab2d0ba9efe9fe3e2a6b283231..8f5e93cb79752703c141704011490535664b941e 100644 (file)
@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
 
        /* upper group completed, look again in lower */
        if (priv->rx_next > get_mb_rx_low_last(priv) &&
-           quota > 0 && mb > get_mb_rx_last(priv)) {
+           mb > get_mb_rx_last(priv)) {
                priv->rx_next = get_mb_rx_first(priv);
-               goto again;
+               if (quota > 0)
+                       goto again;
        }
 
        return received;
index f91b094288dad3d86064f24a33a97ad58756f3ca..e3dccd3200d5d834f13ad036c290c51e3091052e 100644 (file)
@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
 
        priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
 
-       for (i = 0; i < frame->can_dlc; i += 2) {
-               priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
-                               frame->data[i] | (frame->data[i + 1] << 8));
+       if (priv->type == BOSCH_D_CAN) {
+               u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+               for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+                       data = (u32)frame->data[i];
+                       data |= (u32)frame->data[i + 1] << 8;
+                       data |= (u32)frame->data[i + 2] << 16;
+                       data |= (u32)frame->data[i + 3] << 24;
+                       priv->write_reg32(priv, dreg, data);
+               }
+       } else {
+               for (i = 0; i < frame->can_dlc; i += 2) {
+                       priv->write_reg(priv,
+                                       C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+                                       frame->data[i] |
+                                       (frame->data[i + 1] << 8));
+               }
        }
 }
 
@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
        } else {
                int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
 
-               for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
-                       data = priv->read_reg(priv, dreg);
-                       frame->data[i] = data;
-                       frame->data[i + 1] = data >> 8;
+               if (priv->type == BOSCH_D_CAN) {
+                       for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+                               data = priv->read_reg32(priv, dreg);
+                               frame->data[i] = data;
+                               frame->data[i + 1] = data >> 8;
+                               frame->data[i + 2] = data >> 16;
+                               frame->data[i + 3] = data >> 24;
+                       }
+               } else {
+                       for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+                               data = priv->read_reg(priv, dreg);
+                               frame->data[i] = data;
+                               frame->data[i + 1] = data >> 8;
+                       }
                }
        }
 
index 910c12e2638e3615084ca9f6a148030c8f2358e1..ad535a854e5cf28309a71547b49e411db85b385b 100644 (file)
@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
         * - control mode with CAN_CTRLMODE_FD set
         */
 
+       if (!data)
+               return 0;
+
        if (data[IFLA_CAN_CTRLMODE]) {
                struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
 
@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
        return -EOPNOTSUPP;
 }
 
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+       return;
+}
+
 static struct rtnl_link_ops can_link_ops __read_mostly = {
        .kind           = "can",
        .maxtype        = IFLA_CAN_MAX,
@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
        .validate       = can_validate,
        .newlink        = can_newlink,
        .changelink     = can_changelink,
+       .dellink        = can_dellink,
        .get_size       = can_get_size,
        .fill_info      = can_fill_info,
        .get_xstats_size = can_get_xstats_size,
index bcb272f6c68a9ac242964af7901bf6597a499306..8483a40e7e9ef52327e15e03fa4100c73f68a53f 100644 (file)
@@ -16,7 +16,8 @@ config CAN_ESD_USB2
 config CAN_GS_USB
        tristate "Geschwister Schneider UG interfaces"
        ---help---
-         This driver supports the Geschwister Schneider USB/CAN devices.
+         This driver supports the Geschwister Schneider USB/CAN devices
+         and the bytewerk.org candleLight USB CAN interfaces.
          If unsure choose N,
          choose Y for built in support,
          M to compile as module (module will be named: gs_usb).
@@ -46,6 +47,8 @@ config CAN_KVASER_USB
            - Kvaser USBcan R
            - Kvaser Leaf Light v2
            - Kvaser Mini PCI Express HS
+           - Kvaser Mini PCI Express 2xHS
+           - Kvaser USBcan Light 2xHS
            - Kvaser USBcan II HS/HS
            - Kvaser USBcan II HS/LS
            - Kvaser USBcan Rugged ("USBcan Rev B")
index 1556d428623531026ef58fe15260ff2e88a04372..acb0c8490673a081fca9738659235f0673e5167d 100644 (file)
@@ -1,7 +1,9 @@
-/* CAN driver for Geschwister Schneider USB/CAN devices.
+/* CAN driver for Geschwister Schneider USB/CAN devices
+ * and bytewerk.org candleLight USB CAN interfaces.
  *
- * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
  * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ * Copyright (C) 2016 Hubert Denkmair
  *
  * Many thanks to all socketcan devs!
  *
@@ -29,6 +31,9 @@
 #define USB_GSUSB_1_VENDOR_ID      0x1d50
 #define USB_GSUSB_1_PRODUCT_ID     0x606f
 
+#define USB_CANDLELIGHT_VENDOR_ID  0x1209
+#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
+
 #define GSUSB_ENDPOINT_IN          1
 #define GSUSB_ENDPOINT_OUT         2
 
@@ -952,6 +957,8 @@ static void gs_usb_disconnect(struct usb_interface *intf)
 static const struct usb_device_id gs_usb_table[] = {
        { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
                                      USB_GSUSB_1_PRODUCT_ID, 0) },
+       { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
+                                     USB_CANDLELIGHT_PRODUCT_ID, 0) },
        {} /* Terminating entry */
 };
 
@@ -969,5 +976,6 @@ module_usb_driver(gs_usb_driver);
 MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
 MODULE_DESCRIPTION(
 "Socket CAN device driver for Geschwister Schneider Technologie-, "
-"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n"
+"and bytewerk.org candleLight USB CAN interfaces.");
 MODULE_LICENSE("GPL v2");
index 022bfa13ebfa0c85491cfc2cb17a3437c01f0205..6f1f3b675ff553a96d4342b619e34a606fa9be29 100644 (file)
 #define USB_CAN_R_PRODUCT_ID           39
 #define USB_LEAF_LITE_V2_PRODUCT_ID    288
 #define USB_MINI_PCIE_HS_PRODUCT_ID    289
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID        291
+#define USB_MINI_PCIE_2HS_PRODUCT_ID   292
 
 static inline bool kvaser_is_leaf(const struct usb_device_id *id)
 {
        return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
-              id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
+              id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
 }
 
 /* Kvaser USBCan-II devices */
@@ -537,6 +540,9 @@ static const struct usb_device_id kvaser_usb_table[] = {
                .driver_info = KVASER_HAS_TXRX_ERRORS },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
 
        /* USBCANII family IDs */
        { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
index 30defe6c81f22d6463c4df091a421014392f6ff2..821d86c38ab214a9c450f3079398b1d3a4af2e24 100644 (file)
@@ -3851,7 +3851,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
        unsigned long flags;
 
        /* If the device is closed, ignore the timeout */
-       if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+       if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
                return;
 
        /* Any nonrecoverable hardware error?
index e0fb0f1122db42553c0ed4a907d89a1e03e1f209..20760e10211a48ba15daa72a5c4322c90ceb0157 100644 (file)
@@ -509,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev)
         * on the current MAC's MII bus
         */
        for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
-               if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
-                       phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+               if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+                       phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
                        if (!aup->phy_search_highest_addr)
                                /* break out with first one found */
                                break;
index 16419f550eff0643665fc6e9b24b96a22eb2ea45..058460bdd5a6ea5ce2e3de17dbd4da958a306b12 100644 (file)
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
        priv->bus = bus;
        bus->priv = priv;
        bus->parent = priv->dev;
-       bus->name = "Synopsys MII Bus",
+       bus->name = "Synopsys MII Bus";
        bus->read = &arc_mdio_read;
        bus->write = &arc_mdio_write;
        bus->reset = &arc_mdio_reset;
index 9fe8b5e310d15a024aec48456f43444137637138..e708e360a9e3919df7b4e7b6e8500e5c27d24f55 100644 (file)
@@ -86,9 +86,22 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
        while (!cur_buf->skb && next != rxq->read_idx) {
                struct alx_rfd *rfd = &rxq->rfd[cur];
 
-               skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+               /*
+                * When the RX DMA address ends in a pattern such as
+                * 0x....fc0, it is very likely to trigger a DMA
+                * RFD overflow.
+                *
+                * To work around it, allocate the rx skb with 64
+                * bytes of extra space and offset the address
+                * whenever 0x....fc0 is detected.
+                */
+               skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
                if (!skb)
                        break;
+
+               if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+                       skb_reserve(skb, 64);
+
                dma = dma_map_single(&alx->hw.pdev->dev,
                                     skb->data, alx->rxbuf_size,
                                     DMA_FROM_DEVICE);
index 08a23e6b60e947894783f2115c2fe16abece84bc..1a3555d03a96a823e4936c06a2e9ce64d27ae851 100644 (file)
@@ -259,6 +259,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
                if (err) {
                        netdev_err(dev, "rx buffer allocation failed\n");
                        dev->stats.rx_dropped++;
+                       dev_kfree_skb(skb);
                        return;
                }
 
index 543bf38105c9240d9ae374708377755c5e4db9a6..bfa26a2590c979b41bc6c46aee275d730e1ce89d 100644 (file)
@@ -392,7 +392,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                else
                        p = (char *)priv;
                p += s->stat_offset;
-               data[i] = *(u32 *)p;
+               data[i] = *(unsigned long *)p;
        }
 }
 
index ee5f431ab32af4f2b7c175f836ac10f697016e1e..25bbae5928d43a4d911d0d5852612d8524437cdb 100644 (file)
@@ -231,7 +231,7 @@ err_dma:
        dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
                         DMA_TO_DEVICE);
 
-       while (i > 0) {
+       while (i-- > 0) {
                int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
                struct bgmac_slot_info *slot = &ring->slots[index];
                u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
@@ -267,15 +267,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
        while (ring->start != ring->end) {
                int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
                struct bgmac_slot_info *slot = &ring->slots[slot_idx];
-               u32 ctl1;
+               u32 ctl0, ctl1;
                int len;
 
                if (slot_idx == empty_slot)
                        break;
 
+               ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
                ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
                len = ctl1 & BGMAC_DESC_CTL1_LEN;
-               if (ctl1 & BGMAC_DESC_CTL0_SOF)
+               if (ctl0 & BGMAC_DESC_CTL0_SOF)
                        /* Unmap no longer used buffer */
                        dma_unmap_single(dma_dev, slot->dma_addr, len,
                                         DMA_TO_DEVICE);
@@ -1312,7 +1313,8 @@ static int bgmac_open(struct net_device *net_dev)
 
        phy_start(bgmac->phy_dev);
 
-       netif_carrier_on(net_dev);
+       netif_start_queue(net_dev);
+
        return 0;
 }
 
index 0a5b770cefaab6b316f1a9bb6e899c9ef32c953a..a59d55e25d5f02cbc69d903daaa577e21bafc10f 100644 (file)
@@ -12895,52 +12895,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
        return rc;
 }
 
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
 {
        struct bnx2x_vlan_entry *vlan;
        int rc = 0;
 
-       if (!bp->vlan_cnt) {
-               DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
-               return 0;
-       }
-
+       /* Configure all non-configured entries */
        list_for_each_entry(vlan, &bp->vlan_reg, link) {
-               /* Prepare for cleanup in case of errors */
-               if (rc) {
-                       vlan->hw = false;
-                       continue;
-               }
-
-               if (!vlan->hw)
+               if (vlan->hw)
                        continue;
 
-               DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+               if (bp->vlan_cnt >= bp->vlan_credit)
+                       return -ENOBUFS;
 
                rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
                if (rc) {
-                       BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
-                       vlan->hw = false;
-                       rc = -EINVAL;
-                       continue;
+                       BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+                       return rc;
                }
+
+               DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+               vlan->hw = true;
+               bp->vlan_cnt++;
        }
 
-       return rc;
+       return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+       bool need_accept_any_vlan;
+
+       need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+       if (bp->accept_any_vlan != need_accept_any_vlan) {
+               bp->accept_any_vlan = need_accept_any_vlan;
+               DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+                  bp->accept_any_vlan ? "raised" : "cleared");
+               if (set_rx_mode) {
+                       if (IS_PF(bp))
+                               bnx2x_set_rx_mode_inner(bp);
+                       else
+                               bnx2x_vfpf_storm_rx_mode(bp);
+               }
+       }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+       struct bnx2x_vlan_entry *vlan;
+
+       /* The hw forgot all entries after reload */
+       list_for_each_entry(vlan, &bp->vlan_reg, link)
+               vlan->hw = false;
+       bp->vlan_cnt = 0;
+
+       /* Don't set rx mode here. Our caller will do it. */
+       bnx2x_vlan_configure(bp, false);
+
+       return 0;
 }
 
 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
        struct bnx2x *bp = netdev_priv(dev);
        struct bnx2x_vlan_entry *vlan;
-       bool hw = false;
-       int rc = 0;
-
-       if (!netif_running(bp->dev)) {
-               DP(NETIF_MSG_IFUP,
-                  "Ignoring VLAN configuration the interface is down\n");
-               return -EFAULT;
-       }
 
        DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
 
@@ -12948,93 +12967,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        if (!vlan)
                return -ENOMEM;
 
-       bp->vlan_cnt++;
-       if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
-               DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
-               bp->accept_any_vlan = true;
-               if (IS_PF(bp))
-                       bnx2x_set_rx_mode_inner(bp);
-               else
-                       bnx2x_vfpf_storm_rx_mode(bp);
-       } else if (bp->vlan_cnt <= bp->vlan_credit) {
-               rc = __bnx2x_vlan_configure_vid(bp, vid, true);
-               hw = true;
-       }
-
        vlan->vid = vid;
-       vlan->hw = hw;
+       vlan->hw = false;
+       list_add_tail(&vlan->link, &bp->vlan_reg);
 
-       if (!rc) {
-               list_add(&vlan->link, &bp->vlan_reg);
-       } else {
-               bp->vlan_cnt--;
-               kfree(vlan);
-       }
-
-       DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+       if (netif_running(dev))
+               bnx2x_vlan_configure(bp, true);
 
-       return rc;
+       return 0;
 }
 
 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
        struct bnx2x *bp = netdev_priv(dev);
        struct bnx2x_vlan_entry *vlan;
+       bool found = false;
        int rc = 0;
 
-       if (!netif_running(bp->dev)) {
-               DP(NETIF_MSG_IFUP,
-                  "Ignoring VLAN configuration the interface is down\n");
-               return -EFAULT;
-       }
-
        DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
 
-       if (!bp->vlan_cnt) {
-               BNX2X_ERR("Unable to kill VLAN %d\n", vid);
-               return -EINVAL;
-       }
-
        list_for_each_entry(vlan, &bp->vlan_reg, link)
-               if (vlan->vid == vid)
+               if (vlan->vid == vid) {
+                       found = true;
                        break;
+               }
 
-       if (vlan->vid != vid) {
+       if (!found) {
                BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
                return -EINVAL;
        }
 
-       if (vlan->hw)
+       if (netif_running(dev) && vlan->hw) {
                rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+               DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+               bp->vlan_cnt--;
+       }
 
        list_del(&vlan->link);
        kfree(vlan);
 
-       bp->vlan_cnt--;
-
-       if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
-               /* Configure all non-configured entries */
-               list_for_each_entry(vlan, &bp->vlan_reg, link) {
-                       if (vlan->hw)
-                               continue;
-
-                       rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
-                       if (rc) {
-                               BNX2X_ERR("Unable to config VLAN %d\n",
-                                         vlan->vid);
-                               continue;
-                       }
-                       DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
-                          vlan->vid);
-                       vlan->hw = true;
-               }
-               DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
-               bp->accept_any_vlan = false;
-               if (IS_PF(bp))
-                       bnx2x_set_rx_mode_inner(bp);
-               else
-                       bnx2x_vfpf_storm_rx_mode(bp);
-       }
+       if (netif_running(dev))
+               bnx2x_vlan_configure(bp, true);
 
        DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
 
@@ -13941,14 +13914,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
                bp->doorbells = bnx2x_vf_doorbells(bp);
                rc = bnx2x_vf_pci_alloc(bp);
                if (rc)
-                       goto init_one_exit;
+                       goto init_one_freemem;
        } else {
                doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
                if (doorbell_size > pci_resource_len(pdev, 2)) {
                        dev_err(&bp->pdev->dev,
                                "Cannot map doorbells, bar size too small, aborting\n");
                        rc = -ENOMEM;
-                       goto init_one_exit;
+                       goto init_one_freemem;
                }
                bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
                                                doorbell_size);
@@ -13957,19 +13930,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
                dev_err(&bp->pdev->dev,
                        "Cannot map doorbell space, aborting\n");
                rc = -ENOMEM;
-               goto init_one_exit;
+               goto init_one_freemem;
        }
 
        if (IS_VF(bp)) {
                rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
                if (rc)
-                       goto init_one_exit;
+                       goto init_one_freemem;
        }
 
        /* Enable SRIOV if capability found in configuration space */
        rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
        if (rc)
-               goto init_one_exit;
+               goto init_one_freemem;
 
        /* calc qm_cid_count */
        bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
@@ -13988,7 +13961,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        rc = bnx2x_set_int_mode(bp);
        if (rc) {
                dev_err(&pdev->dev, "Cannot set interrupts\n");
-               goto init_one_exit;
+               goto init_one_freemem;
        }
        BNX2X_DEV_INFO("set interrupts successfully\n");
 
@@ -13996,7 +13969,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
-               goto init_one_exit;
+               goto init_one_freemem;
        }
        BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
 
@@ -14029,6 +14002,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 
        return 0;
 
+init_one_freemem:
+       bnx2x_free_mem_bp(bp);
+
 init_one_exit:
        bnx2x_disable_pcie_error_reporting(bp);
 
index 72a2efff8e494c7f077b2ee1311bdd54e857ca03..c777cde85ce416d98f3959178c193c0cca4b6c1c 100644 (file)
@@ -286,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;
 
+               tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
+               wmb();  /* Sync is_push and byte queue before pushing data */
 
                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
@@ -298,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                         push_len);
                }
 
-               tx_buf->is_push = 1;
                goto tx_done;
        }
 
@@ -1112,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
        if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
 
-       if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
-               netdev_features_t features = skb->dev->features;
+       if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+           (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                u16 vlan_proto = tpa_info->metadata >>
                        RX_CMP_FLAGS2_METADATA_TPID_SFT;
+               u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
 
-               if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-                    vlan_proto == ETH_P_8021Q) ||
-                   ((features & NETIF_F_HW_VLAN_STAG_RX) &&
-                    vlan_proto == ETH_P_8021AD)) {
-                       __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
-                                              tpa_info->metadata &
-                                              RX_CMP_FLAGS2_METADATA_VID_MASK);
-               }
+               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
        }
 
        skb_checksum_none_assert(skb);
@@ -1277,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 
        skb->protocol = eth_type_trans(skb, dev);
 
-       if (rxcmp1->rx_cmp_flags2 &
-           cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
-               netdev_features_t features = skb->dev->features;
+       if ((rxcmp1->rx_cmp_flags2 &
+            cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+           (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+               u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
                u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
 
-               if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
-                    vlan_proto == ETH_P_8021Q) ||
-                   ((features & NETIF_F_HW_VLAN_STAG_RX) &&
-                    vlan_proto == ETH_P_8021AD))
-                       __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
-                                              meta_data &
-                                              RX_CMP_FLAGS2_METADATA_VID_MASK);
+               __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
        }
 
        skb_checksum_none_assert(skb);
@@ -5466,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 
        if (!bnxt_rfs_capable(bp))
                features &= ~NETIF_F_NTUPLE;
+
+       /* Both CTAG and STAG VLAN acceleration on the RX side have to be
+        * turned on or off together.
+        */
+       if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+           (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+               if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+                       features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+                                     NETIF_F_HW_VLAN_STAG_RX);
+               else
+                       features |= NETIF_F_HW_VLAN_CTAG_RX |
+                                   NETIF_F_HW_VLAN_STAG_RX;
+       }
+
        return features;
 }
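Two fixes land in this file: moving tx_buf->is_push (with a wmb()) before the doorbell push, and forcing CTAG/STAG Rx acceleration to toggle together in bnxt_fix_features(). The barrier half is the classic publish pattern; a minimal userspace sketch using C11 release semantics in place of wmb() (slot/publish are illustrative names, not driver code):

    #include <stdatomic.h>

    struct slot {
            int payload;
            atomic_bool ready;      /* consumer reads payload only once set */
    };

    static void publish(struct slot *s, int value)
    {
            s->payload = value;
            /* Release ordering plays the role of wmb(): payload is
             * visible to other CPUs before ready flips to true. */
            atomic_store_explicit(&s->ready, true, memory_order_release);
    }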
 
index a38cb047b54083897fa6e8df5c098c7e1b98d7ac..1b0ae4a72e9ecd81a1d404c5e6edadb3f00ce8cc 100644 (file)
@@ -1591,7 +1591,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
 {
        struct bnxt *bp = netdev_priv(dev);
        u16  start = eeprom->offset, length = eeprom->len;
-       int rc;
+       int rc = 0;
 
        memset(data, 0, eeprom->len);
 
index 8de79ae63231b0ad21e47edb9a150e026adf6bd3..0e7e7da8d201c0b3af122c542ef42467c03cb94c 100644 (file)
@@ -2821,7 +2821,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                if (!g) {
                        netif_info(lio, tx_err, lio->netdev,
                                   "Transmit scatter gather: glist null!\n");
-                       goto lio_xmit_failed;
+                       goto lio_xmit_dma_failed;
                }
 
                cmdsetup.s.gather = 1;
@@ -2892,7 +2892,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
        else
                status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
        if (status == IQ_SEND_FAILED)
-               goto lio_xmit_failed;
+               goto lio_xmit_dma_failed;
 
        netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
 
@@ -2906,12 +2906,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        return NETDEV_TX_OK;
 
+lio_xmit_dma_failed:
+       dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+                        ndata.datasize, DMA_TO_DEVICE);
 lio_xmit_failed:
        stats->tx_dropped++;
        netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
                   iq_no, stats->tx_dropped);
-       dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
-                        ndata.datasize, DMA_TO_DEVICE);
        recv_buffer_free(skb);
        return NETDEV_TX_OK;
 }
index 95f17f8cadacc08637c485e555242e78aafb48c5..16ed20357c5c30d2cbb786cb414a6d4544fecf83 100644 (file)
@@ -499,6 +499,7 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
        u32 rr_quantum;
        u8 sq_idx = sq->sq_num;
        u8 pqs_vnic;
+       int svf;
 
        if (sq->sqs_mode)
                pqs_vnic = nic->pqs_vf[vnic];
@@ -511,10 +512,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
        /* 24 bytes for FCS, IPG and preamble */
        rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
 
-       tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+       if (!sq->sqs_mode) {
+               tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+       } else {
+               for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+                       if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+                               break;
+               }
+               tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
+               tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
+               tl4 += (svf * NIC_TL4_PER_LMAC);
+               tl4 += (bgx * NIC_TL4_PER_BGX);
+       }
        tl4 += sq_idx;
-       if (sq->sqs_mode)
-               tl4 += vnic * 8;
 
        tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
        nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
index 3ed21988626b5ffaceb4e72e0276c63cee1c4347..63a39ac97d53beb65e562bba0a9fb1b86f7baac7 100644 (file)
@@ -551,7 +551,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
        }
 
        /* Clear rcvflt bit (latching high) and read it back */
-       bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+       if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
        if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
                dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
                if (bgx->use_training) {
@@ -570,13 +572,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
                return -1;
        }
 
-       /* Wait for MAC RX to be ready */
-       if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
-                        SMU_RX_CTL_STATUS, true)) {
-               dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
-               return -1;
-       }
-
        /* Wait for BGX RX to be idle */
        if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
                dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -589,29 +584,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
                return -1;
        }
 
-       if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
-               dev_err(&bgx->pdev->dev, "Receive fault\n");
-               return -1;
-       }
-
-       /* Receive link is latching low. Force it high and verify it */
-       bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
-       if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
-                        SPU_STATUS1_RCV_LNK, false)) {
-               dev_err(&bgx->pdev->dev, "SPU receive link down\n");
-               return -1;
-       }
-
+       /* Clear receive packet disable */
        cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
        cfg &= ~SPU_MISC_CTL_RX_DIS;
        bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
-       return 0;
+
+       /* Check for MAC RX faults */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+       /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+       cfg &= SMU_RX_CTL_STATUS;
+       if (!cfg)
+               return 0;
+
+       /* Rx local/remote fault seen.
+        * Reinit the LMAC to see if the condition recovers.
+        */
+       bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+       return -1;
 }
 
 static void bgx_poll_for_link(struct work_struct *work)
 {
        struct lmac *lmac;
-       u64 link;
+       u64 spu_link, smu_link;
 
        lmac = container_of(work, struct lmac, dwork.work);
 
@@ -621,8 +617,11 @@ static void bgx_poll_for_link(struct work_struct *work)
        bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
                     SPU_STATUS1_RCV_LNK, false);
 
-       link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
-       if (link & SPU_STATUS1_RCV_LNK) {
+       spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+       smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+       if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+           !(smu_link & SMU_RX_CTL_STATUS)) {
                lmac->link_up = 1;
                if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
                        lmac->last_speed = 40000;
@@ -636,9 +635,15 @@ static void bgx_poll_for_link(struct work_struct *work)
        }
 
        if (lmac->last_link != lmac->link_up) {
+               if (lmac->link_up) {
+                       if (bgx_xaui_check_link(lmac)) {
+                               /* Errors, clear link_up state */
+                               lmac->link_up = 0;
+                               lmac->last_speed = SPEED_UNKNOWN;
+                               lmac->last_duplex = DUPLEX_UNKNOWN;
+                       }
+               }
                lmac->last_link = lmac->link_up;
-               if (lmac->link_up)
-                       bgx_xaui_check_link(lmac);
        }
 
        queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -710,7 +715,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
 static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
 {
        struct lmac *lmac;
-       u64 cmrx_cfg;
+       u64 cfg;
 
        lmac = &bgx->lmac[lmacid];
        if (lmac->check_link) {
@@ -719,9 +724,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
                destroy_workqueue(lmac->check_link);
        }
 
-       cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
-       cmrx_cfg &= ~(1 << 15);
-       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+       /* Disable packet reception */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cfg &= ~CMR_PKT_RX_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+       /* Give the Rx/Tx FIFOs a chance to drain */
+       bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+       bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+       /* Disable packet transmission */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cfg &= ~CMR_PKT_TX_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+       /* Disable serdes lanes */
+       if (!lmac->is_sgmii)
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+       else
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+       /* Disable LMAC */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cfg &= ~CMR_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
        bgx_flush_dmac_addrs(bgx, lmacid);
 
        if ((bgx->lmac_type != BGX_MODE_XFI) &&
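bgx_lmac_disable() above now quiesces the LMAC in stages: stop Rx, let the FIFOs drain, stop Tx, power down the serdes, then clear CMR_EN. The drain steps lean on a bounded register poll; a rough sketch of that idiom, assuming stand-in stubs for bgx_reg_read()/usleep_range():

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t read_reg(unsigned long offset) { return 0; }  /* stub */
    static void sleep_us(unsigned int us) { }                     /* stub */

    /* Wait until (reg & mask) reaches the wanted state, with a retry cap. */
    static bool poll_reg(unsigned long offset, uint64_t mask, bool want_zero)
    {
            int tries = 100;

            while (tries--) {
                    uint64_t v = read_reg(offset) & mask;

                    if (want_zero ? (v == 0) : (v != 0))
                            return true;
                    sleep_us(100);
            }
            return false;   /* timed out; caller treats the lane as stuck */
    }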
index 149e179363a1e6dd71e0b7cbd00f26a5e5ef0116..42010d2e5ddf4e3d46eab4b2c9cee6caac5e252a 100644 (file)
@@ -41,6 +41,7 @@
 #define BGX_CMRX_RX_STAT10             0xC0
 #define BGX_CMRX_RX_BP_DROP            0xC8
 #define BGX_CMRX_RX_DMAC_CTL           0x0E8
+#define BGX_CMRX_RX_FIFO_LEN           0x108
 #define BGX_CMR_RX_DMACX_CAM           0x200
 #define  RX_DMACX_CAM_EN                       BIT_ULL(48)
 #define  RX_DMACX_CAM_LMACID(x)                        (x << 49)
@@ -50,6 +51,7 @@
 #define BGX_CMR_CHAN_MSK_AND           0x450
 #define BGX_CMR_BIST_STATUS            0x460
 #define BGX_CMR_RX_LMACS               0x468
+#define BGX_CMRX_TX_FIFO_LEN           0x518
 #define BGX_CMRX_TX_STAT0              0x600
 #define BGX_CMRX_TX_STAT1              0x608
 #define BGX_CMRX_TX_STAT2              0x610
index a2cdfc1261dc77092049575f5519d7160a1c8f29..50812a1d67bdf8aab1ca8cb5fe560c7c5e2cb778 100644 (file)
@@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
        CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
        CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+       CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
        CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
        CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
        CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
index c4b262ca7d43623fba1cef20dfc6c03ebeadfd7f..2accab38632327ae007baf1ef293dca1ef7da7d9 100644 (file)
@@ -36,8 +36,8 @@
 #define __T4FW_VERSION_H__
 
 #define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
 #define T4FW_MIN_VERSION_MICRO 0x00
 
 #define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
 #define T5FW_MIN_VERSION_MICRO 0x00
 
 #define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
index 41b01064510098c00a66d86bb49cf2f3fe02f9c3..4466a11871109b41861cd1be9ff3dc70d207512f 100644 (file)
@@ -860,6 +860,11 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int entry;
        void *dest;
 
+       if (skb_put_padto(skb, ETHOC_ZLEN)) {
+               dev->stats.tx_errors++;
+               goto out_no_free;
+       }
+
        if (unlikely(skb->len > ETHOC_BUFSIZ)) {
                dev->stats.tx_errors++;
                goto out;
@@ -894,6 +899,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_tx_timestamp(skb);
 out:
        dev_kfree_skb(skb);
+out_no_free:
        return NETDEV_TX_OK;
 }
 
@@ -1086,7 +1092,7 @@ static int ethoc_probe(struct platform_device *pdev)
        if (!priv->iobase) {
                dev_err(&pdev->dev, "cannot remap I/O memory space\n");
                ret = -ENXIO;
-               goto error;
+               goto free;
        }
 
        if (netdev->mem_end) {
@@ -1095,7 +1101,7 @@ static int ethoc_probe(struct platform_device *pdev)
                if (!priv->membase) {
                        dev_err(&pdev->dev, "cannot remap memory space\n");
                        ret = -ENXIO;
-                       goto error;
+                       goto free;
                }
        } else {
                /* Allocate buffer memory */
@@ -1106,7 +1112,7 @@ static int ethoc_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
                                buffer_size);
                        ret = -ENOMEM;
-                       goto error;
+                       goto free;
                }
                netdev->mem_end = netdev->mem_start + buffer_size;
                priv->dma_alloc = buffer_size;
@@ -1120,7 +1126,7 @@ static int ethoc_probe(struct platform_device *pdev)
                128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
        if (num_bd < 4) {
                ret = -ENODEV;
-               goto error;
+               goto free;
        }
        priv->num_bd = num_bd;
        /* num_tx must be a power of two */
@@ -1133,7 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
        priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
        if (!priv->vma) {
                ret = -ENOMEM;
-               goto error;
+               goto free;
        }
 
        /* Allow the platform setup code to pass in a MAC address. */
@@ -1195,7 +1201,7 @@ static int ethoc_probe(struct platform_device *pdev)
        priv->mdio = mdiobus_alloc();
        if (!priv->mdio) {
                ret = -ENOMEM;
-               goto free;
+               goto free2;
        }
 
        priv->mdio->name = "ethoc-mdio";
@@ -1208,7 +1214,7 @@ static int ethoc_probe(struct platform_device *pdev)
        ret = mdiobus_register(priv->mdio);
        if (ret) {
                dev_err(&netdev->dev, "failed to register MDIO bus\n");
-               goto free;
+               goto free2;
        }
 
        ret = ethoc_mdio_probe(netdev);
@@ -1241,9 +1247,10 @@ error2:
 error:
        mdiobus_unregister(priv->mdio);
        mdiobus_free(priv->mdio);
-free:
+free2:
        if (priv->clk)
                clk_disable_unprepare(priv->clk);
+free:
        free_netdev(netdev);
 out:
        return ret;
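The relabeling above restores the usual kernel unwind convention: error labels appear in reverse order of acquisition, so any failure falls through exactly the cleanups it owes. A compact standalone sketch of such a ladder (the resources are made up):

    #include <stdlib.h>

    static int probe(void)
    {
            void *a, *b, *c;
            int ret = -1;

            a = malloc(16);
            if (!a)
                    goto out;
            b = malloc(16);
            if (!b)
                    goto free_a;
            c = malloc(16);
            if (!c)
                    goto free_b;
            return 0;       /* success: caller now owns a, b and c */

    free_b:
            free(b);
    free_a:
            free(a);
    out:
            return ret;
    }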
index 085f9125cf42a6c1aa76bbb6744ba5b9ba919d31..9b7a3f5a2818f1fdc7dfe183991a2a7d3ab03523 100644 (file)
@@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
                 * re-adding ourselves to the poll list.
                 */
 
-               if (priv->tx_skb && !tx_ctrl_ct)
+               if (priv->tx_skb && !tx_ctrl_ct) {
+                       nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
                        napi_reschedule(napi);
+               }
        }
 
        return work_done;
@@ -283,6 +285,7 @@ static void nps_enet_hw_reset(struct net_device *ndev)
        ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
        usleep_range(10, 20);
+       ge_rst_value = 0;
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
 
        /* Tx fifo reset sequence */
index ca2cccc594fdc240d4951cd8deb0b6232aa80c2f..fea0f330ddbdeb2a5cf99b81f30e46919d066f62 100644 (file)
@@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                                         fec16_to_cpu(bdp->cbd_datlen),
                                         DMA_TO_DEVICE);
                bdp->cbd_bufaddr = cpu_to_fec32(0);
-               if (!skb) {
-                       bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
-                       continue;
-               }
+               if (!skb)
+                       goto skb_done;
 
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
-
+skb_done:
                /* Make sure the update to bdp and tx_skbuff are performed
                 * before dirty_tx
                 */
@@ -2418,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
                return -EOPNOTSUPP;
 
        if (ec->rx_max_coalesced_frames > 255) {
-               pr_err("Rx coalesced frames exceed hardware limiation");
+               pr_err("Rx coalesced frames exceed hardware limitation\n");
                return -EINVAL;
        }
 
        if (ec->tx_max_coalesced_frames > 255) {
-               pr_err("Tx coalesced frame exceed hardware limiation");
+               pr_err("Tx coalesced frame exceed hardware limitation\n");
                return -EINVAL;
        }
 
        cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
        if (cycle > 0xFFFF) {
-               pr_err("Rx coalesed usec exceeed hardware limiation");
+               pr_err("Rx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
        cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
        if (cycle > 0xFFFF) {
-               pr_err("Rx coalesed usec exceeed hardware limiation");
+               pr_err("Rx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
index 7615e0668acbf9b223b2087ed89646146dc6e827..2e6785b6e8bee1d971d739e160abe1ce17da9ca0 100644 (file)
@@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                                 tx_queue->tx_ring_size);
 
        if (likely(!nr_frags)) {
-               lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+               if (likely(!do_tstamp))
+                       lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
        } else {
                u32 lstatus_start = lstatus;
 
index 3d746c887873836ac0c0c0590b2940f3b65382b3..67a648c7d3a9e14d51ebbe650ec9b72c8a61015d 100644 (file)
@@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
        u32 link_stat = priv->link;
        struct hnae_handle *h;
 
-       assert(priv && priv->ae_handle);
        h = priv->ae_handle;
 
        if (priv->phy) {
@@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
 {
        struct hns_nic_priv *priv = netdev_priv(net_dev);
 
-       assert(priv);
-
        strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
                sizeof(drvinfo->version));
        drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev,
        struct hnae_handle *h;
        struct hnae_ae_ops *ops;
 
-       assert(priv || priv->ae_handle);
-
        h = priv->ae_handle;
        ops = h->dev->ops;
 
@@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev,
        struct hnae_ae_ops *ops;
        int ret;
 
-       assert(priv || priv->ae_handle);
-
        ops = priv->ae_handle->dev->ops;
 
        if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
@@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
        struct hns_nic_priv *priv = netdev_priv(net_dev);
        struct hnae_ae_ops *ops;
 
-       assert(priv || priv->ae_handle);
-
        ops = priv->ae_handle->dev->ops;
 
        cmd->version = HNS_CHIP_VERSION;
@@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev)
        struct hns_nic_priv *priv = netdev_priv(net_dev);
        struct hnae_ae_ops *ops;
 
-       assert(priv || priv->ae_handle);
-
        ops = priv->ae_handle->dev->ops;
        if (!ops->get_regs_len) {
                netdev_err(net_dev, "ops->get_regs_len is null!\n");
index 864cb21351a410bf1cd7059cf9952a6010784860..88f3c85fb04ada575e367042d79f4b8188e12245 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/uaccess.h>
 #include <asm/firmware.h>
 #include <linux/seq_file.h>
+#include <linux/workqueue.h>
 
 #include "ibmvnic.h"
 
@@ -89,6 +90,7 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
 static int ibmvnic_remove(struct vio_dev *);
 static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
@@ -469,7 +471,8 @@ static int ibmvnic_open(struct net_device *netdev)
        crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
        ibmvnic_send_crq(adapter, &crq);
 
-       netif_start_queue(netdev);
+       netif_tx_start_all_queues(netdev);
+
        return 0;
 
 bounce_map_failed:
@@ -519,7 +522,7 @@ static int ibmvnic_close(struct net_device *netdev)
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_disable(&adapter->napi[i]);
 
-       netif_stop_queue(netdev);
+       netif_tx_stop_all_queues(netdev);
 
        if (adapter->bounce_buffer) {
                if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -1212,12 +1215,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
                goto reg_failed;
        }
 
-       scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
-       if (scrq->irq == NO_IRQ) {
-               dev_err(dev, "Error mapping irq\n");
-               goto map_irq_failed;
-       }
-
        scrq->adapter = adapter;
        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
        scrq->cur = 0;
@@ -1230,12 +1227,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
        return scrq;
 
-map_irq_failed:
-       do {
-               rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
-                                       adapter->vdev->unit_address,
-                                       scrq->crq_num);
-       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 reg_failed:
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
@@ -1256,6 +1247,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
                        if (adapter->tx_scrq[i]) {
                                free_irq(adapter->tx_scrq[i]->irq,
                                         adapter->tx_scrq[i]);
+                               irq_dispose_mapping(adapter->tx_scrq[i]->irq);
                                release_sub_crq_queue(adapter,
                                                      adapter->tx_scrq[i]);
                        }
@@ -1267,6 +1259,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
                        if (adapter->rx_scrq[i]) {
                                free_irq(adapter->rx_scrq[i]->irq,
                                         adapter->rx_scrq[i]);
+                               irq_dispose_mapping(adapter->rx_scrq[i]->irq);
                                release_sub_crq_queue(adapter,
                                                      adapter->rx_scrq[i]);
                        }
@@ -1276,6 +1269,29 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
        adapter->requested_caps = 0;
 }
 
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+       int i;
+
+       if (adapter->tx_scrq) {
+               for (i = 0; i < adapter->req_tx_queues; i++)
+                       if (adapter->tx_scrq[i])
+                               release_sub_crq_queue(adapter,
+                                                     adapter->tx_scrq[i]);
+               adapter->tx_scrq = NULL;
+       }
+
+       if (adapter->rx_scrq) {
+               for (i = 0; i < adapter->req_rx_queues; i++)
+                       if (adapter->rx_scrq[i])
+                               release_sub_crq_queue(adapter,
+                                                     adapter->rx_scrq[i]);
+               adapter->rx_scrq = NULL;
+       }
+
+       adapter->requested_caps = 0;
+}
+
 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
                            struct ibmvnic_sub_crq_queue *scrq)
 {
@@ -1395,6 +1411,66 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
        return IRQ_HANDLED;
 }
 
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+       struct device *dev = &adapter->vdev->dev;
+       struct ibmvnic_sub_crq_queue *scrq;
+       int i = 0, j = 0;
+       int rc = 0;
+
+       for (i = 0; i < adapter->req_tx_queues; i++) {
+               scrq = adapter->tx_scrq[i];
+               scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+               if (scrq->irq == NO_IRQ) {
+                       rc = -EINVAL;
+                       dev_err(dev, "Error mapping irq\n");
+                       goto req_tx_irq_failed;
+               }
+
+               rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+                                0, "ibmvnic_tx", scrq);
+
+               if (rc) {
+                       dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+                               scrq->irq, rc);
+                       irq_dispose_mapping(scrq->irq);
+                       goto req_rx_irq_failed;
+               }
+       }
+
+       for (i = 0; i < adapter->req_rx_queues; i++) {
+               scrq = adapter->rx_scrq[i];
+               scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+               if (scrq->irq == NO_IRQ) {
+                       rc = -EINVAL;
+                       dev_err(dev, "Error mapping irq\n");
+                       goto req_rx_irq_failed;
+               }
+               rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+                                0, "ibmvnic_rx", scrq);
+               if (rc) {
+                       dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+                               scrq->irq, rc);
+                       irq_dispose_mapping(scrq->irq);
+                       goto req_rx_irq_failed;
+               }
+       }
+       return rc;
+
+req_rx_irq_failed:
+       for (j = 0; j < i; j++) {
+               free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+               irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+       }
+       i = adapter->req_tx_queues;
+req_tx_irq_failed:
+       for (j = 0; j < i; j++) {
+               free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+               irq_dispose_mapping(adapter->tx_scrq[j]->irq);
+       }
+       release_sub_crqs_no_irqs(adapter);
+       return rc;
+}
+
 static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 {
        struct device *dev = &adapter->vdev->dev;
@@ -1403,8 +1479,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        union ibmvnic_crq crq;
        int total_queues;
        int more = 0;
-       int i, j;
-       int rc;
+       int i;
 
        if (!retry) {
                /* Sub-CRQ entries are 32 byte long */
@@ -1483,13 +1558,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        for (i = 0; i < adapter->req_tx_queues; i++) {
                adapter->tx_scrq[i] = allqueues[i];
                adapter->tx_scrq[i]->pool_index = i;
-               rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
-                                0, "ibmvnic_tx", adapter->tx_scrq[i]);
-               if (rc) {
-                       dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
-                               adapter->tx_scrq[i]->irq, rc);
-                       goto req_tx_irq_failed;
-               }
        }
 
        adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -1500,13 +1568,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        for (i = 0; i < adapter->req_rx_queues; i++) {
                adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
                adapter->rx_scrq[i]->scrq_num = i;
-               rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
-                                0, "ibmvnic_rx", adapter->rx_scrq[i]);
-               if (rc) {
-                       dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
-                               adapter->rx_scrq[i]->irq, rc);
-                       goto req_rx_irq_failed;
-               }
        }
 
        memset(&crq, 0, sizeof(crq));
@@ -1559,15 +1620,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 
        return;
 
-req_rx_irq_failed:
-       for (j = 0; j < i; j++)
-               free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
-       i = adapter->req_tx_queues;
-req_tx_irq_failed:
-       for (j = 0; j < i; j++)
-               free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
-       kfree(adapter->rx_scrq);
-       adapter->rx_scrq = NULL;
 rx_failed:
        kfree(adapter->tx_scrq);
        adapter->tx_scrq = NULL;
@@ -2121,7 +2173,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
                                  struct ibmvnic_adapter *adapter)
 {
        struct device *dev = &adapter->vdev->dev;
-       struct ibmvnic_error_buff *error_buff;
+       struct ibmvnic_error_buff *error_buff, *tmp;
        unsigned long flags;
        bool found = false;
        int i;
@@ -2133,7 +2185,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
        }
 
        spin_lock_irqsave(&adapter->error_list_lock, flags);
-       list_for_each_entry(error_buff, &adapter->errors, list)
+       list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
                if (error_buff->error_id == crq->request_error_rsp.error_id) {
                        found = true;
                        list_del(&error_buff->list);
@@ -2348,9 +2400,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                         *req_value,
                         (long int)be32_to_cpu(crq->request_capability_rsp.
                                               number), name);
-               release_sub_crqs(adapter);
+               release_sub_crqs_no_irqs(adapter);
                *req_value = be32_to_cpu(crq->request_capability_rsp.number);
-               complete(&adapter->init_done);
+               init_sub_crqs(adapter, 1);
                return;
        default:
                dev_err(dev, "Error %d in request cap rsp\n",
@@ -2659,7 +2711,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
 
 out:
        if (atomic_read(&adapter->running_cap_queries) == 0)
-               complete(&adapter->init_done);
+               init_sub_crqs(adapter, 0);
                /* We're done querying the capabilities, initialize sub-crqs */
 }
 
@@ -3141,14 +3193,14 @@ static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
 
 static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
 {
-       struct ibmvnic_inflight_cmd *inflight_cmd;
+       struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
        struct device *dev = &adapter->vdev->dev;
-       struct ibmvnic_error_buff *error_buff;
+       struct ibmvnic_error_buff *error_buff, *tmp2;
        unsigned long flags;
        unsigned long flags2;
 
        spin_lock_irqsave(&adapter->inflight_lock, flags);
-       list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+       list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
                switch (inflight_cmd->crq.generic.cmd) {
                case LOGIN:
                        dma_unmap_single(dev, adapter->login_buf_token,
@@ -3165,8 +3217,8 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
                        break;
                case REQUEST_ERROR_INFO:
                        spin_lock_irqsave(&adapter->error_list_lock, flags2);
-                       list_for_each_entry(error_buff, &adapter->errors,
-                                           list) {
+                       list_for_each_entry_safe(error_buff, tmp2,
+                                                &adapter->errors, list) {
                                dma_unmap_single(dev, error_buff->dma,
                                                 error_buff->len,
                                                 DMA_FROM_DEVICE);
@@ -3202,8 +3254,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                        dev_info(dev, "Partner initialized\n");
                        /* Send back a response */
                        rc = ibmvnic_send_crq_init_complete(adapter);
-                       if (rc == 0)
-                               send_version_xchg(adapter);
+                       if (!rc)
+                               schedule_work(&adapter->vnic_crq_init);
                        else
                                dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
                        break;
@@ -3555,8 +3607,63 @@ static const struct file_operations ibmvnic_dump_ops = {
        .release        = single_release,
 };
 
+static void handle_crq_init_rsp(struct work_struct *work)
+{
+       struct ibmvnic_adapter *adapter = container_of(work,
+                                                      struct ibmvnic_adapter,
+                                                      vnic_crq_init);
+       struct device *dev = &adapter->vdev->dev;
+       struct net_device *netdev = adapter->netdev;
+       unsigned long timeout = msecs_to_jiffies(30000);
+       int rc;
+
+       send_version_xchg(adapter);
+       reinit_completion(&adapter->init_done);
+       if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+               dev_err(dev, "Passive init timeout\n");
+               goto task_failed;
+       }
+
+       do {
+               if (adapter->renegotiate) {
+                       adapter->renegotiate = false;
+                       release_sub_crqs_no_irqs(adapter);
+                       send_cap_queries(adapter);
+
+                       reinit_completion(&adapter->init_done);
+                       if (!wait_for_completion_timeout(&adapter->init_done,
+                                                        timeout)) {
+                               dev_err(dev, "Passive init timeout\n");
+                               goto task_failed;
+                       }
+               }
+       } while (adapter->renegotiate);
+       rc = init_sub_crq_irqs(adapter);
+
+       if (rc)
+               goto task_failed;
+
+       netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+       rc = register_netdev(netdev);
+       if (rc) {
+               dev_err(dev,
+                       "failed to register netdev rc=%d\n", rc);
+               goto register_failed;
+       }
+       dev_info(dev, "ibmvnic registered\n");
+
+       return;
+
+register_failed:
+       release_sub_crqs(adapter);
+task_failed:
+       dev_err(dev, "Passive initialization was not successful\n");
+}
+
 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
+       unsigned long timeout = msecs_to_jiffies(30000);
        struct ibmvnic_adapter *adapter;
        struct net_device *netdev;
        unsigned char *mac_addr_p;
@@ -3593,6 +3700,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        netdev->ethtool_ops = &ibmvnic_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);
 
+       INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+
        spin_lock_init(&adapter->stats_lock);
 
        rc = ibmvnic_init_crq_queue(adapter);
@@ -3635,30 +3744,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        ibmvnic_send_crq_init(adapter);
 
        init_completion(&adapter->init_done);
-       wait_for_completion(&adapter->init_done);
+       if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+               return 0;
 
        do {
-               adapter->renegotiate = false;
-
-               init_sub_crqs(adapter, 0);
-               reinit_completion(&adapter->init_done);
-               wait_for_completion(&adapter->init_done);
-
                if (adapter->renegotiate) {
-                       release_sub_crqs(adapter);
+                       adapter->renegotiate = false;
+                       release_sub_crqs_no_irqs(adapter);
                        send_cap_queries(adapter);
 
                        reinit_completion(&adapter->init_done);
-                       wait_for_completion(&adapter->init_done);
+                       if (!wait_for_completion_timeout(&adapter->init_done,
+                                                        timeout))
+                               return 0;
                }
        } while (adapter->renegotiate);
 
-       /* if init_sub_crqs is partially successful, retry */
-       while (!adapter->tx_scrq || !adapter->rx_scrq) {
-               init_sub_crqs(adapter, 1);
-
-               reinit_completion(&adapter->init_done);
-               wait_for_completion(&adapter->init_done);
+       rc = init_sub_crq_irqs(adapter);
+       if (rc) {
+               dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+               goto free_debugfs;
        }
 
        netdev->real_num_tx_queues = adapter->req_tx_queues;
@@ -3666,12 +3771,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        rc = register_netdev(netdev);
        if (rc) {
                dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
-               goto free_debugfs;
+               goto free_sub_crqs;
        }
        dev_info(&dev->dev, "ibmvnic registered\n");
 
        return 0;
 
+free_sub_crqs:
+       release_sub_crqs(adapter);
 free_debugfs:
        if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
                debugfs_remove_recursive(adapter->debugfs_dir);
index 0b66a506a4e44e4d36aae861c8ba6289318cd8e1..e82898fd518ef890afbbf79c307833b8127e782c 100644 (file)
@@ -1045,4 +1045,6 @@ struct ibmvnic_adapter {
        u64 opt_rxba_entries_per_subcrq;
        __be64 tx_rx_desc_req;
        u8 map_id;
+
+       struct work_struct vnic_crq_init;
 };
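Several hunks above (handle_error_info_rsp(), ibmvnic_free_inflight()) switch to list_for_each_entry_safe() because entries are deleted mid-walk. The _safe variant simply caches the next pointer before the current node can be freed; the same idea in a standalone singly-linked list:

    #include <stdlib.h>

    struct node {
            struct node *next;
            int id;
    };

    /* Remove all matching nodes. The next pointer is read before the
     * current node is freed - the job done by the extra cursor in
     * list_for_each_entry_safe(). */
    static void remove_matching(struct node **head, int id)
    {
            struct node **pp = head;

            while (*pp) {
                    struct node *cur = *pp;

                    if (cur->id == id) {
                            *pp = cur->next;        /* unlink, then free */
                            free(cur);
                    } else {
                            pp = &cur->next;
                    }
            }
    }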
index 75e60897b7e748bca73317ae0e6d9247e6a9bbf9..2b2e2f8c636994219e997bdb53ff8d3379275bb9 100644 (file)
@@ -2789,7 +2789,7 @@ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
 }
 
 /**
- * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
  * @adapter: board private structure to initialize
  **/
 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
@@ -6915,6 +6915,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
        if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
                features &= ~NETIF_F_RXFCS;
 
+       /* Since there is no support for separate Rx/Tx VLAN accel
+        * enable/disable, make sure the Tx flag is always in the same
+        * state as the Rx flag.
+        */
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               features |= NETIF_F_HW_VLAN_CTAG_TX;
+       else
+               features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
        return features;
 }
 
index 5ea22008d721d0a0ad769f32b7ee74ef8b19249d..501f15d9f4d6eef599733f12e7052fedc1563186 100644 (file)
@@ -1344,6 +1344,13 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
        if (!vsi || !macaddr)
                return NULL;
 
+       /* Do not allow a broadcast filter to be added here, since a
+        * broadcast filter is already installed as part of add VSI for
+        * every newly created VSI except the FDIR VSI.
+        */
+       if (is_broadcast_ether_addr(macaddr))
+               return NULL;
+
        f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
@@ -2151,18 +2158,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                                         aq_ret, pf->hw.aq.asq_last_status);
                        }
                }
-               aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-                                                  vsi->seid,
-                                                  cur_promisc, NULL);
-               if (aq_ret) {
-                       retval = i40e_aq_rc_to_posix(aq_ret,
-                                                    pf->hw.aq.asq_last_status);
-                       dev_info(&pf->pdev->dev,
-                                "set brdcast promisc failed, err %s, aq_err %s\n",
-                                i40e_stat_str(&pf->hw, aq_ret),
-                                i40e_aq_str(&pf->hw,
-                                            pf->hw.aq.asq_last_status));
-               }
        }
 out:
        /* if something went wrong then set the changed flag so we try again */
@@ -7726,10 +7721,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
  * @vsi: the VSI being configured
  * @v_idx: index of the vector in the vsi struct
+ * @cpu: CPU to be set in the affinity mask
  *
  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
  **/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
 {
        struct i40e_q_vector *q_vector;
 
@@ -7740,7 +7736,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 
        q_vector->vsi = vsi;
        q_vector->v_idx = v_idx;
-       cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+       cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
        if (vsi->netdev)
                netif_napi_add(vsi->netdev, &q_vector->napi,
                               i40e_napi_poll, NAPI_POLL_WEIGHT);
@@ -7764,8 +7761,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
-       int v_idx, num_q_vectors;
-       int err;
+       int err, v_idx, num_q_vectors, current_cpu;
 
        /* if not MSIX, give the one vector only to the LAN VSI */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -7775,10 +7771,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
        else
                return -EINVAL;
 
+       current_cpu = cpumask_first(cpu_online_mask);
+
        for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-               err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+               err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
                if (err)
                        goto err_out;
+               current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+               if (unlikely(current_cpu >= nr_cpu_ids))
+                       current_cpu = cpumask_first(cpu_online_mask);
        }
 
        return 0;
@@ -9224,6 +9225,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 static int i40e_add_vsi(struct i40e_vsi *vsi)
 {
        int ret = -ENODEV;
+       i40e_status aq_ret = 0;
        u8 laa_macaddr[ETH_ALEN];
        bool found_laa_mac_filter = false;
        struct i40e_pf *pf = vsi->back;
@@ -9413,6 +9415,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                vsi->seid = ctxt.seid;
                vsi->id = ctxt.vsi_number;
        }
+       /* Except for the FDIR VSI, set the broadcast filter on all other VSIs */
+       if (vsi->type != I40E_VSI_FDIR) {
+               aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+               if (aq_ret) {
+                       ret = i40e_aq_rc_to_posix(aq_ret,
+                                                 hw->aq.asq_last_status);
+                       dev_info(&pf->pdev->dev,
+                                "set brdcast promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(hw, aq_ret),
+                                i40e_aq_str(hw, hw->aq.asq_last_status));
+               }
+       }
 
        spin_lock_bh(&vsi->mac_filter_list_lock);
        /* If macvlan filters already exist, force them to get loaded */
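The q_vector hunks above spread interrupt vectors across online CPUs with cpumask_next(), wrapping back to cpumask_first() past the last CPU. The distribution itself is just round-robin with wraparound; sketched without the cpumask API (num_cpus stands in for the online mask):

    #include <stdio.h>

    int main(void)
    {
            int num_cpus = 4;               /* stand-in for cpu_online_mask */
            int cpu = 0;

            for (int v = 0; v < 10; v++) {  /* v plays the q_vector index */
                    printf("vector %d -> cpu %d\n", v, cpu);
                    if (++cpu >= num_cpus)  /* wrap, like cpumask_first() */
                            cpu = 0;
            }
            return 0;
    }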
index 55f151fca1dcb785089511105920755fef9397b1..a8868e1bf832557ffa7c68d2840a0b098fdf874d 100644 (file)
@@ -1280,8 +1280,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    union i40e_rx_desc *rx_desc)
 {
        struct i40e_rx_ptype_decoded decoded;
-       bool ipv4, ipv6, tunnel = false;
        u32 rx_error, rx_status;
+       bool ipv4, ipv6;
        u8 ptype;
        u64 qword;
 
@@ -1336,19 +1336,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
-       /* The hardware supported by this driver does not validate outer
-        * checksums for tunneled VXLAN or GENEVE frames.  I don't agree
-        * with it but the specification states that you "MAY validate", it
-        * doesn't make it a hard requirement so if we have validated the
-        * inner checksum report CHECKSUM_UNNECESSARY.
+       /* If there is an outer header present that might contain a checksum,
+        * bump the checksum level by 1 to indicate that we validated the
+        * inner checksum.
         */
-       if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
-                                 I40E_RX_PTYPE_INNER_PROT_UDP |
-                                 I40E_RX_PTYPE_INNER_PROT_SCTP))
-               tunnel = true;
-
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       skb->csum_level = tunnel ? 1 : 0;
+       if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+               skb->csum_level = 1;
+
+       /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+       switch (decoded.inner_prot) {
+       case I40E_RX_PTYPE_INNER_PROT_TCP:
+       case I40E_RX_PTYPE_INNER_PROT_UDP:
+       case I40E_RX_PTYPE_INNER_PROT_SCTP:
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               /* fall through */
+       default:
+               break;
+       }
 
        return;
 
index be99189da925fc5862e9c318489694341a3a68f9..79d99cd91b24317d0cc970a6bdedbb24abeac356 100644 (file)
@@ -752,8 +752,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    union i40e_rx_desc *rx_desc)
 {
        struct i40e_rx_ptype_decoded decoded;
-       bool ipv4, ipv6, tunnel = false;
        u32 rx_error, rx_status;
+       bool ipv4, ipv6;
        u8 ptype;
        u64 qword;
 
@@ -808,19 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
-       /* The hardware supported by this driver does not validate outer
-        * checksums for tunneled VXLAN or GENEVE frames.  I don't agree
-        * with it but the specification states that you "MAY validate", it
-        * doesn't make it a hard requirement so if we have validated the
-        * inner checksum report CHECKSUM_UNNECESSARY.
+       /* If there is an outer header present that might contain a checksum,
+        * bump the checksum level by 1 to indicate that we validated the
+        * inner checksum.
         */
-       if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
-                                 I40E_RX_PTYPE_INNER_PROT_UDP |
-                                 I40E_RX_PTYPE_INNER_PROT_SCTP))
-               tunnel = true;
-
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       skb->csum_level = tunnel ? 1 : 0;
+       if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+               skb->csum_level = 1;
+
+       /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+       switch (decoded.inner_prot) {
+       case I40E_RX_PTYPE_INNER_PROT_TCP:
+       case I40E_RX_PTYPE_INNER_PROT_UDP:
+       case I40E_RX_PTYPE_INNER_PROT_SCTP:
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               /* fall through */
+       default:
+               break;
+       }
 
        return;
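The rewritten path above first bumps skb->csum_level when a tunnel header is present, then relies on an intentional switch fall-through so that only TCP, UDP and SCTP inner protocols report CHECKSUM_UNNECESSARY. The control flow, sketched with placeholder enum values standing in for the I40E_RX_PTYPE_* decode:

    #include <stdbool.h>

    enum inner_prot { PROT_NONE, PROT_TCP, PROT_UDP, PROT_SCTP, PROT_OTHER };

    static bool csum_unnecessary(enum inner_prot p)
    {
            switch (p) {
            case PROT_TCP:          /* fall through */
            case PROT_UDP:          /* fall through */
            case PROT_SCTP:
                    return true;    /* inner L4 checksum was validated */
            default:
                    return false;   /* leave verification to the stack */
            }
    }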
 
index 088c47cf27d97d0f5a8a40992c95ec7c7a761947..8bebd862a54ccd7f4f3defe4fd1a1035188294d6 100644 (file)
@@ -2887,7 +2887,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
 
-       return 0;
+       return min(work_done, budget - 1);
 }
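Returning min(work_done, budget - 1) keeps ixgbe_poll() inside the NAPI contract: a handler that completes and re-enables interrupts must not return the full budget, or the core assumes more work is pending and keeps polling. A rough sketch of a compliant handler, with hypothetical stubs in place of the real NAPI/IRQ calls:

    static int do_rx_work(int budget) { return 0; }  /* stub */
    static void napi_done(void) { }                  /* stub */
    static void irq_enable(void) { }                 /* stub */

    static int poll(int budget)
    {
            int work_done = do_rx_work(budget);

            if (work_done < budget) {
                    napi_done();            /* stop polling */
                    irq_enable();           /* rearm interrupts */
            }
            /* Never claim the whole budget once interrupts are rearmed. */
            return work_done < budget ? work_done : budget - 1;
    }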
 
 /**
index 61a80da8b6f0dec4323f094a245fc3fcbb6bcab0..2819abc454c7e71c024ab280ec3a5a1780a07a4b 100644 (file)
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
 static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -IXGBE_ERR_MBX;
+       s32 ret_val = IXGBE_ERR_MBX;
 
        if (!mbx->ops.read)
                goto out;
@@ -111,7 +111,7 @@ out:
 static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -IXGBE_ERR_MBX;
+       s32 ret_val = IXGBE_ERR_MBX;
 
        /* exit if either we can't write or there isn't a defined timeout */
        if (!mbx->ops.write || !mbx->timeout)
index a6d26d351dfc47c777b39c04a44c2f17bca0feab..f92018b13d2869e57ec307029072e3b85ac5041a 100644 (file)
 /* Various constants */
 
 /* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS                1
+#define MVNETA_TXDONE_COAL_PKTS                0       /* interrupt per packet */
 #define MVNETA_RX_COAL_PKTS            32
 #define MVNETA_RX_COAL_USEC            100
 
@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
        return 0;
 
 err_free_irq:
+       unregister_cpu_notifier(&pp->cpu_notifier);
+       on_each_cpu(mvneta_percpu_disable, pp, true);
        free_percpu_irq(pp->dev->irq, pp->ports);
 err_cleanup_txqs:
        mvneta_cleanup_txqs(pp);
index 01fccec632eca82384a404e3da9388b1b30a27db..466939f8f0cfce1e0f6b11aad59700ab2866e194 100644 (file)
@@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                hwbm_pool->construct = mvneta_bm_construct;
                hwbm_pool->priv = new_pool;
+               spin_lock_init(&hwbm_pool->lock);
 
                /* Create new pool */
                err = mvneta_bm_pool_create(priv, new_pool);
index c984462fad2a26fa267faab7707181a19c1d1814..d1cdc2d7615158f612885e100a0cc274c22a3fe9 100644 (file)
@@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
 static void mtk_phy_link_adjust(struct net_device *dev)
 {
        struct mtk_mac *mac = netdev_priv(dev);
+       u16 lcl_adv = 0, rmt_adv = 0;
+       u8 flowctrl;
        u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
                  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
                  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
        if (mac->phy_dev->link)
                mcr |= MAC_MCR_FORCE_LINK;
 
-       if (mac->phy_dev->duplex)
+       if (mac->phy_dev->duplex) {
                mcr |= MAC_MCR_FORCE_DPX;
 
-       if (mac->phy_dev->pause)
-               mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+               if (mac->phy_dev->pause)
+                       rmt_adv = LPA_PAUSE_CAP;
+               if (mac->phy_dev->asym_pause)
+                       rmt_adv |= LPA_PAUSE_ASYM;
+
+               if (mac->phy_dev->advertising & ADVERTISED_Pause)
+                       lcl_adv |= ADVERTISE_PAUSE_CAP;
+               if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+                       lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+               flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+               if (flowctrl & FLOW_CTRL_TX)
+                       mcr |= MAC_MCR_FORCE_TX_FC;
+               if (flowctrl & FLOW_CTRL_RX)
+                       mcr |= MAC_MCR_FORCE_RX_FC;
+
+               netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+                         flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+                         flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+       }
 
        mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
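The hunk above replaces a blunt "force both directions on pause" with proper 802.3 flow-control resolution: mii_resolve_flowctrl_fdx() (linux/mii.h) combines what we advertise with what the link partner advertises. A short sketch with illustrative inputs:

        /* Both sides advertising symmetric pause resolves to TX and RX
         * flow control; the input values here are examples.
         */
        u16 lcl_adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        u16 rmt_adv = LPA_PAUSE_CAP;
        u8 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
        /* here flowctrl == FLOW_CTRL_TX | FLOW_CTRL_RX */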
 
@@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        u32 val, ge_mode;
 
        np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+       if (!np && of_phy_is_fixed_link(mac->of_node))
+               if (!of_phy_register_fixed_link(mac->of_node))
+                       np = of_node_get(mac->of_node);
        if (!np)
                return -ENODEV;
 
        switch (of_get_phy_mode(np)) {
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII:
                ge_mode = 0;
                break;
@@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
        mac->phy_dev->autoneg = AUTONEG_ENABLE;
        mac->phy_dev->speed = 0;
        mac->phy_dev->duplex = 0;
-       mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+       mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+                                  SUPPORTED_Asym_Pause;
        mac->phy_dev->advertising = mac->phy_dev->supported |
                                    ADVERTISED_Autoneg;
        phy_start_aneg(mac->phy_dev);
@@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
        return 0;
 
 err_free_bus:
-       kfree(eth->mii_bus);
+       mdiobus_free(eth->mii_bus);
 
 err_put_node:
        of_node_put(mii_np);
@@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
 
        mdiobus_unregister(eth->mii_bus);
        of_node_put(eth->mii_bus->dev.of_node);
-       kfree(eth->mii_bus);
+       mdiobus_free(eth->mii_bus);
 }
 
 static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
@@ -453,20 +481,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
 /* the qdma core needs scratch memory to be set up */
 static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
-       dma_addr_t phy_ring_head, phy_ring_tail;
+       dma_addr_t phy_ring_tail;
        int cnt = MTK_DMA_SIZE;
        dma_addr_t dma_addr;
        int i;
 
        eth->scratch_ring = dma_alloc_coherent(eth->dev,
                                               cnt * sizeof(struct mtk_tx_dma),
-                                              &phy_ring_head,
+                                              &eth->phy_scratch_ring,
                                               GFP_ATOMIC | __GFP_ZERO);
        if (unlikely(!eth->scratch_ring))
                return -ENOMEM;
 
        eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
                                    GFP_KERNEL);
+       if (unlikely(!eth->scratch_head))
+               return -ENOMEM;
+
        dma_addr = dma_map_single(eth->dev,
                                  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
                                  DMA_FROM_DEVICE);
@@ -474,19 +505,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
                return -ENOMEM;
 
        memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-       phy_ring_tail = phy_ring_head +
+       phy_ring_tail = eth->phy_scratch_ring +
                        (sizeof(struct mtk_tx_dma) * (cnt - 1));
 
        for (i = 0; i < cnt; i++) {
                eth->scratch_ring[i].txd1 =
                                        (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
                if (i < cnt - 1)
-                       eth->scratch_ring[i].txd2 = (phy_ring_head +
+                       eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
                                ((i + 1) * sizeof(struct mtk_tx_dma)));
                eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
        }
 
-       mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+       mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
        mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
        mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
        mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
@@ -643,7 +674,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 err_dma:
        do {
-               tx_buf = mtk_desc_to_tx_buf(ring, txd);
+               tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
                /* unmap dma */
                mtk_tx_unmap(&dev->dev, tx_buf);
@@ -673,6 +704,20 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
        return nfrags;
 }
 
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+       int i;
+
+       for (i = 0; i < MTK_MAC_COUNT; i++) {
+               if (!eth->netdev[i])
+                       continue;
+               if (netif_queue_stopped(eth->netdev[i]))
+                       return 1;
+       }
+
+       return 0;
+}
+
 static void mtk_wake_queue(struct mtk_eth *eth)
 {
        int i;
@@ -738,12 +783,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
                goto drop;
 
-       if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
+       if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
                mtk_stop_queue(eth);
-               if (unlikely(atomic_read(&ring->free_count) >
-                            ring->thresh))
-                       mtk_wake_queue(eth);
-       }
+
        spin_unlock_irqrestore(&eth->page_lock, flags);
 
        return NETDEV_TX_OK;
@@ -798,6 +840,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                                          DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
                        skb_free_frag(new_data);
+                       netdev->stats.rx_dropped++;
                        goto release_desc;
                }
 
@@ -805,6 +848,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
                skb = build_skb(data, ring->frag_size);
                if (unlikely(!skb)) {
                        put_page(virt_to_head_page(new_data));
+                       netdev->stats.rx_dropped++;
                        goto release_desc;
                }
                skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@@ -893,7 +937,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
                }
                mtk_tx_unmap(eth->dev, tx_buf);
 
-               ring->last_free->txd2 = next_cpu;
                ring->last_free = desc;
                atomic_inc(&ring->free_count);
 
@@ -918,7 +961,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
        if (!total)
                return 0;
 
-       if (atomic_read(&ring->free_count) > ring->thresh)
+       if (mtk_queue_stopped(eth) &&
+           (atomic_read(&ring->free_count) > ring->thresh))
                mtk_wake_queue(eth);
 
        return total;
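Together with the mtk_queue_stopped() helper added earlier, the hunk above completes the usual stop/wake discipline: stop on the xmit path when descriptors run low, and wake from the completion path only when a queue is actually stopped and space is back above the threshold, avoiding both spurious and lost wakeups. The pattern in miniature (a sketch, helper usage condensed from the diff):

        if (atomic_read(&ring->free_count) <= ring->thresh)     /* xmit path */
                mtk_stop_queue(eth);

        if (mtk_queue_stopped(eth) &&                           /* completion path */
            atomic_read(&ring->free_count) > ring->thresh)
                mtk_wake_queue(eth);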
@@ -999,9 +1043,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 
        atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
        ring->next_free = &ring->dma[0];
-       ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
-       ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
-                             MAX_SKB_FRAGS);
+       ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+       ring->thresh = MAX_SKB_FRAGS;
 
        /* make sure that all changes to the dma ring are flushed before we
         * continue
@@ -1179,6 +1222,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
        for (i = 0; i < MTK_MAC_COUNT; i++)
                if (eth->netdev[i])
                        netdev_reset_queue(eth->netdev[i]);
+       if (eth->scratch_ring) {
+               dma_free_coherent(eth->dev,
+                                 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+                                 eth->scratch_ring,
+                                 eth->phy_scratch_ring);
+               eth->scratch_ring = NULL;
+               eth->phy_scratch_ring = 0;
+       }
        mtk_tx_clean(eth);
        mtk_rx_clean(eth);
        kfree(eth->scratch_head);
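The hunk above is why phy_scratch_ring was promoted from a local into struct mtk_eth: the physical handle that dma_alloc_coherent() returns is required again at free time, so it has to outlive mtk_init_fq_dma(). Condensed sketch ("size" stands in for the real byte count):

        eth->scratch_ring = dma_alloc_coherent(eth->dev, size,
                                               &eth->phy_scratch_ring,
                                               GFP_ATOMIC);
        /* ... later, in the teardown path ... */
        dma_free_coherent(eth->dev, size, eth->scratch_ring,
                          eth->phy_scratch_ring);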
@@ -1241,7 +1292,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
        mtk_w32(eth,
                MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
                MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
-               MTK_RX_BT_32DWORDS,
+               MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
                MTK_QDMA_GLO_CFG);
 
        return 0;
@@ -1355,7 +1406,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
 
        /* disable delay and normal interrupt */
        mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-       mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+       mtk_irq_disable(eth, ~0);
        mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
        mtk_w32(eth, 0, MTK_RST_GL);
 
@@ -1669,7 +1720,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
        mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
        SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-       eth->netdev[id]->watchdog_timeo = HZ;
+       eth->netdev[id]->watchdog_timeo = 5 * HZ;
        eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
        eth->netdev[id]->base_addr = (unsigned long)eth->base;
        eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
index eed626d56ea4f1d3df5f2ed3aacbd8bded28cead..a5eb7c62306b66116df10a75df115fb4810534d9 100644 (file)
@@ -91,6 +91,7 @@
 #define MTK_QDMA_GLO_CFG       0x1A04
 #define MTK_RX_2B_OFFSET       BIT(31)
 #define MTK_RX_BT_32DWORDS     (3 << 11)
+#define MTK_NDP_CO_PRO         BIT(10)
 #define MTK_TX_WB_DDONE                BIT(6)
 #define MTK_DMA_SIZE_16DWORDS  (2 << 4)
 #define MTK_RX_DMA_BUSY                BIT(3)
@@ -357,6 +358,7 @@ struct mtk_rx_ring {
  * @rx_ring:           Pointer to the memory holding info about the RX ring
  * @rx_napi:           The NAPI struct
  * @scratch_ring:      Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring:  physical address of scratch_ring
  * @scratch_head:      The scratch memory that scratch_ring points to.
  * @clk_ethif:         The ethif clock
  * @clk_esw:           The switch clock
@@ -384,6 +386,7 @@ struct mtk_eth {
        struct mtk_rx_ring              rx_ring;
        struct napi_struct              rx_napi;
        struct mtk_tx_dma               *scratch_ring;
+       dma_addr_t                      phy_scratch_ring;
        void                            *scratch_head;
        struct clk                      *clk_ethif;
        struct clk                      *clk_esw;
index e94ca1c3fc7c6a83a190a29d4116086dcc9de8ca..f04a423ff79dd977c081300822a767173fd3c57e 100644 (file)
@@ -2597,7 +2597,6 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
        priv->cmd.free_head = 0;
 
        sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
-       spin_lock_init(&priv->cmd.context_lock);
 
        for (priv->cmd.token_mask = 1;
             priv->cmd.token_mask < priv->cmd.max_cmds;
index c761194bb32352be84c2610464a9778d1a365c34..44cf16d01f4275501870e3711fa27e5d8ec43a0e 100644 (file)
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 
        for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
                if (bitmap_iterator_test(&it))
-                       data[index++] = ((unsigned long *)&priv->stats)[i];
+                       data[index++] = ((unsigned long *)&dev->stats)[i];
 
        for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
                if (bitmap_iterator_test(&it))
@@ -1042,6 +1042,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_port_profile new_prof;
+       struct mlx4_en_priv *tmp;
        u32 rx_size, tx_size;
        int port_up = 0;
        int err = 0;
@@ -1061,22 +1063,25 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
            tx_size == priv->tx_ring[0]->size)
                return 0;
 
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
        mutex_lock(&mdev->state_lock);
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       new_prof.tx_ring_size = tx_size;
+       new_prof.rx_ring_size = rx_size;
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       if (err)
+               goto out;
+
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }
 
-       mlx4_en_free_resources(priv);
-
-       priv->prof->tx_ring_size = tx_size;
-       priv->prof->rx_ring_size = rx_size;
+       mlx4_en_safe_replace_resources(priv, tmp);
 
-       err = mlx4_en_alloc_resources(priv);
-       if (err) {
-               en_err(priv, "Failed reallocating port resources\n");
-               goto out;
-       }
        if (port_up) {
                err = mlx4_en_start_port(dev);
                if (err)
@@ -1084,8 +1089,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
        }
 
        err = mlx4_en_moderation_update(priv);
-
 out:
+       kfree(tmp);
        mutex_unlock(&mdev->state_lock);
        return err;
 }
@@ -1714,6 +1719,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_port_profile new_prof;
+       struct mlx4_en_priv *tmp;
        int port_up = 0;
        int err = 0;
 
@@ -1723,23 +1730,26 @@ static int mlx4_en_set_channels(struct net_device *dev,
            !channel->tx_count || !channel->rx_count)
                return -EINVAL;
 
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
        mutex_lock(&mdev->state_lock);
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       new_prof.num_tx_rings_p_up = channel->tx_count;
+       new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+       new_prof.rx_ring_num = channel->rx_count;
+
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       if (err)
+               goto out;
+
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }
 
-       mlx4_en_free_resources(priv);
-
-       priv->num_tx_rings_p_up = channel->tx_count;
-       priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
-       priv->rx_ring_num = channel->rx_count;
-
-       err = mlx4_en_alloc_resources(priv);
-       if (err) {
-               en_err(priv, "Failed reallocating port resources\n");
-               goto out;
-       }
+       mlx4_en_safe_replace_resources(priv, tmp);
 
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -1757,8 +1767,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
        }
 
        err = mlx4_en_moderation_update(priv);
-
 out:
+       kfree(tmp);
        mutex_unlock(&mdev->state_lock);
        return err;
 }
index 92e0624f4cf03cef36f085c8fa4f2669c80c0ef3..8359e9e51b3b4c99d95238575fbe0285b61fa571 100644 (file)
@@ -406,14 +406,18 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-               if (err)
+               if (err) {
                        en_err(priv, "Failed configuring VLAN filter\n");
+                       goto out;
+               }
        }
-       if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
-               en_dbg(HW, priv, "failed adding vlan %d\n", vid);
-       mutex_unlock(&mdev->state_lock);
+       err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
+       if (err)
+               en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
 
-       return 0;
+out:
+       mutex_unlock(&mdev->state_lock);
+       return err;
 }
 
 static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
@@ -421,7 +425,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       int err;
+       int err = 0;
 
        en_dbg(HW, priv, "Killing VID:%d\n", vid);
 
@@ -438,7 +442,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
        }
        mutex_unlock(&mdev->state_lock);
 
-       return 0;
+       return err;
 }
 
 static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
@@ -1296,15 +1300,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 }
 
 
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
        spin_lock_bh(&priv->stats_lock);
-       memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+       netdev_stats_to_stats64(stats, &dev->stats);
        spin_unlock_bh(&priv->stats_lock);
 
-       return &priv->ret_stats;
+       return stats;
 }
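The conversion above moves mlx4_en from the legacy ndo_get_stats hook to ndo_get_stats64: the core passes in the storage to fill, so the driver no longer needs its own cached copy (the removed priv->ret_stats), and netdev_stats_to_stats64() widens the legacy dev->stats fields. Calling-convention sketch:

        struct rtnl_link_stats64 storage;

        dev_get_stats(dev, &storage);   /* core ends up in ->ndo_get_stats64 */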
 
 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1876,7 +1881,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
        if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
                en_dbg(HW, priv, "Failed dumping statistics\n");
 
-       memset(&priv->stats, 0, sizeof(priv->stats));
        memset(&priv->pstats, 0, sizeof(priv->pstats));
        memset(&priv->pkstats, 0, sizeof(priv->pkstats));
        memset(&priv->port_stats, 0, sizeof(priv->port_stats));
@@ -1892,6 +1896,11 @@ static void mlx4_en_clear_stats(struct net_device *dev)
                priv->tx_ring[i]->bytes = 0;
                priv->tx_ring[i]->packets = 0;
                priv->tx_ring[i]->tx_csum = 0;
+               priv->tx_ring[i]->tx_dropped = 0;
+               priv->tx_ring[i]->queue_stopped = 0;
+               priv->tx_ring[i]->wake_queue = 0;
+               priv->tx_ring[i]->tso_packets = 0;
+               priv->tx_ring[i]->xmit_more = 0;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                priv->rx_ring[i]->bytes = 0;
@@ -1945,7 +1954,7 @@ static int mlx4_en_close(struct net_device *dev)
        return 0;
 }
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
        int i;
 
@@ -1970,7 +1979,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 
 }
 
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
        struct mlx4_en_port_profile *prof = priv->prof;
        int i;
@@ -2027,11 +2036,91 @@ err:
        return -ENOMEM;
 }
 
+static void mlx4_en_shutdown(struct net_device *dev)
+{
+       rtnl_lock();
+       netif_device_detach(dev);
+       mlx4_en_close(dev);
+       rtnl_unlock();
+}
+
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+                            struct mlx4_en_priv *src,
+                            struct mlx4_en_port_profile *prof)
+{
+       memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+              sizeof(dst->hwtstamp_config));
+       dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+       dst->tx_ring_num = prof->tx_ring_num;
+       dst->rx_ring_num = prof->rx_ring_num;
+       dst->flags = prof->flags;
+       dst->mdev = src->mdev;
+       dst->port = src->port;
+       dst->dev = src->dev;
+       dst->prof = prof;
+       dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+                                        DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+       dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+                               GFP_KERNEL);
+       if (!dst->tx_ring)
+               return -ENOMEM;
+
+       dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+                             GFP_KERNEL);
+       if (!dst->tx_cq) {
+               kfree(dst->tx_ring);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+                               struct mlx4_en_priv *src)
+{
+       memcpy(dst->rx_ring, src->rx_ring,
+              sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+       memcpy(dst->rx_cq, src->rx_cq,
+              sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+       memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+              sizeof(dst->hwtstamp_config));
+       dst->tx_ring_num = src->tx_ring_num;
+       dst->rx_ring_num = src->rx_ring_num;
+       dst->tx_ring = src->tx_ring;
+       dst->tx_cq = src->tx_cq;
+       memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+                               struct mlx4_en_priv *tmp,
+                               struct mlx4_en_port_profile *prof)
+{
+       mlx4_en_copy_priv(tmp, priv, prof);
+
+       if (mlx4_en_alloc_resources(tmp)) {
+               en_warn(priv,
+                       "%s: Resource allocation failed, using previous configuration\n",
+                       __func__);
+               kfree(tmp->tx_ring);
+               kfree(tmp->tx_cq);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+                                   struct mlx4_en_priv *tmp)
+{
+       mlx4_en_free_resources(priv);
+       mlx4_en_update_priv(priv, tmp);
+}
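These helpers implement a prepare-then-commit replacement of the port resources: the old free-then-alloc order could leave a running device with no resources if the second allocation failed, while allocating the new set first makes failure safe and turns the swap itself into a step that cannot fail. The discipline as a generic sketch, with illustrative names:

        err = try_alloc(new_set, new_profile);
        if (err)
                return err;             /* old set still live and in use */

        stop_port(dev);
        free_old_and_install(dev, new_set);     /* commit step, no failure path */
        start_port(dev);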
 
 void mlx4_en_destroy_netdev(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       bool shutdown = mdev->dev->persist->interface_state &
+                                           MLX4_INTERFACE_STATE_SHUTDOWN;
 
        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2039,7 +2128,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        if (priv->registered) {
                devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
                                                              priv->port));
-               unregister_netdev(dev);
+               if (shutdown)
+                       mlx4_en_shutdown(dev);
+               else
+                       unregister_netdev(dev);
        }
 
        if (priv->allocated)
@@ -2059,12 +2151,17 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        mdev->upper[priv->port] = NULL;
        mutex_unlock(&mdev->state_lock);
 
+#ifdef CONFIG_RFS_ACCEL
+       mlx4_en_cleanup_filters(priv);
+#endif
+
        mlx4_en_free_resources(priv);
 
        kfree(priv->tx_ring);
        kfree(priv->tx_cq);
 
-       free_netdev(dev);
+       if (!shutdown)
+               free_netdev(dev);
 }
 
 static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
@@ -2442,9 +2539,14 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
         * strip that feature if this is an IPv6 encapsulated frame.
         */
        if (skb->encapsulation &&
-           (skb->ip_summed == CHECKSUM_PARTIAL) &&
-           (ip_hdr(skb)->version != 4))
-               features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+           (skb->ip_summed == CHECKSUM_PARTIAL)) {
+               struct mlx4_en_priv *priv = netdev_priv(dev);
+
+               if (!priv->vxlan_port ||
+                   (ip_hdr(skb)->version != 4) ||
+                   (udp_hdr(skb)->dest != priv->vxlan_port))
+                       features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+       }
 
        return features;
 }
@@ -2482,7 +2584,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_stop               = mlx4_en_close,
        .ndo_start_xmit         = mlx4_en_xmit,
        .ndo_select_queue       = mlx4_en_select_queue,
-       .ndo_get_stats          = mlx4_en_get_stats,
+       .ndo_get_stats64        = mlx4_en_get_stats64,
        .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
        .ndo_set_mac_address    = mlx4_en_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
@@ -2514,7 +2616,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_stop               = mlx4_en_close,
        .ndo_start_xmit         = mlx4_en_xmit,
        .ndo_select_queue       = mlx4_en_select_queue,
-       .ndo_get_stats          = mlx4_en_get_stats,
+       .ndo_get_stats64        = mlx4_en_get_stats64,
        .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
        .ndo_set_mac_address    = mlx4_en_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
@@ -3097,6 +3199,8 @@ int mlx4_en_reset_config(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_port_profile new_prof;
+       struct mlx4_en_priv *tmp;
        int port_up = 0;
        int err = 0;
 
@@ -3113,19 +3217,29 @@ int mlx4_en_reset_config(struct net_device *dev,
                return -EINVAL;
        }
 
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
        mutex_lock(&mdev->state_lock);
+
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       if (err)
+               goto out;
+
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }
 
-       mlx4_en_free_resources(priv);
-
        en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
-               ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+               ts_config.rx_filter,
+               !!(features & NETIF_F_HW_VLAN_CTAG_RX));
 
-       priv->hwtstamp_config.tx_type = ts_config.tx_type;
-       priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+       mlx4_en_safe_replace_resources(priv, tmp);
 
        if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
                if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -3159,11 +3273,6 @@ int mlx4_en_reset_config(struct net_device *dev,
                dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
        }
 
-       err = mlx4_en_alloc_resources(priv);
-       if (err) {
-               en_err(priv, "Failed reallocating port resources\n");
-               goto out;
-       }
        if (port_up) {
                err = mlx4_en_start_port(dev);
                if (err)
@@ -3172,6 +3281,8 @@ int mlx4_en_reset_config(struct net_device *dev,
 
 out:
        mutex_unlock(&mdev->state_lock);
-       netdev_features_change(dev);
+       kfree(tmp);
+       if (!err)
+               netdev_features_change(dev);
        return err;
 }
index 20b6c2e678b8879525ae41c995d5bc7692bfdec5..5aa8b751f4170c782f13a3d0ef6f3b557b21168d 100644 (file)
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        struct mlx4_counter tmp_counter_stats;
        struct mlx4_en_stat_out_mbox *mlx4_en_stats;
        struct mlx4_en_stat_out_flow_control_mbox *flowstats;
-       struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device *dev = mdev->pndev[port];
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct net_device_stats *stats = &dev->stats;
        struct mlx4_cmd_mailbox *mailbox;
        u64 in_mod = reset << 8 | port;
        int err;
@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        }
        stats->tx_packets = 0;
        stats->tx_bytes = 0;
+       stats->tx_dropped = 0;
        priv->port_stats.tx_chksum_offload = 0;
        priv->port_stats.queue_stopped = 0;
        priv->port_stats.wake_queue = 0;
@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 
                stats->tx_packets += ring->packets;
                stats->tx_bytes += ring->bytes;
+               stats->tx_dropped += ring->tx_dropped;
                priv->port_stats.tx_chksum_offload += ring->tx_csum;
                priv->port_stats.queue_stopped     += ring->queue_stopped;
                priv->port_stats.wake_queue        += ring->wake_queue;
@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
                                          &mlx4_en_stats->MCAST_prio_1,
                                          NUM_PRIORITIES);
-       stats->collisions = 0;
        stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
                            sw_rx_dropped;
        stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
-       stats->rx_over_errors = 0;
        stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
-       stats->rx_frame_errors = 0;
        stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
-       stats->rx_missed_errors = 0;
-       stats->tx_aborted_errors = 0;
-       stats->tx_carrier_errors = 0;
-       stats->tx_fifo_errors = 0;
-       stats->tx_heartbeat_errors = 0;
-       stats->tx_window_errors = 0;
-       stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+       stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
 
        /* RX stats */
        priv->pkstats.rx_multicast_packets = stats->multicast;
index c1b3a9c8cf3b4db9412e722e09983e72c64e3caa..99b5407f2278c0f292961cf8b458e4b4dfa584e7 100644 (file)
@@ -514,9 +514,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
        ring->rx_info = NULL;
        kfree(ring);
        *pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
-       mlx4_en_cleanup_filters(priv);
-#endif
 }
 
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
index f6e61570cb2c2b53a13adfbfbccc2c00e084171b..76aa4d27183c7d9527141ec168fd347a5d1b0d86 100644 (file)
@@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        bool inline_ok;
        u32 ring_cons;
 
-       if (!priv->port_up)
-               goto tx_drop;
-
        tx_ind = skb_get_queue_mapping(skb);
        ring = priv->tx_ring[tx_ind];
 
+       if (!priv->port_up)
+               goto tx_drop;
+
        /* fetch ring->cons far ahead before needing it to avoid stall */
        ring_cons = ACCESS_ONCE(ring->cons);
 
@@ -1030,7 +1030,7 @@ tx_drop_unmap:
 
 tx_drop:
        dev_kfree_skb_any(skb);
-       priv->stats.tx_dropped++;
+       ring->tx_dropped++;
        return NETDEV_TX_OK;
 }
 
index 12c77a70abdb451c475680c05bb2b7cf86436cdf..546fab0ecc3bb5e8f8cbaa4fe401dcca1ca070d9 100644 (file)
@@ -3222,6 +3222,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 
        INIT_LIST_HEAD(&priv->pgdir_list);
        mutex_init(&priv->pgdir_mutex);
+       spin_lock_init(&priv->cmd.context_lock);
 
        INIT_LIST_HEAD(&priv->bf_list);
        mutex_init(&priv->bf_mutex);
@@ -4134,8 +4135,11 @@ static void mlx4_shutdown(struct pci_dev *pdev)
 
        mlx4_info(persist->dev, "mlx4_shutdown was called\n");
        mutex_lock(&persist->interface_state_mutex);
-       if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+       if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
+               /* Notify mlx4 clients that the kernel is being shut down */
+               persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
                mlx4_unload_one(pdev);
+       }
        mutex_unlock(&persist->interface_state_mutex);
 }
 
index cc84e09f324a42aa526be4f1e5a74f1c88899791..13d297ee34bb3f7fa195891e3959ce62605e580b 100644 (file)
@@ -270,6 +270,7 @@ struct mlx4_en_tx_ring {
        unsigned long           tx_csum;
        unsigned long           tso_packets;
        unsigned long           xmit_more;
+       unsigned int            tx_dropped;
        struct mlx4_bf          bf;
        unsigned long           queue_stopped;
 
@@ -352,12 +353,14 @@ struct mlx4_en_port_profile {
        u32 rx_ring_num;
        u32 tx_ring_size;
        u32 rx_ring_size;
+       u8 num_tx_rings_p_up;
        u8 rx_pause;
        u8 rx_ppp;
        u8 tx_pause;
        u8 tx_ppp;
        int rss_rings;
        int inline_thold;
+       struct hwtstamp_config hwtstamp_config;
 };
 
 struct mlx4_en_profile {
@@ -482,8 +485,6 @@ struct mlx4_en_priv {
        struct mlx4_en_port_profile *prof;
        struct net_device *dev;
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-       struct net_device_stats stats;
-       struct net_device_stats ret_stats;
        struct mlx4_en_port_state port_state;
        spinlock_t stats_lock;
        struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
@@ -624,8 +625,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
                              u8 rx_ppp, u8 rx_pause,
                              u8 tx_ppp, u8 tx_pause);
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+                               struct mlx4_en_priv *tmp,
+                               struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+                                   struct mlx4_en_priv *tmp);
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
                      int entries, int ring, enum cq_type mode, int node);
index b3cc3ab63799462709a569cff08f6e4c5de59f43..6fc156a3918d181f963d3d4d8fe65b219ef3e81c 100644 (file)
@@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
                        goto free_uar;
                }
 
-               uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+               uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
+                                               uar->index << PAGE_SHIFT,
+                                               PAGE_SIZE);
                if (!uar->bf_map) {
                        err = -ENOMEM;
                        goto unamp_uar;
index dcd2df6518de32a8e412b10a548c0973caa15568..d6e2a1cae19ae2d6d636f1d306bca8e607106095 100644 (file)
@@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
        case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+       case MLX5_CMD_OP_2ERR_QP:
+       case MLX5_CMD_OP_2RST_QP:
+       case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+       case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+       case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+       case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
                return MLX5_CMD_STAT_OK;
 
        case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_RTR2RTS_QP:
        case MLX5_CMD_OP_RTS2RTS_QP:
        case MLX5_CMD_OP_SQERR2RTS_QP:
-       case MLX5_CMD_OP_2ERR_QP:
-       case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_QUERY_QP:
        case MLX5_CMD_OP_SQD_RTS_QP:
        case MLX5_CMD_OP_INIT2INIT_QP:
@@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
-       case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
        case MLX5_CMD_OP_SET_ROCE_ADDRESS:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_CREATE_RQT:
        case MLX5_CMD_OP_MODIFY_RQT:
        case MLX5_CMD_OP_QUERY_RQT:
+
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
-       case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
@@ -545,6 +549,7 @@ const char *mlx5_command_str(int command)
        MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+       MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
        default: return "unknown command opcode";
        }
 }
@@ -601,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
                pr_debug("\n");
 }
 
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+       struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+       return be16_to_cpu(hdr->opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+       struct delayed_work *dwork = container_of(work, struct delayed_work,
+                                                 work);
+       struct mlx5_cmd_work_ent *ent = container_of(dwork,
+                                                    struct mlx5_cmd_work_ent,
+                                                    cb_timeout_work);
+       struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+                                                cmd);
+
+       ent->ret = -ETIMEDOUT;
+       mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+                      mlx5_command_str(msg_to_opcode(ent->in)),
+                      msg_to_opcode(ent->in));
+       mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
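The handler above gives asynchronous firmware commands a watchdog: a delayed work is armed when the command is posted and cancelled on normal completion; if it fires first, the entry is failed with -ETIMEDOUT and a completion is synthesized via mlx5_cmd_comp_handler() so the normal teardown path runs exactly once. The pattern in miniature, using the names this hunk adds:

        INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
        /* ... on the completion EQ path ... */
        if (ent->callback)
                cancel_delayed_work(&ent->cb_timeout_work);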
+
 static void cmd_work_handler(struct work_struct *work)
 {
        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
        struct mlx5_cmd *cmd = ent->cmd;
        struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+       unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
        struct mlx5_cmd_layout *lay;
        struct semaphore *sem;
        unsigned long flags;
@@ -646,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
 
+       if (ent->callback)
+               schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
        /* ring doorbell after the descriptor is valid */
        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
@@ -690,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
        }
 }
 
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
-       struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
-       return be16_to_cpu(hdr->opcode);
-}
-
 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 {
        unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -705,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 
        if (cmd->mode == CMD_MODE_POLLING) {
                wait_for_completion(&ent->done);
-               err = ent->ret;
-       } else {
-               if (!wait_for_completion_timeout(&ent->done, timeout))
-                       err = -ETIMEDOUT;
-               else
-                       err = 0;
+       } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+               ent->ret = -ETIMEDOUT;
+               mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
        }
+
+       err = ent->ret;
+
        if (err == -ETIMEDOUT) {
                mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                               mlx5_command_str(msg_to_opcode(ent->in)),
@@ -760,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
        if (!callback)
                init_completion(&ent->done);
 
+       INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
        INIT_WORK(&ent->work, cmd_work_handler);
        if (page_queue) {
                cmd_work_handler(&ent->work);
@@ -769,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                goto out_free;
        }
 
-       if (!callback) {
-               err = wait_func(dev, ent);
-               if (err == -ETIMEDOUT)
-                       goto out;
-
-               ds = ent->ts2 - ent->ts1;
-               op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
-               if (op < ARRAY_SIZE(cmd->stats)) {
-                       stats = &cmd->stats[op];
-                       spin_lock_irq(&stats->lock);
-                       stats->sum += ds;
-                       ++stats->n;
-                       spin_unlock_irq(&stats->lock);
-               }
-               mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
-                                  "fw exec time for %s is %lld nsec\n",
-                                  mlx5_command_str(op), ds);
-               *status = ent->status;
-               free_cmd(ent);
-       }
+       if (callback)
+               goto out;
 
-       return err;
+       err = wait_func(dev, ent);
+       if (err == -ETIMEDOUT)
+               goto out_free;
+
+       ds = ent->ts2 - ent->ts1;
+       op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+       if (op < ARRAY_SIZE(cmd->stats)) {
+               stats = &cmd->stats[op];
+               spin_lock_irq(&stats->lock);
+               stats->sum += ds;
+               ++stats->n;
+               spin_unlock_irq(&stats->lock);
+       }
+       mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+                          "fw exec time for %s is %lld nsec\n",
+                          mlx5_command_str(op), ds);
+       *status = ent->status;
 
 out_free:
        free_cmd(ent);
@@ -1180,41 +1205,30 @@ err_dbg:
        return err;
 }
 
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
        int i;
 
        for (i = 0; i < cmd->max_reg_cmds; i++)
                down(&cmd->sem);
-
        down(&cmd->pages_sem);
 
-       flush_workqueue(cmd->wq);
-
-       cmd->mode = CMD_MODE_EVENTS;
+       cmd->mode = mode;
 
        up(&cmd->pages_sem);
        for (i = 0; i < cmd->max_reg_cmds; i++)
                up(&cmd->sem);
 }
 
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
 {
-       struct mlx5_cmd *cmd = &dev->cmd;
-       int i;
-
-       for (i = 0; i < cmd->max_reg_cmds; i++)
-               down(&cmd->sem);
-
-       down(&cmd->pages_sem);
-
-       flush_workqueue(cmd->wq);
-       cmd->mode = CMD_MODE_POLLING;
+       mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
 
-       up(&cmd->pages_sem);
-       for (i = 0; i < cmd->max_reg_cmds; i++)
-               up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+       mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
 }
 
 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1250,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
                        struct semaphore *sem;
 
                        ent = cmd->ent_arr[i];
+                       if (ent->callback)
+                               cancel_delayed_work(&ent->cb_timeout_work);
                        if (ent->page_queue)
                                sem = &cmd->pages_sem;
                        else
index e8a6c3325b396c6295db60c833309c4e4102e780..943b1bd434bf50cf6ef26b408035f3af4ea6c38a 100644 (file)
@@ -145,7 +145,6 @@ struct mlx5e_umr_wqe {
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_MIN_BW_ALLOC 1   /* Min percentage of BW allocation */
 #endif
 
 struct mlx5e_params {
@@ -191,6 +190,7 @@ struct mlx5e_tstamp {
 enum {
        MLX5E_RQ_STATE_POST_WQES_ENABLE,
        MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+       MLX5E_RQ_STATE_FLUSH_TIMEOUT,
 };
 
 struct mlx5e_cq {
@@ -220,6 +220,8 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
 typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
                                  u16 ix);
 
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
 struct mlx5e_dma_info {
        struct page     *page;
        dma_addr_t      addr;
@@ -241,6 +243,7 @@ struct mlx5e_rq {
        struct mlx5e_cq        cq;
        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_alloc_wqe     alloc_wqe;
+       mlx5e_fp_dealloc_wqe   dealloc_wqe;
 
        unsigned long          state;
        int                    ix;
@@ -305,6 +308,7 @@ struct mlx5e_sq_dma {
 enum {
        MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
        MLX5E_SQ_STATE_BF_ENABLE,
+       MLX5E_SQ_STATE_TX_TIMEOUT,
 };
 
 struct mlx5e_ico_wqe_info {
@@ -401,7 +405,7 @@ enum mlx5e_traffic_types {
 };
 
 enum {
-       MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+       MLX5E_STATE_ASYNC_EVENTS_ENABLED,
        MLX5E_STATE_OPENED,
        MLX5E_STATE_DESTROYING,
 };
@@ -538,6 +542,7 @@ struct mlx5e_priv {
        struct workqueue_struct    *wq;
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
+       struct work_struct         tx_timeout_work;
        struct delayed_work        update_stats_work;
 
        struct mlx5_core_dev      *mdev;
@@ -589,12 +594,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
 int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
 void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
                                    struct mlx5_cqe64 *cqe,
index b2db180ae2a5bbdda29219d63c72feea0958c12c..c585349e05c38ed26b49898d23be1ef83fa721a6 100644 (file)
@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
                        tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
-                       tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+                       tc_tx_bw[i] = ets->tc_tx_bw[i];
                        break;
                }
        }
@@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
 
        /* Validate Bandwidth Sum */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+                       if (!ets->tc_tx_bw[i])
+                               return -EINVAL;
+
                        bw_sum += ets->tc_tx_bw[i];
+               }
        }
 
        if (bw_sum != 0 && bw_sum != 100)
index fc7dcc03b1debd36c6362d887e4f90b3cefab50f..e667a870e0c273e72c49e3708ba0de6370dceaca 100644 (file)
@@ -184,7 +184,9 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
 #define MLX5E_NUM_SQ_STATS(priv) \
        (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
         test_bit(MLX5E_STATE_OPENED, &priv->state))
-#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+       (hweight8(mlx5e_query_pfc_combined(priv)) * \
+        NUM_PPORT_PER_PRIO_PFC_COUNTERS)
 
 static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 {
@@ -211,42 +213,41 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
 
        /* SW counters */
        for (i = 0; i < NUM_SW_COUNTERS; i++)
-               strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name);
+               strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
 
        /* Q counters */
        for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
-               strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name);
+               strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
 
        /* VPORT counters */
        for (i = 0; i < NUM_VPORT_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      vport_stats_desc[i].name);
+                      vport_stats_desc[i].format);
 
        /* PPORT counters */
        for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      pport_802_3_stats_desc[i].name);
+                      pport_802_3_stats_desc[i].format);
 
        for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      pport_2863_stats_desc[i].name);
+                      pport_2863_stats_desc[i].format);
 
        for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      pport_2819_stats_desc[i].name);
+                      pport_2819_stats_desc[i].format);
 
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
-                       sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-                               prio,
-                               pport_per_prio_traffic_stats_desc[i].name);
+                       sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                               pport_per_prio_traffic_stats_desc[i].format, prio);
        }
 
        pfc_combined = mlx5e_query_pfc_combined(priv);
        for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
-                       sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s",
-                               prio, pport_per_prio_pfc_stats_desc[i].name);
+                       sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                               pport_per_prio_pfc_stats_desc[i].format, prio);
                }
        }
 
@@ -256,16 +257,15 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
        /* per channel counters */
        for (i = 0; i < priv->params.num_channels; i++)
                for (j = 0; j < NUM_RQ_STATS; j++)
-                       sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i,
-                               rq_stats_desc[j].name);
+                       sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                               rq_stats_desc[j].format, i);
 
        for (tc = 0; tc < priv->params.num_tc; tc++)
                for (i = 0; i < priv->params.num_channels; i++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
                                sprintf(data + (idx++) * ETH_GSTRING_LEN,
-                                       "tx%d_%s",
-                                       priv->channeltc_to_txq_map[i][tc],
-                                       sq_stats_desc[j].name);
+                                       sq_stats_desc[j].format,
+                                       priv->channeltc_to_txq_map[i][tc]);
 }
 
 static void mlx5e_get_strings(struct net_device *dev,
index fd4392999eeefcd9a2f77b05583b04d04a10d663..5a4d88c2cdb292880496831603fb741f5700a880 100644 (file)
 #include "eswitch.h"
 #include "vxlan.h"
 
+enum {
+       MLX5_EN_QP_FLUSH_TIMEOUT_MS     = 5000,
+       MLX5_EN_QP_FLUSH_MSLEEP_QUANT   = 20,
+       MLX5_EN_QP_FLUSH_MAX_ITER       = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
+                                         MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
+};
+
 struct mlx5e_rq_param {
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param       wq;
@@ -74,10 +81,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
        port_state = mlx5_query_vport_state(mdev,
                MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
 
-       if (port_state == VPORT_STATE_UP)
+       if (port_state == VPORT_STATE_UP) {
+               netdev_info(priv->netdev, "Link up\n");
                netif_carrier_on(priv->netdev);
-       else
+       } else {
+               netdev_info(priv->netdev, "Link down\n");
                netif_carrier_off(priv->netdev);
+       }
 }
 
 static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -91,6 +101,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
        mutex_unlock(&priv->state_lock);
 }
 
+static void mlx5e_tx_timeout_work(struct work_struct *work)
+{
+       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+                                              tx_timeout_work);
+       int err;
+
+       rtnl_lock();
+       mutex_lock(&priv->state_lock);
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               goto unlock;
+       mlx5e_close_locked(priv->netdev);
+       err = mlx5e_open_locked(priv->netdev);
+       if (err)
+               netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+                          err);
+unlock:
+       mutex_unlock(&priv->state_lock);
+       rtnl_unlock();
+}
+
 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
        struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -105,11 +135,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 
                s->rx_packets   += rq_stats->packets;
                s->rx_bytes     += rq_stats->bytes;
-               s->lro_packets  += rq_stats->lro_packets;
-               s->lro_bytes    += rq_stats->lro_bytes;
+               s->rx_lro_packets += rq_stats->lro_packets;
+               s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
-               s->rx_csum_sw   += rq_stats->csum_sw;
-               s->rx_csum_inner += rq_stats->csum_inner;
+               s->rx_csum_complete += rq_stats->csum_complete;
+               s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                s->rx_mpwqe_frag   += rq_stats->mpwqe_frag;
@@ -122,24 +152,23 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 
                        s->tx_packets           += sq_stats->packets;
                        s->tx_bytes             += sq_stats->bytes;
-                       s->tso_packets          += sq_stats->tso_packets;
-                       s->tso_bytes            += sq_stats->tso_bytes;
-                       s->tso_inner_packets    += sq_stats->tso_inner_packets;
-                       s->tso_inner_bytes      += sq_stats->tso_inner_bytes;
+                       s->tx_tso_packets       += sq_stats->tso_packets;
+                       s->tx_tso_bytes         += sq_stats->tso_bytes;
+                       s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+                       s->tx_tso_inner_bytes   += sq_stats->tso_inner_bytes;
                        s->tx_queue_stopped     += sq_stats->stopped;
                        s->tx_queue_wake        += sq_stats->wake;
                        s->tx_queue_dropped     += sq_stats->dropped;
-                       s->tx_csum_inner        += sq_stats->csum_offload_inner;
-                       tx_offload_none         += sq_stats->csum_offload_none;
+                       s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+                       tx_offload_none         += sq_stats->csum_none;
                }
        }
 
        /* Update calculated offload counters */
-       s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
-       s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
-                            s->rx_csum_sw;
+       s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+       s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
 
-       s->link_down_events = MLX5_GET(ppcnt_reg,
+       s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                priv->stats.pport.phy_counters,
                                counter_set.phys_layer_cntrs.link_down_events);
 }
@@ -244,7 +273,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 {
        struct mlx5e_priv *priv = vpriv;
 
-       if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+       if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;
 
        switch (event) {
@@ -260,12 +289,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
 
 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 {
-       set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+       set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
 }
 
 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 {
-       clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+       clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
 
@@ -306,6 +335,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+               rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
                rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
                rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
@@ -321,6 +351,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
                rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+               rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
 
                rq->wqe_sz = (priv->params.lro_en) ?
                                priv->params.lro_wqe_sz :
@@ -526,17 +557,25 @@ err_destroy_rq:
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
+       int tout = 0;
+       int err;
+
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
 
-       mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
-       while (!mlx5_wq_ll_is_empty(&rq->wq))
-               msleep(20);
+       err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+       while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
+              tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
+               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+
+       if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
+               set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
 
        /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
        napi_synchronize(&rq->channel->napi);
 
        mlx5e_disable_rq(rq);
+       mlx5e_free_rx_descs(rq);
        mlx5e_destroy_rq(rq);
 }
 
@@ -580,7 +619,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int err;
 
-       err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+       err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
        if (err)
                return err;
 
@@ -783,6 +822,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
+       int tout = 0;
+       int err;
+
        if (sq->txq) {
                clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
                /* prevent netif_tx_wake_queue */
@@ -793,15 +835,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
                if (mlx5e_sq_has_room_for(sq, 1))
                        mlx5e_send_nop(sq, true);
 
-               mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+               err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+                                     MLX5_SQC_STATE_ERR);
+               if (err)
+                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
        }
 
-       while (sq->cc != sq->pc) /* wait till sq is empty */
-               msleep(20);
+       /* wait until the SQ is empty, unless a TX timeout occurred on this SQ */
+       while (sq->cc != sq->pc &&
+              !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
+               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+               if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
+                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+       }
 
        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);
 
+       mlx5e_free_tx_descs(sq);
        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
 }
@@ -1297,6 +1348,11 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
                        goto err_close_channels;
        }
 
+       /* FIXME: This is a workaround for a TX timeout watchdog false
+        * alarm when polling for inactive TX queues.
+        */
+       netif_tx_start_all_queues(priv->netdev);
+
        kfree(cparam);
        return 0;
 
@@ -1316,6 +1372,12 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
 {
        int i;
 
+       /* FIXME: This is a workaround only for the TX timeout watchdog
+        * false alarm when polling for inactive TX queues.
+        */
+       netif_tx_stop_all_queues(priv->netdev);
+       netif_tx_disable(priv->netdev);
+
        for (i = 0; i < priv->params.num_channels; i++)
                mlx5e_close_channel(priv->channel[i]);
 
@@ -1659,8 +1721,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
 
        netdev_set_num_tc(netdev, ntc);
 
+       /* Map all netdev TCs to TXQ offset 0; the driver keeps its own
+        * UP-to-TXQ mapping for QoS.
+        */
        for (tc = 0; tc < ntc; tc++)
-               netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+               netdev_set_tc_queue(netdev, tc, nch, 0);
 }
 
 int mlx5e_open_locked(struct net_device *netdev)
@@ -2591,6 +2656,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
        return features;
 }
 
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       bool sched_work = false;
+       int i;
+
+       netdev_err(dev, "TX timeout detected\n");
+
+       for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
+               struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+
+               if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+                       continue;
+               sched_work = true;
+               set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+               netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
+                          i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+       }
+
+       if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+               schedule_work(&priv->tx_timeout_work);
+}
+
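
For context on how mlx5e_tx_timeout() gets called: the core networking watchdog (dev_watchdog() in net/sched/sch_generic.c) fires when a stopped TX queue makes no progress for watchdog_timeo jiffies and then invokes the driver's .ndo_tx_timeout. A hedged sketch of that wiring; the timeout value is illustrative, not taken from this driver:

    netdev->watchdog_timeo = 5 * HZ;              /* assumed example value */
    netdev->netdev_ops = &mlx5e_netdev_ops_basic; /* has .ndo_tx_timeout */
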
 static const struct net_device_ops mlx5e_netdev_ops_basic = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
@@ -2608,6 +2696,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
 #endif
+       .ndo_tx_timeout          = mlx5e_tx_timeout,
 };
 
 static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2637,6 +2726,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
        .ndo_get_vf_config       = mlx5e_get_vf_config,
        .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
+       .ndo_tx_timeout          = mlx5e_tx_timeout,
 };
 
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2839,6 +2929,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 
        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+       INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
 }
 
@@ -3192,10 +3283,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        flush_workqueue(priv->wq);
        if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
                netif_device_detach(netdev);
-               mutex_lock(&priv->state_lock);
-               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-                       mlx5e_close_locked(netdev);
-               mutex_unlock(&priv->state_lock);
+               mlx5e_close(netdev);
        } else {
                unregister_netdev(netdev);
        }
index bd947704b59c41747a71d75725a5d230bde5dfc0..9f2a16a507e04f8cd9861251ab3d0d5973b23e90 100644 (file)
@@ -212,6 +212,20 @@ err_free_skb:
        return -ENOMEM;
 }
 
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+       struct sk_buff *skb = rq->skb[ix];
+
+       if (skb) {
+               rq->skb[ix] = NULL;
+               dma_unmap_single(rq->pdev,
+                                *((dma_addr_t *)skb->cb),
+                                rq->wqe_sz,
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb(skb);
+       }
+}
+
 static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
 {
        return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
@@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
        return 0;
 }
 
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+{
+       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+
+       wi->free_wqe(rq, wi);
+}
+
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq = &rq->wq;
+       struct mlx5e_rx_wqe *wqe;
+       __be16 wqe_ix_be;
+       u16 wqe_ix;
+
+       while (!mlx5_wq_ll_is_empty(wq)) {
+               wqe_ix_be = *wq->tail_next;
+               wqe_ix    = be16_to_cpu(wqe_ix_be);
+               wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+               rq->dealloc_wqe(rq, wqe_ix);
+               mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+                              &wqe->next.next_wqe_index);
+       }
+}
+
 #define RQ_CANNOT_POST(rq) \
                (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
                 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
@@ -689,7 +727,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
        if (is_first_ethertype_ip(skb)) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
-               rq->stats.csum_sw++;
+               rq->stats.csum_complete++;
                return;
        }
 
@@ -699,7 +737,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                if (cqe_is_tunneled(cqe)) {
                        skb->csum_level = 1;
                        skb->encapsulation = 1;
-                       rq->stats.csum_inner++;
+                       rq->stats.csum_unnecessary_inner++;
                }
                return;
        }
@@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int work_done = 0;
 
+       if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+               return 0;
+
        if (cq->decmprs_left)
                work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
 
index 83bc32b25849caae0b9422688e09fba1a3928fe2..fcd490cc56109d2279444db47131bb31432f923b 100644 (file)
        be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
 
 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
 
 struct counter_desc {
-       char            name[ETH_GSTRING_LEN];
+       char            format[ETH_GSTRING_LEN];
        int             offset; /* Byte offset */
 };
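
The two new macros bake the queue-index placeholder into the descriptor at compile time via string-literal concatenation. Expanding one entry by hand (a self-contained illustration, not driver code):

    #include <stddef.h>

    #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)

    struct mlx5e_rq_stats { unsigned long long packets; };
    struct counter_desc   { const char *format; size_t offset; };

    /* expands to { "rx%d_packets", offsetof(struct mlx5e_rq_stats, packets) } */
    static const struct counter_desc d = {
        MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets)
    };
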
 
@@ -53,18 +55,18 @@ struct mlx5e_sw_stats {
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
-       u64 tso_packets;
-       u64 tso_bytes;
-       u64 tso_inner_packets;
-       u64 tso_inner_bytes;
-       u64 lro_packets;
-       u64 lro_bytes;
-       u64 rx_csum_good;
+       u64 tx_tso_packets;
+       u64 tx_tso_bytes;
+       u64 tx_tso_inner_packets;
+       u64 tx_tso_inner_bytes;
+       u64 rx_lro_packets;
+       u64 rx_lro_bytes;
+       u64 rx_csum_unnecessary;
        u64 rx_csum_none;
-       u64 rx_csum_sw;
-       u64 rx_csum_inner;
-       u64 tx_csum_offload;
-       u64 tx_csum_inner;
+       u64 rx_csum_complete;
+       u64 rx_csum_unnecessary_inner;
+       u64 tx_csum_partial;
+       u64 tx_csum_partial_inner;
        u64 tx_queue_stopped;
        u64 tx_queue_wake;
        u64 tx_queue_dropped;
@@ -76,7 +78,7 @@ struct mlx5e_sw_stats {
        u64 rx_cqe_compress_pkts;
 
        /* Special handling counters */
-       u64 link_down_events;
+       u64 link_down_events_phy;
 };
 
 static const struct counter_desc sw_stats_desc[] = {
@@ -84,18 +86,18 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
@@ -105,7 +107,7 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
 };
 
 struct mlx5e_qcounter_stats {
@@ -125,12 +127,6 @@ struct mlx5e_vport_stats {
 };
 
 static const struct counter_desc vport_stats_desc[] = {
-       { "rx_vport_error_packets",
-               VPORT_COUNTER_OFF(received_errors.packets) },
-       { "rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) },
-       { "tx_vport_error_packets",
-               VPORT_COUNTER_OFF(transmit_errors.packets) },
-       { "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) },
        { "rx_vport_unicast_packets",
                VPORT_COUNTER_OFF(received_eth_unicast.packets) },
        { "rx_vport_unicast_bytes",
@@ -192,94 +188,68 @@ struct mlx5e_pport_stats {
 };
 
 static const struct counter_desc pport_802_3_stats_desc[] = {
-       { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) },
-       { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) },
-       { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
-       { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) },
-       { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) },
-       { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) },
-       { "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
-       { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
-       { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
-       { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
-       { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) },
-       { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) },
-       { "too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) },
-       { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
-       { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
-       { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) },
-       { "unsupported_op_rx",
-               PPORT_802_3_OFF(a_unsupported_opcodes_received) },
-       { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
-       { "pause_ctrl_tx",
-               PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+       { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+       { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+       { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+       { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+       { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+       { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+       { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+       { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+       { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+       { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+       { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+       { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+       { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+       { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+       { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+       { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+       { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+       { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
 };
 
 static const struct counter_desc pport_2863_stats_desc[] = {
-       { "in_octets", PPORT_2863_OFF(if_in_octets) },
-       { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) },
-       { "in_discards", PPORT_2863_OFF(if_in_discards) },
-       { "in_errors", PPORT_2863_OFF(if_in_errors) },
-       { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) },
-       { "out_octets", PPORT_2863_OFF(if_out_octets) },
-       { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) },
-       { "out_discards", PPORT_2863_OFF(if_out_discards) },
-       { "out_errors", PPORT_2863_OFF(if_out_errors) },
-       { "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) },
-       { "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) },
-       { "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) },
-       { "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) },
+       { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+       { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+       { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
 };
 
 static const struct counter_desc pport_2819_stats_desc[] = {
-       { "drop_events", PPORT_2819_OFF(ether_stats_drop_events) },
-       { "octets", PPORT_2819_OFF(ether_stats_octets) },
-       { "pkts", PPORT_2819_OFF(ether_stats_pkts) },
-       { "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) },
-       { "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) },
-       { "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) },
-       { "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) },
-       { "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) },
-       { "fragments", PPORT_2819_OFF(ether_stats_fragments) },
-       { "jabbers", PPORT_2819_OFF(ether_stats_jabbers) },
-       { "collisions", PPORT_2819_OFF(ether_stats_collisions) },
-       { "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) },
-       { "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
-       { "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
-       { "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
-       { "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
-       { "p1024to1518octets",
-               PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
-       { "p1519to2047octets",
-               PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
-       { "p2048to4095octets",
-               PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
-       { "p4096to8191octets",
-               PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
-       { "p8192to10239octets",
-               PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+       { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+       { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+       { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+       { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+       { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+       { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+       { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+       { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+       { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+       { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+       { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+       { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+       { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
 };
 
 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
-       { "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) },
-       { "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) },
-       { "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) },
-       { "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) },
+       { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+       { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+       { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+       { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
 };
 
 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
-       { "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) },
-       { "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
-       { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) },
-       { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
-       { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+       { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+       { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+       { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+       { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+       { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };
 
 struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
-       u64 csum_sw;
-       u64 csum_inner;
+       u64 csum_complete;
+       u64 csum_unnecessary_inner;
        u64 csum_none;
        u64 lro_packets;
        u64 lro_bytes;
@@ -292,19 +262,19 @@ struct mlx5e_rq_stats {
 };
 
 static const struct counter_desc rq_stats_desc[] = {
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
 };
 
 struct mlx5e_sq_stats {
@@ -315,28 +285,28 @@ struct mlx5e_sq_stats {
        u64 tso_bytes;
        u64 tso_inner_packets;
        u64 tso_inner_bytes;
-       u64 csum_offload_inner;
+       u64 csum_partial_inner;
        u64 nop;
        /* less likely accessed in data path */
-       u64 csum_offload_none;
+       u64 csum_none;
        u64 stopped;
        u64 wake;
        u64 dropped;
 };
 
 static const struct counter_desc sq_stats_desc[] = {
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
 };
 
 #define NUM_SW_COUNTERS                        ARRAY_SIZE(sw_stats_desc)
index 229ab16fb8d3a5c92afec2ece0a30e2de9a46236..5740b465ef8430f359ed218adf9f93bce9c46e8a 100644 (file)
@@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
-       int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
-                skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
+       int up = 0;
+
+       if (!netdev_get_num_tc(dev))
+               return channel_ix;
+
+       if (skb_vlan_tag_present(skb))
+               up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+       /* channel_ix can exceed num_channels - 1 since
+        * dev->num_real_tx_queues = num_channels * num_tc
+        */
+       if (channel_ix >= priv->params.num_channels)
+               channel_ix = reciprocal_scale(channel_ix,
+                                             priv->params.num_channels);
 
        return priv->channeltc_to_txq_map[channel_ix][up];
 }
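
reciprocal_scale() here is the helper from include/linux/kernel.h; it computes (val * ep_ro) >> 32, i.e. it treats val as a fraction of the 32-bit space and scales it into [0, ep_ro) without a division:

    /* the kernel's definition, reproduced for reference */
    static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
    {
        return (u32)(((u64) val * ep_ro) >> 32);
    }
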
@@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
         * headers and occur before the data gather.
         * Therefore these headers must be copied into the WQE
         */
-#define MLX5E_MIN_INLINE ETH_HLEN
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
 
        if (bf) {
                u16 ihs = skb_headlen(skb);
@@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
                        return skb_headlen(skb);
        }
 
-       return MLX5E_MIN_INLINE;
+       return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
 }
 
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -192,12 +204,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                if (skb->encapsulation) {
                        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
                                          MLX5_ETH_WQE_L4_INNER_CSUM;
-                       sq->stats.csum_offload_inner++;
+                       sq->stats.csum_partial_inner++;
                } else {
                        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
                }
        } else
-               sq->stats.csum_offload_none++;
+               sq->stats.csum_none++;
 
        if (sq->cc != sq->prev_cc) {
                sq->prev_cc = sq->cc;
@@ -317,7 +329,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        while ((sq->pc & wq->sz_m1) > sq->edge)
                mlx5e_send_nop(sq, false);
 
-       sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+       if (bf)
+               sq->bf_budget--;
 
        sq->stats.packets++;
        sq->stats.bytes += num_bytes;
@@ -340,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        return mlx5e_sq_xmit(sq, skb);
 }
 
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+       struct mlx5e_tx_wqe_info *wi;
+       struct sk_buff *skb;
+       u16 ci;
+       int i;
+
+       while (sq->cc != sq->pc) {
+               ci = sq->cc & sq->wq.sz_m1;
+               skb = sq->skb[ci];
+               wi = &sq->wqe_info[ci];
+
+               if (!skb) { /* nop */
+                       sq->cc++;
+                       continue;
+               }
+
+               for (i = 0; i < wi->num_dma; i++) {
+                       struct mlx5e_sq_dma *dma =
+                               mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+                       mlx5e_tx_dma_unmap(sq->pdev, dma);
+               }
+
+               dev_kfree_skb_any(skb);
+               sq->cc += wi->num_wqebbs;
+       }
+}
+
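
In mlx5e_free_tx_descs() above, cc and pc are free-running u16 counters; the ring slot is the counter masked with the ring-size mask (sq->wq.sz_m1), and multi-WQEBB descriptors advance cc by wi->num_wqebbs. A standalone sketch of that index discipline, with made-up sizes, showing that wraparound needs no special casing:

    #include <stdio.h>

    int main(void)
    {
        unsigned short cc = 65534, pc = 3; /* free-running, wrap at 2^16 */
        const unsigned short sz_m1 = 7;    /* ring of 8 slots */

        while (cc != pc) {
            unsigned short ci = cc & sz_m1; /* actual slot index */
            printf("drain slot %u (cc=%u)\n", (unsigned)ci, (unsigned)cc);
            cc++;                           /* 1 WQEBB per entry here */
        }
        return 0;
    }
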
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
        struct mlx5e_sq *sq;
@@ -351,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
        sq = container_of(cq, struct mlx5e_sq, cq);
 
+       if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+               return false;
+
        npkts = 0;
        nbytes = 0;
 
index b84a6918a7006253c503cb924c347f2123854781..aebbd6ccb9fe29fb90d46518f854252236d35934 100644 (file)
@@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
                                   0, &dest);
-       if (IS_ERR_OR_NULL(flow_rule)) {
+       if (IS_ERR(flow_rule)) {
                pr_warn(
                        "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
                         dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
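
The IS_ERR_OR_NULL -> IS_ERR conversions throughout this file encode the flow-steering API's contract: on failure these calls return an ERR_PTR-encoded errno and never NULL, so IS_ERR() is the complete check and PTR_ERR() always yields a real error code. The calling pattern under that contract, schematically:

    fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
    if (IS_ERR(fdb))
        return PTR_ERR(fdb);  /* NULL cannot reach this point */
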
@@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 
        table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
        fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
-       if (IS_ERR_OR_NULL(fdb)) {
+       if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create FDB Table err %d\n", err);
                goto out;
@@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
        eth_broadcast_addr(dmac);
        g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create flow group err(%d)\n", err);
                goto out;
@@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
        eth_zero_addr(dmac);
        dmac[0] = 0x01;
        g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
                goto out;
@@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
        g = mlx5_create_flow_group(fdb, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
                goto out;
@@ -529,7 +529,7 @@ out:
                }
        }
 
-       kfree(flow_group_in);
+       kvfree(flow_group_in);
        return err;
 }
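
The kfree -> kvfree changes pair the release with the allocator actually used: flow_group_in comes from mlx5_vzalloc(), which may fall back to vmalloc for large buffers, and kvfree() frees either backing correctly. The pattern, in brief:

    flow_group_in = mlx5_vzalloc(inlen); /* kmalloc- or vmalloc-backed */
    if (!flow_group_in)
        return -ENOMEM;
    /* ... build and submit the flow group ... */
    kvfree(flow_group_in);               /* matches either backing */
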
 
@@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
                                        esw_fdb_set_vport_rule(esw,
                                                               mac,
                                                               vport_idx);
+                       iter_vaddr->mc_promisc = true;
                        break;
                case MLX5_ACTION_DEL:
                        if (!iter_vaddr)
@@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
                return;
 
        acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-       if (IS_ERR_OR_NULL(acl)) {
+       if (IS_ERR(acl)) {
                err = PTR_ERR(acl);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
                         vport->vport, err);
@@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
        vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(vlan_grp)) {
+       if (IS_ERR(vlan_grp)) {
                err = PTR_ERR(vlan_grp);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
                         vport->vport, err);
@@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
        drop_grp = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(drop_grp)) {
+       if (IS_ERR(drop_grp)) {
                err = PTR_ERR(drop_grp);
                esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
                         vport->vport, err);
@@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        vport->egress.drop_grp = drop_grp;
        vport->egress.allowed_vlans_grp = vlan_grp;
 out:
-       kfree(flow_group_in);
+       kvfree(flow_group_in);
        if (err && !IS_ERR_OR_NULL(vlan_grp))
                mlx5_destroy_flow_group(vlan_grp);
        if (err && !IS_ERR_OR_NULL(acl))
@@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
                return;
 
        acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-       if (IS_ERR_OR_NULL(acl)) {
+       if (IS_ERR(acl)) {
                err = PTR_ERR(acl);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
                         vport->vport, err);
@@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
                         vport->vport, err);
@@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
                         vport->vport, err);
@@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
                         vport->vport, err);
@@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
 
        g = mlx5_create_flow_group(acl, flow_group_in);
-       if (IS_ERR_OR_NULL(g)) {
+       if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
                         vport->vport, err);
@@ -1259,7 +1260,7 @@ out:
                        mlx5_destroy_flow_table(vport->ingress.acl);
        }
 
-       kfree(flow_group_in);
+       kvfree(flow_group_in);
 }
 
 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+       if (IS_ERR(vport->ingress.allow_rule)) {
                err = PTR_ERR(vport->ingress.allow_rule);
                pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
                        vport->vport, err);
@@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_DROP,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+       if (IS_ERR(vport->ingress.drop_rule)) {
                err = PTR_ERR(vport->ingress.drop_rule);
                pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
                        vport->vport, err);
@@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+       if (IS_ERR(vport->egress.allowed_vlan)) {
                err = PTR_ERR(vport->egress.allowed_vlan);
                pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
                        vport->vport, err);
@@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                   match_v,
                                   MLX5_FLOW_CONTEXT_ACTION_DROP,
                                   0, NULL);
-       if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+       if (IS_ERR(vport->egress.drop_rule)) {
                err = PTR_ERR(vport->egress.drop_rule);
                pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
                        vport->vport, err);
@@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 
        /* Sync with current vport context */
        vport->enabled_events = enable_events;
-       esw_vport_change_handle_locked(vport);
-
        vport->enabled = true;
 
        /* only PF is trusted by default */
        vport->trusted = (vport_num) ? false : true;
-
-       arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+       esw_vport_change_handle_locked(vport);
 
        esw->enabled_vports++;
        esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
@@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
        (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
 
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+       ((u8 *)node_guid)[7] = mac[0];
+       ((u8 *)node_guid)[6] = mac[1];
+       ((u8 *)node_guid)[5] = mac[2];
+       ((u8 *)node_guid)[4] = 0xff;
+       ((u8 *)node_guid)[3] = 0xfe;
+       ((u8 *)node_guid)[2] = mac[3];
+       ((u8 *)node_guid)[1] = mac[4];
+       ((u8 *)node_guid)[0] = mac[5];
+}
+
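
node_guid_gen_from_mac() builds an EUI-64-style identifier by splitting the MAC address and inserting 0xFF, 0xFE in the middle; note it does not flip the universal/local bit as canonical modified EUI-64 would. A worked example, assuming a little-endian host where byte 7 of the u64 is the most significant:

    u8  mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    u64 node_guid;

    node_guid_gen_from_mac(&node_guid, mac);
    /* node_guid == 0x001122fffe334455 */
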
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               int vport, u8 mac[ETH_ALEN])
 {
-       int err = 0;
        struct mlx5_vport *evport;
+       u64 node_guid;
+       int err = 0;
 
        if (!ESW_ALLOWED(esw))
                return -EPERM;
@@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                return err;
        }
 
+       node_guid_gen_from_mac(&node_guid, mac);
+       err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+       if (err)
+               mlx5_core_warn(esw->dev,
+                              "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+                              vport, err);
+
        mutex_lock(&esw->state_lock);
        if (evport->enabled)
                err = esw_vport_ingress_config(esw, evport);
        mutex_unlock(&esw->state_lock);
-
        return err;
 }
 
index 8b5f0b2c0d5cd5d8f62cf6560ee9fa498800ac03..e912a3d2505ed39bee13717224e1c41111a29cda 100644 (file)
@@ -1292,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
                                       ft->id);
                        return err;
                }
-               root->root_ft = new_root_ft;
        }
+       root->root_ft = new_root_ft;
        return 0;
 }
 
@@ -1767,6 +1767,9 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev)
 
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
+       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return;
+
        cleanup_root_ns(dev);
        cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
        cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
@@ -1828,29 +1831,36 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
        int err = 0;
 
+       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return 0;
+
        err = mlx5_init_fc_stats(dev);
        if (err)
                return err;
 
-       if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+       if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+           MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
                err = init_root_ns(dev);
                if (err)
                        goto err;
        }
+
        if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
-               err = init_fdb_root_ns(dev);
-               if (err)
-                       goto err;
-       }
-       if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-               err = init_egress_acl_root_ns(dev);
-               if (err)
-                       goto err;
-       }
-       if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-               err = init_ingress_acl_root_ns(dev);
-               if (err)
-                       goto err;
+               if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+                       err = init_fdb_root_ns(dev);
+                       if (err)
+                               goto err;
+               }
+               if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+                       err = init_egress_acl_root_ns(dev);
+                       if (err)
+                               goto err;
+               }
+               if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+                       err = init_ingress_acl_root_ns(dev);
+                       if (err)
+                               goto err;
+               }
        }
 
        return 0;
index 42d16b9458e485205a344ab9a988caf7ae185632..96a59463ae65f1773856949b17a9224531cfe930 100644 (file)
@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
 
 void mlx5_enter_error_state(struct mlx5_core_dev *dev)
 {
+       mutex_lock(&dev->intf_state_mutex);
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
-               return;
+               goto unlock;
 
        mlx5_core_err(dev, "start\n");
-       if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+       if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
                dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+               trigger_cmd_completions(dev);
+       }
 
        mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
        mlx5_core_err(dev, "end\n");
+
+unlock:
+       mutex_unlock(&dev->intf_state_mutex);
 }
 
 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
        u32 count;
 
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-               trigger_cmd_completions(dev);
                mod_timer(&health->timer, get_next_poll_jiffies());
                return;
        }
index a19b59348dd685816c88736a4d9e47f78b3f847c..6695893ddd2d407743e8329959a05840c28ad3f1 100644 (file)
@@ -1422,46 +1422,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
        mlx5_pci_err_detected(dev->pdev, 0);
 }
 
-/* wait for the device to show vital signs. For now we check
- * that we can read the device ID and that the health buffer
- * shows a non zero value which is different than 0xffffffff
+/* wait for the device to show vital signs: the health
+ * counter must start advancing.
  */
-static void wait_vital(struct pci_dev *pdev)
+static int wait_vital(struct pci_dev *pdev)
 {
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_core_health *health = &dev->priv.health;
        const int niter = 100;
+       u32 last_count = 0;
        u32 count;
-       u16 did;
        int i;
 
-       /* Wait for firmware to be ready after reset */
-       msleep(1000);
-       for (i = 0; i < niter; i++) {
-               if (pci_read_config_word(pdev, 2, &did)) {
-                       dev_warn(&pdev->dev, "failed reading config word\n");
-                       break;
-               }
-               if (did == pdev->device) {
-                       dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
-                       break;
-               }
-               msleep(50);
-       }
-       if (i == niter)
-               dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
-
        for (i = 0; i < niter; i++) {
                count = ioread32be(health->health_counter);
                if (count && count != 0xffffffff) {
-                       dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
-                       break;
+                       if (last_count && last_count != count) {
+                               dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+                               return 0;
+                       }
+                       last_count = count;
                }
                msleep(50);
        }
 
-       if (i == niter)
-               dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+       return -ETIMEDOUT;
 }
 
 static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1473,7 +1458,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
        dev_info(&pdev->dev, "%s was called\n", __func__);
 
        pci_save_state(pdev);
-       wait_vital(pdev);
+       err = wait_vital(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+               return;
+       }
 
        err = mlx5_load_one(dev, priv);
        if (err)
@@ -1508,8 +1497,9 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4 VF */
        { PCI_VDEVICE(MELLANOX, 0x1015) },                      /* ConnectX-4LX */
        { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4LX VF */
-       { PCI_VDEVICE(MELLANOX, 0x1017) },                      /* ConnectX-5 */
+       { PCI_VDEVICE(MELLANOX, 0x1017) },                      /* ConnectX-5, PCIe 3.0 */
        { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},   /* ConnectX-5 VF */
+       { PCI_VDEVICE(MELLANOX, 0x1019) },                      /* ConnectX-5, PCIe 4.0 */
        { 0, }
 };
 
index 9eeee0545f1cf294ecddb7bb0becc873df8906da..32dea3524cee3ed4d984eb5aafc7a06abdbd713b 100644 (file)
@@ -345,7 +345,6 @@ retry:
                               func_id, npages, err);
                goto out_4k;
        }
-       dev->priv.fw_pages += npages;
 
        err = mlx5_cmd_status_to_err(&out.hdr);
        if (err) {
@@ -373,6 +372,33 @@ out_free:
        return err;
 }
 
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+                            struct mlx5_manage_pages_inbox *in, int in_size,
+                            struct mlx5_manage_pages_outbox *out, int out_size)
+{
+       struct fw_page *fwp;
+       struct rb_node *p;
+       u32 npages;
+       u32 i = 0;
+
+       if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+               return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
+                                                 (u32 *)out, out_size);
+
+       npages = be32_to_cpu(in->num_entries);
+
+       p = rb_first(&dev->priv.page_root);
+       while (p && i < npages) {
+               fwp = rb_entry(p, struct fw_page, rb_node);
+               out->pas[i] = cpu_to_be64(fwp->addr);
+               p = rb_next(p);
+               i++;
+       }
+
+       out->num_entries = cpu_to_be32(i);
+       return 0;
+}
+
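
reclaim_pages_cmd() above wraps the firmware command with an internal-error shortcut: once the device is in MLX5_DEVICE_STATE_INTERNAL_ERROR the firmware cannot answer, so the function synthesizes the reclaim output from the driver's own page tree, letting teardown proceed and the fw_pages/vfs_pages accounting (see the WARNs added further down) drain to zero. The dispatch shape, schematically (helper names are hypothetical):

    if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
        return exec_fw_command(in, out);   /* normal path */
    fill_output_from_page_tree(out);       /* device dead: answer locally */
    return 0;
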
 static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                         int *nclaimed)
 {
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        in.func_id = cpu_to_be16(func_id);
        in.num_entries = cpu_to_be32(npages);
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
-       err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+       err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
        if (err) {
-               mlx5_core_err(dev, "failed reclaiming pages\n");
-               goto out_free;
-       }
-       dev->priv.fw_pages -= npages;
-
-       if (out->hdr.status) {
-               err = mlx5_cmd_status_to_err(&out->hdr);
+               mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                goto out_free;
        }
 
@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                err = -EINVAL;
                goto out_free;
        }
-       if (nclaimed)
-               *nclaimed = num_claimed;
 
        for (i = 0; i < num_claimed; i++) {
                addr = be64_to_cpu(out->pas[i]);
                free_4k(dev, addr);
        }
+
+       if (nclaimed)
+               *nclaimed = num_claimed;
+
        dev->priv.fw_pages -= num_claimed;
        if (func_id)
                dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct fw_page, rb_node);
-                       if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-                               free_4k(dev, fwp->addr);
-                               nclaimed = 1;
-                       } else {
-                               err = reclaim_pages(dev, fwp->func_id,
-                                                   optimal_reclaimed_pages(),
-                                                   &nclaimed);
-                       }
+                       err = reclaim_pages(dev, fwp->func_id,
+                                           optimal_reclaimed_pages(),
+                                           &nclaimed);
+
                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
                                               err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
                }
        } while (p);
 
+       WARN(dev->priv.fw_pages,
+            "FW pages counter is %d after reclaiming all pages\n",
+            dev->priv.fw_pages);
+       WARN(dev->priv.vfs_pages,
+            "VFs FW pages counter is %d after reclaiming all pages\n",
+            dev->priv.vfs_pages);
+
        return 0;
 }
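
The accounting hunks above fix a mismatch: fw_pages used to be debited by
the number of pages requested, but the firmware may return fewer, so the
counter is now debited by num_claimed, and *nclaimed is reported only once
the pages have actually been freed. That exactness is what lets the new
WARNs assert both counters drain to zero at teardown. Condensed, the
invariant being enforced is roughly:

        num_claimed = be32_to_cpu(out->num_entries);
        if (num_claimed > npages)           /* FW must never over-return */
                return -EINVAL;
        dev->priv.fw_pages -= num_claimed;  /* debit what actually came back */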
 
index b720a274220d95793f5803620d18db8fcae2073b..b82d65802d964a2d03d9e2a99c2719c7642d72d3 100644 (file)
@@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);
        else
-               *xrcdn = be32_to_cpu(out.xrcdn);
+               *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
 
        return err;
 }
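
The one-line change above masks the XRC domain number to its 24-bit width;
the top byte of the dword is not part of the field, so taking the full
32-bit value could leak unrelated bits into the returned xrcdn. A tiny
helper expressing the intent (hypothetical, not part of the driver):

        static inline u32 get_24bit_field(__be32 v)
        {
                return be32_to_cpu(v) & 0xffffff;  /* field is bits 0..23 */
        }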
index b69dadcfb897a25a51b89a9caed4d1454d603d8e..91846dfcbe9cf1b39f0fbf6af8856ad4771dd1ff 100644 (file)
@@ -508,6 +508,41 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
 
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+                                   u32 vport, u64 node_guid)
+{
+       int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+       void *nic_vport_context;
+       void *in;
+       int err;
+
+       if (!vport)
+               return -EINVAL;
+       if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+               return -EACCES;
+       if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+               return -ENOTSUPP;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(modify_nic_vport_context_in, in,
+                field_select.node_guid, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+       MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+       nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+                                        in, nic_vport_context);
+       MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+       err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+       kvfree(in);
+
+       return err;
+}
+
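mlx5_modify_nic_vport_node_guid() above is new. It rejects vport 0, since
the PF must not rewrite its own GUID through this path, requires the
vport-group-manager capability, and returns -ENOTSUPP when the firmware
cannot modify node GUIDs. A hedged caller sketch, assuming an eswitch
manager assigning new_guid to VF vport 1:

        err = mlx5_modify_nic_vport_node_guid(mdev, 1, new_guid);
        if (err == -ENOTSUPP)
                mlx5_core_warn(mdev, "FW cannot modify node GUIDs\n");
        else if (err)
                mlx5_core_warn(mdev, "node GUID update failed: %d\n", err);
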
 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
                                        u16 *qkey_viol_cntr)
 {
index f2fd1ef16da7eba68deb5af1648b7ff28cf7ff44..e25a73ed2981848efa6175a6eeb43ad6891df1ec 100644 (file)
@@ -72,8 +72,8 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
        u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
        u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
 
-       memset(&in, 0, sizeof(in));
-       memset(&out, 0, sizeof(out));
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
 
        MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
                 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
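
The memset change above is a type cleanup, not a behaviour change: for a
local array u32 in[N], &in has type u32 (*)[N] while the bare name decays
to u32 *. Both forms point at the same bytes and sizeof(in) is unaffected,
but the decayed form is the conventional style and keeps the pointer type
honest. Illustrated:

        u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];

        memset(in, 0, sizeof(in)); /* in decays to u32 *; size is whole array */
        /* memset(&in, 0, sizeof(in)) zeroes the same bytes via u32 (*)[N] */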
@@ -105,6 +105,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
        struct mlx5e_vxlan *vxlan;
        int err;
 
+       if (mlx5e_vxlan_lookup_port(priv, port))
+               goto free_work;
+
        if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
                goto free_work;
 
index ce21ee5b23577ee63e8b5679a1fe2204c3ccd895..821a087c7ae221200f1a79c54ef399910325f644 100644 (file)
@@ -75,14 +75,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
                return err;
        }
 
        err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
                                  &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
                goto err_db_free;
        }
 
@@ -111,14 +111,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
                return err;
        }
 
        err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
                                  &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
                goto err_db_free;
        }
 
@@ -148,13 +148,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 
        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
                return err;
        }
 
-       err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+       err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+                                 &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
-               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
                goto err_db_free;
        }
 
index 1977e7a5c5301cc591fade66cb290eb7dbd163e1..57d48da709fb77076c2cd737ad87c02ef0ff3f90 100644 (file)
@@ -2718,7 +2718,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
  * Configures the switch priority to buffer mapping table.
  */
 #define MLXSW_REG_PPTB_ID 0x500B
-#define MLXSW_REG_PPTB_LEN 0x0C
+#define MLXSW_REG_PPTB_LEN 0x10
 
 static const struct mlxsw_reg_info mlxsw_reg_pptb = {
        .id = MLXSW_REG_PPTB_ID,
@@ -2784,6 +2784,13 @@ MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
  */
 MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
 
+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
 #define MLXSW_REG_PPTB_ALL_PRIO 0xFF
 
 static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
@@ -2792,6 +2799,14 @@ static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
        mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
        mlxsw_reg_pptb_local_port_set(payload, local_port);
        mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+       mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+                                                   u8 buff)
+{
+       mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+       mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
 }
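
With PPTB grown from 0x0C to 0x10 bytes, priorities 8..15 live in a
separate MSB array, and mlxsw_reg_pptb_prio_to_buff_pack() writes both
halves so a caller cannot update the low array while leaving the high one
stale. The dcb hunks later in this diff switch over to it:

        mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);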
 
 /* PBMC - Port Buffer Management Control Register
index 4a7273771028db08cc4e1bd9e76df1d05fd73785..374080027b2f2da103a5a2e89b16f16e689239ec 100644 (file)
@@ -171,23 +171,6 @@ static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
 }
 
-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
-                                        bool *p_is_up)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       char paos_pl[MLXSW_REG_PAOS_LEN];
-       u8 oper_status;
-       int err;
-
-       mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
-       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
-       if (err)
-               return err;
-       oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
-       *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
-       return 0;
-}
-
 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      unsigned char *addr)
 {
@@ -247,15 +230,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
 }
 
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                                   u8 swid)
 {
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char pspa_pl[MLXSW_REG_PSPA_LEN];
 
-       mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+       mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
 }
 
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+       return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+                                       swid);
+}
+
 static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                     bool enable)
 {
@@ -305,9 +296,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
 }
 
-static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
-                                          u8 local_port, u8 *p_module,
-                                          u8 *p_width, u8 *p_lane)
+static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
+                                        u8 local_port, u8 *p_module,
+                                        u8 *p_width, u8 *p_lane)
 {
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        int err;
@@ -322,16 +313,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
        return 0;
 }
 
-static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
-                                        u8 local_port, u8 *p_module,
-                                        u8 *p_width)
-{
-       u8 lane;
-
-       return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
-                                              p_width, &lane);
-}
-
 static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                                    u8 module, u8 width, u8 lane)
 {
@@ -410,7 +391,11 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
        }
 
        mlxsw_sp_txhdr_construct(skb, &tx_info);
-       len = skb->len;
+       /* TX header is consumed by HW on the way so we shouldn't count its
+        * bytes as being sent.
+        */
+       len = skb->len - MLXSW_TXHDR_LEN;
+
        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
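
The xmit change above (mirrored in mlxsw_sx further down) stops counting
the driver-prepended TX header, which the hardware strips before the frame
reaches the wire, toward tx_bytes. The length is snapshotted into a local
before the skb is handed off, because the skb may already be freed by the
time the stats are updated; the shape, with transmit() standing in for the
core call:

        len = skb->len - MLXSW_TXHDR_LEN;  /* exclude the HW-consumed header */
        err = transmit(skb);               /* skb may be freed from here on  */
        if (!err) {
                stats->tx_packets++;
                stats->tx_bytes += len;    /* use the snapshot, not skb->len */
        }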
@@ -949,17 +934,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
                                            size_t len)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       u8 module, width, lane;
+       u8 module = mlxsw_sp_port->mapping.module;
+       u8 width = mlxsw_sp_port->mapping.width;
+       u8 lane = mlxsw_sp_port->mapping.lane;
        int err;
 
-       err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
-                                             mlxsw_sp_port->local_port,
-                                             &module, &width, &lane);
-       if (err) {
-               netdev_err(dev, "Failed to retrieve module information\n");
-               return err;
-       }
-
        if (!mlxsw_sp_port->split)
                err = snprintf(name, len, "p%d", module + 1);
        else
@@ -1438,7 +1417,8 @@ static int mlxsw_sp_port_get_settings(struct net_device *dev,
 
        cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
                         mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
-                        SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+                        SUPPORTED_Pause | SUPPORTED_Asym_Pause |
+                        SUPPORTED_Autoneg;
        cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
        mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
                                        eth_proto_oper, cmd);
@@ -1497,7 +1477,6 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
        u32 eth_proto_new;
        u32 eth_proto_cap;
        u32 eth_proto_admin;
-       bool is_up;
        int err;
 
        speed = ethtool_cmd_speed(cmd);
@@ -1529,12 +1508,7 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
                return err;
        }
 
-       err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
-       if (err) {
-               netdev_err(dev, "Failed to get oper status");
-               return err;
-       }
-       if (!is_up)
+       if (!netif_running(dev))
                return 0;
 
        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
@@ -1681,8 +1655,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
        return 0;
 }
 
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                                 bool split, u8 module, u8 width)
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+                               bool split, u8 module, u8 width, u8 lane)
 {
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct net_device *dev;
@@ -1697,6 +1671,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
        mlxsw_sp_port->local_port = local_port;
        mlxsw_sp_port->split = split;
+       mlxsw_sp_port->mapping.module = module;
+       mlxsw_sp_port->mapping.width = width;
+       mlxsw_sp_port->mapping.lane = lane;
        bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
        mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
        if (!mlxsw_sp_port->active_vlans) {
@@ -1839,28 +1816,6 @@ err_port_active_vlans_alloc:
        return err;
 }
 
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
-                               bool split, u8 module, u8 width, u8 lane)
-{
-       int err;
-
-       err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
-                                      lane);
-       if (err)
-               return err;
-
-       err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
-                                    width);
-       if (err)
-               goto err_port_create;
-
-       return 0;
-
-err_port_create:
-       mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
-       return err;
-}
-
 static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
 {
        struct net_device *dev = mlxsw_sp_port->dev;
@@ -1909,8 +1864,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
 
 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 {
+       u8 module, width, lane;
        size_t alloc_size;
-       u8 module, width;
        int i;
        int err;
 
@@ -1921,13 +1876,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 
        for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
                err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
-                                                   &width);
+                                                   &width, &lane);
                if (err)
                        goto err_port_module_info_get;
                if (!width)
                        continue;
                mlxsw_sp->port_to_module[i] = module;
-               err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+               err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+                                          lane);
                if (err)
                        goto err_port_create;
        }
@@ -1948,12 +1904,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
        return local_port - offset;
 }
 
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+                                     u8 module, unsigned int count)
+{
+       u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+       int err, i;
+
+       for (i = 0; i < count; i++) {
+               err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+                                              width, i * width);
+               if (err)
+                       goto err_port_module_map;
+       }
+
+       for (i = 0; i < count; i++) {
+               err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+               if (err)
+                       goto err_port_swid_set;
+       }
+
+       for (i = 0; i < count; i++) {
+               err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+                                          module, width, i * width);
+               if (err)
+                       goto err_port_create;
+       }
+
+       return 0;
+
+err_port_create:
+       for (i--; i >= 0; i--)
+               mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+       i = count;
+err_port_swid_set:
+       for (i--; i >= 0; i--)
+               __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+                                        MLXSW_PORT_SWID_DISABLED_PORT);
+       i = count;
+err_port_module_map:
+       for (i--; i >= 0; i--)
+               mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+       return err;
+}
+
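mlxsw_sp_port_split_create() above is a clean example of the staged-passes
unwind idiom: three full passes (module map, swid set, port create), and on
failure each label first unwinds its own partially completed pass, then
resets i to count so the fall-through unwinds every earlier pass in full.
The skeleton, with hypothetical do_a/do_b steps:

        for (i = 0; i < count; i++) {
                err = do_a(i);
                if (err)
                        goto err_a;
        }
        for (i = 0; i < count; i++) {
                err = do_b(i);
                if (err)
                        goto err_b;
        }
        return 0;

        err_b:
        for (i--; i >= 0; i--)
                undo_b(i);      /* undo the partial pass */
        i = count;              /* pass A completed in full */
        err_a:
        for (i--; i >= 0; i--)
                undo_a(i);
        return err;
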
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+                                        u8 base_port, unsigned int count)
+{
+       u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+       int i;
+
+       /* Splitting by four means we need to re-create two ports,
+        * otherwise only one.
+        */
+       count = count / 2;
+
+       for (i = 0; i < count; i++) {
+               local_port = base_port + i * 2;
+               module = mlxsw_sp->port_to_module[local_port];
+
+               mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+                                        0);
+       }
+
+       for (i = 0; i < count; i++)
+               __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+       for (i = 0; i < count; i++) {
+               local_port = base_port + i * 2;
+               module = mlxsw_sp->port_to_module[local_port];
+
+               mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+                                    width, 0);
+       }
+}
+
 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                               unsigned int count)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_port *mlxsw_sp_port;
-       u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
        u8 module, cur_width, base_port;
        int i;
        int err;
@@ -1965,18 +1994,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                return -EINVAL;
        }
 
+       module = mlxsw_sp_port->mapping.module;
+       cur_width = mlxsw_sp_port->mapping.width;
+
        if (count != 2 && count != 4) {
                netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
                return -EINVAL;
        }
 
-       err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
-                                           &cur_width);
-       if (err) {
-               netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
-               return err;
-       }
-
        if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
                netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
                return -EINVAL;
@@ -2001,25 +2026,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
        for (i = 0; i < count; i++)
                mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 
-       for (i = 0; i < count; i++) {
-               err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
-                                          module, width, i * width);
-               if (err) {
-                       dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
-                       goto err_port_create;
-               }
+       err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+               goto err_port_split_create;
        }
 
        return 0;
 
-err_port_create:
-       for (i--; i >= 0; i--)
-               mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
-       for (i = 0; i < count / 2; i++) {
-               module = mlxsw_sp->port_to_module[base_port + i * 2];
-               mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
-                                    module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
-       }
+err_port_split_create:
+       mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
        return err;
 }
 
@@ -2027,10 +2043,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
        struct mlxsw_sp_port *mlxsw_sp_port;
-       u8 module, cur_width, base_port;
+       u8 cur_width, base_port;
        unsigned int count;
        int i;
-       int err;
 
        mlxsw_sp_port = mlxsw_sp->ports[local_port];
        if (!mlxsw_sp_port) {
@@ -2044,12 +2059,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
                return -EINVAL;
        }
 
-       err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
-                                           &cur_width);
-       if (err) {
-               netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
-               return err;
-       }
+       cur_width = mlxsw_sp_port->mapping.width;
        count = cur_width == 1 ? 4 : 2;
 
        base_port = mlxsw_sp_cluster_base_port_get(local_port);
@@ -2061,14 +2071,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
        for (i = 0; i < count; i++)
                mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
 
-       for (i = 0; i < count / 2; i++) {
-               module = mlxsw_sp->port_to_module[base_port + i * 2];
-               err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
-                                          module, MLXSW_PORT_MODULE_MAX_WIDTH,
-                                          0);
-               if (err)
-                       dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
-       }
+       mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
 
        return 0;
 }
index e2c022d3e2f3bf19df2084049920f40b7eac4c26..13b30eaa13d471180d473459aa8fe38606a0b8f1 100644 (file)
@@ -229,6 +229,11 @@ struct mlxsw_sp_port {
                struct ieee_maxrate *maxrate;
                struct ieee_pfc *pfc;
        } dcb;
+       struct {
+               u8 module;
+               u8 width;
+               u8 lane;
+       } mapping;
        /* 802.1Q bridge VLANs */
        unsigned long *active_vlans;
        unsigned long *untagged_vlans;
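
The new mapping sub-struct caches module/width/lane at port-creation time,
so paths that previously had to query the PMLP register (and could fail
doing so), such as get_phys_port_name() and the split/unsplit handlers
above, now read the cached values. A hypothetical accessor makes the
contract explicit:

        static inline u8 mlxsw_sp_port_module(const struct mlxsw_sp_port *port)
        {
                return port->mapping.module;  /* written once at creation */
        }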
index a3720a0fad7d6ff0573256bc95d081e8d25ff2e1..074cdda7b6f337a6985e10a8d3620dd2825d2f3e 100644 (file)
@@ -194,7 +194,7 @@ static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
 
        mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0);
+               mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
                               pptb_pl);
 }
index 0b323661c0b6b3122c9aba83fc8d0e3d0f2e68ad..01cfb75128278ca2a1b263636c62314d5bc3d1c2 100644 (file)
@@ -103,7 +103,8 @@ static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
 
        mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]);
+               mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
+
        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
                               pptb_pl);
 }
@@ -249,6 +250,7 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
                return err;
 
        memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+       mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
 
        return 0;
 }
@@ -351,7 +353,8 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err;
 
-       if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) {
+       if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
+           pfc->pfc_en) {
                netdev_err(dev, "PAUSE frames already enabled on port\n");
                return -EINVAL;
        }
@@ -371,6 +374,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
        }
 
        memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+       mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
 
        return 0;
 
index 3842eab9449a4fd294ac7a24d02c6b48de4a4802..25f658b3849a6beb6c1febc9ba5beab66bec5472 100644 (file)
@@ -316,7 +316,10 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
                }
        }
        mlxsw_sx_txhdr_construct(skb, &tx_info);
-       len = skb->len;
+       /* TX header is consumed by HW on the way so we shouldn't count its
+        * bytes as being sent.
+        */
+       len = skb->len - MLXSW_TXHDR_LEN;
        /* Due to a race we might fail here because of a full queue. In that
         * unlikely case we simply drop the packet.
         */
index 7066954c39d682fe229bf8c151ac46047c4eae77..0a26b11ca8f61eff9f7c5b880b770a99948f07bf 100644 (file)
@@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
                        enc28j60_phy_read(priv, PHIR);
                }
                /* TX complete handler */
-               if ((intflags & EIR_TXIF) != 0) {
+               if (((intflags & EIR_TXIF) != 0) &&
+                   ((intflags & EIR_TXERIF) == 0)) {
                        bool err = false;
                        loop++;
                        if (netif_msg_intr(priv))
@@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
                                        enc28j60_tx_clear(ndev, true);
                        } else
                                enc28j60_tx_clear(ndev, true);
-                       locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+                       locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
                }
                /* RX Error handler */
                if ((intflags & EIR_RXERIF) != 0) {
@@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
  */
 static void enc28j60_hw_tx(struct enc28j60_net *priv)
 {
+       BUG_ON(!priv->tx_skb);
+
        if (netif_msg_tx_queued(priv))
                printk(KERN_DEBUG DRV_NAME
                        ": Tx Packet Len:%d\n", priv->tx_skb->len);
index fa47c14c743ad811252c753302d4b52e944123f8..ba26bb356b8dff20f3ef6e9efffc089d96c30281 100644 (file)
@@ -2015,7 +2015,7 @@ static void nfp_net_open_stack(struct nfp_net *nn)
 
        netif_tx_wake_all_queues(nn->netdev);
 
-       enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+       enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
        nfp_net_read_link_status(nn);
 }
 
@@ -2044,7 +2044,7 @@ static int nfp_net_netdev_open(struct net_device *netdev)
                                      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
        if (err)
                goto err_free_exn;
-       disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+       disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
 
        nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
                               GFP_KERNEL);
@@ -2133,7 +2133,7 @@ static void nfp_net_close_stack(struct nfp_net *nn)
 {
        unsigned int r;
 
-       disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector);
+       disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
        netif_carrier_off(nn->netdev);
        nn->link_up = false;
 
index cbf58e1f9333f6b29d4d626d777c16db6a8c4fed..21ec1c2df2c7fdca71ab642345cb426f5f1c70c2 100644 (file)
@@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
                     struct dcbx_app_priority_entry *p_tbl,
                     u32 pri_tc_tbl, int count, bool dcbx_enabled)
 {
-       u8 tc, priority, priority_map;
+       u8 tc, priority_map;
        enum dcbx_protocol_type type;
        u16 protocol_id;
+       int priority;
        bool enable;
        int i;
 
@@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
                         * indication, but we only got here if there was an
                         * app tlv for the protocol, so dcbx must be enabled.
                         */
-                       enable = !!(type == DCBX_PROTOCOL_ETH);
+                       enable = !(type == DCBX_PROTOCOL_ETH);
 
                        qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
                                                 priority, tc, type);
index 089016f46f26c3ad79a104b0f9a9cf80d00cb383..2d89e8c16b3277b74c2570ba145297caa872b079 100644 (file)
@@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev)
        }
 }
 
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
 {
        u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct init_qm_port_params *p_qm_port;
        u16 num_pqs, multi_cos_tcs = 1;
+       u8 pf_wfq = qm_info->pf_wfq;
+       u32 pf_rl = qm_info->pf_rl;
        u16 num_vfs = 0;
 
 #ifdef CONFIG_QED_SRIOV
@@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 
        /* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue.
         */
-       qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
-                                       num_pqs, GFP_KERNEL);
+       qm_info->qm_pq_params = kcalloc(num_pqs,
+                                       sizeof(struct init_qm_pq_params),
+                                       b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
        if (!qm_info->qm_pq_params)
                goto alloc_err;
 
-       qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
-                                          num_vports, GFP_KERNEL);
+       qm_info->qm_vport_params = kcalloc(num_vports,
+                                          sizeof(struct init_qm_vport_params),
+                                          b_sleepable ? GFP_KERNEL
+                                                      : GFP_ATOMIC);
        if (!qm_info->qm_vport_params)
                goto alloc_err;
 
-       qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
-                                         MAX_NUM_PORTS, GFP_KERNEL);
+       qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+                                         sizeof(struct init_qm_port_params),
+                                         b_sleepable ? GFP_KERNEL
+                                                     : GFP_ATOMIC);
        if (!qm_info->qm_port_params)
                goto alloc_err;
 
-       qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
-                                   GFP_KERNEL);
+       qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+                                   b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
        if (!qm_info->wfq_data)
                goto alloc_err;
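
qed_init_qm_info() above gains a b_sleepable flag because it is now called
both from probe-time resource allocation, where sleeping is fine, and from
qed_qm_reconf() in a context that must not sleep; the flag selects the
allocation class for every table. Factored out, the choice is simply:

        gfp_t gfp = b_sleepable ? GFP_KERNEL : GFP_ATOMIC;

        qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
                                    gfp);
        if (!qm_info->wfq_data)
                goto alloc_err;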
 
@@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
        for (i = 0; i < qm_info->num_vports; i++)
                qm_info->qm_vport_params[i].vport_wfq = 1;
 
-       qm_info->pf_wfq = 0;
-       qm_info->pf_rl = 0;
        qm_info->vport_rl_en = 1;
        qm_info->vport_wfq_en = 1;
+       qm_info->pf_rl = pf_rl;
+       qm_info->pf_wfq = pf_wfq;
 
        return 0;
 
@@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        qed_qm_info_free(p_hwfn);
 
        /* initialize qed's qm data structure */
-       rc = qed_init_qm_info(p_hwfn);
+       rc = qed_init_qm_info(p_hwfn, false);
        if (rc)
                return rc;
 
@@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
                        goto alloc_err;
 
                /* Prepare and process QM requirements */
-               rc = qed_init_qm_info(p_hwfn);
+               rc = qed_init_qm_info(p_hwfn, true);
                if (rc)
                        goto alloc_err;
 
@@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 
        hw_mode |= 1 << MODE_ASIC;
 
+       if (p_hwfn->cdev->num_hwfns > 1)
+               hw_mode |= 1 << MODE_100G;
+
        p_hwfn->hw_info.hw_mode = hw_mode;
+
+       DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+                  "Configuring function for hw_mode: 0x%08x\n",
+                  p_hwfn->hw_info.hw_mode);
 }
 
 /* Init run time data for all PFs on an engine. */
@@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev,
        u32 load_code, param;
        int rc, mfw_rc, i;
 
+       if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+               DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+               return -EINVAL;
+       }
+
        if (IS_PF(cdev)) {
                rc = qed_init_fw_data(cdev, bin_fw_data);
                if (rc != 0)
@@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
 {
        int i;
 
+       if (cdev->num_hwfns > 1) {
+               DP_VERBOSE(cdev,
+                          NETIF_MSG_LINK,
+                          "WFQ configuration is not supported for this device\n");
+               return;
+       }
+
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
index 9afc15fdbb02af0762a2b46f1ed5fde59322e846..e29ed5a69566357d144b1997151559792f66a656 100644 (file)
@@ -3700,6 +3700,7 @@ struct public_port {
 #define MEDIA_DA_TWINAX         0x3
 #define MEDIA_BASE_T            0x4
 #define MEDIA_SFP_1G_FIBER      0x5
+#define MEDIA_MODULE_FIBER      0x6
 #define MEDIA_KR                0xf0
 #define MEDIA_NOT_PRESENT       0xff
 
index 8fba87dd48afaf6ce9ce756bdb24112e7d921176..aada4c7e095f2130c688e30384cdb29b944cc9d5 100644 (file)
@@ -72,6 +72,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
        p_ramrod->mtu                   = cpu_to_le16(p_params->mtu);
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->drop_ttl0_en          = p_params->drop_ttl0;
+       p_ramrod->untagged              = p_params->only_untagged;
 
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -247,10 +248,6 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                          !!(accept_filter & QED_ACCEPT_NONE));
 
-               SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
-                         (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
-                          !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                          !!(accept_filter & QED_ACCEPT_NONE));
 
@@ -1748,7 +1745,8 @@ static int qed_start_vport(struct qed_dev *cdev,
                           start.vport_id, start.mtu);
        }
 
-       qed_reset_vport_stats(cdev);
+       if (params->clear_stats)
+               qed_reset_vport_stats(cdev);
 
        return 0;
 }
index 8b22f87033ce74e093831cfa43a3e4d87ac0def5..c7e01b3035407e83c8c0c2e8216275ae9c8811aa 100644 (file)
@@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
                /* Fallthrough */
 
        case QED_INT_MODE_MSI:
-               rc = pci_enable_msi(cdev->pdev);
-               if (!rc) {
-                       int_params->out.int_mode = QED_INT_MODE_MSI;
-                       goto out;
-               }
+               if (cdev->num_hwfns == 1) {
+                       rc = pci_enable_msi(cdev->pdev);
+                       if (!rc) {
+                               int_params->out.int_mode = QED_INT_MODE_MSI;
+                               goto out;
+                       }
 
-               DP_NOTICE(cdev, "Failed to enable MSI\n");
-               if (force_mode)
-                       goto out;
+                       DP_NOTICE(cdev, "Failed to enable MSI\n");
+                       if (force_mode)
+                               goto out;
+               }
                /* Fallthrough */
 
        case QED_INT_MODE_INTA:
@@ -1083,6 +1085,7 @@ static int qed_get_port_type(u32 media_type)
        case MEDIA_SFPP_10G_FIBER:
        case MEDIA_SFP_1G_FIBER:
        case MEDIA_XFP_FIBER:
+       case MEDIA_MODULE_FIBER:
        case MEDIA_KR:
                port_type = PORT_FIBRE;
                break;
@@ -1103,6 +1106,39 @@ static int qed_get_port_type(u32 media_type)
        return port_type;
 }
 
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+                            struct qed_mcp_link_params *params,
+                            struct qed_mcp_link_state *link,
+                            struct qed_mcp_link_capabilities *link_caps)
+{
+       void *p;
+
+       if (!IS_PF(hwfn->cdev)) {
+               qed_vf_get_link_params(hwfn, params);
+               qed_vf_get_link_state(hwfn, link);
+               qed_vf_get_link_caps(hwfn, link_caps);
+
+               return 0;
+       }
+
+       p = qed_mcp_get_link_params(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(params, p, sizeof(*params));
+
+       p = qed_mcp_get_link_state(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(link, p, sizeof(*link));
+
+       p = qed_mcp_get_link_capabilities(hwfn);
+       if (!p)
+               return -ENXIO;
+       memcpy(link_caps, p, sizeof(*link_caps));
+
+       return 0;
+}
+
 static void qed_fill_link(struct qed_hwfn *hwfn,
                          struct qed_link_output *if_link)
 {
@@ -1114,15 +1150,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
        memset(if_link, 0, sizeof(*if_link));
 
        /* Prepare source inputs */
-       if (IS_PF(hwfn->cdev)) {
-               memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
-               memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
-               memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
-                      sizeof(link_caps));
-       } else {
-               qed_vf_get_link_params(hwfn, &params);
-               qed_vf_get_link_state(hwfn, &link);
-               qed_vf_get_link_caps(hwfn, &link_caps);
+       if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+               dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+               return;
        }
 
        /* Set the link parameters to pass to protocol driver */
index acac6626a1b29417eacdbda3751cee4cb241ce9a..b122f6013b6c1d786dcf8bb6b4b1bcea914c31e3 100644 (file)
@@ -213,19 +213,15 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
-       /* validate producer is up to-date */
-       rmb();
-
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
 
-       /* do not reorder */
-       barrier();
+       /* make sure the SPQE is updated before the doorbell */
+       wmb();
 
        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
 
        /* make sure doorbell is rung */
-       mmiowb();
+       wmb();
 
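The barrier rework here replaces an rmb()/barrier()/mmiowb() mix with two
wmb() calls whose purpose the comments now state: the first orders the SPQ
element update in host memory before the doorbell MMIO write, the second
orders the doorbell ahead of whatever follows. The producer/doorbell shape,
sketched with a hypothetical element layout and doorbell address:

        elem->flags = cpu_to_le32(READY);  /* 1. publish the element      */
        wmb();                             /* 2. element before doorbell  */
        writel(prod_idx, db_addr);         /* 3. ring the doorbell (MMIO) */
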
        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
@@ -614,7 +610,9 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 
                        *p_en2 = *p_ent;
 
-                       kfree(p_ent);
+                       /* EBLOCK is responsible for freeing the allocated p_ent */
+                       if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+                               kfree(p_ent);
 
                        p_ent = p_en2;
                }
@@ -749,6 +747,15 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+               if (p_ent->queue == &p_spq->unlimited_pending) {
+                       /* This is an allocated p_ent which does not need to
+                        * be returned to the pool.
+                        */
+                       kfree(p_ent);
+                       return rc;
+               }
+
                if (rc)
                        goto spq_post_fail2;
 
@@ -844,8 +851,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
 
-       if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
-               /* EBLOCK is responsible for freeing its own entry */
+       if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+           (found->queue == &p_spq->unlimited_pending))
+               /* EBLOCK is responsible for returning its own entry into the
+                * free list, unless it originally added the entry into the
+                * unlimited pending list.
+                */
                qed_spq_return_entry(p_hwfn, found);
 
        /* Attempt to post pending requests */
index c8667c65e685b3be8709a0daa188a1e07d389e22..c90b2b6ad96937feb7f04ef1a8123a73bca061d4 100644 (file)
 #include "qed_vf.h"
 #define QED_VF_ARRAY_LENGTH (3)
 
+#ifdef CONFIG_QED_SRIOV
 #define IS_VF(cdev)             ((cdev)->b_is_vf)
 #define IS_PF(cdev)             (!((cdev)->b_is_vf))
-#ifdef CONFIG_QED_SRIOV
 #define IS_PF_SRIOV(p_hwfn)     (!!((p_hwfn)->cdev->p_iov_info))
 #else
+#define IS_VF(cdev)             (0)
+#define IS_PF(cdev)             (1)
 #define IS_PF_SRIOV(p_hwfn)     (0)
 #endif
 #define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))
index 1bc75358cbc471856ebd1626a8d6432653b4934e..ad3cae3b7243b54f5b2a74413d7464eda32d67ab 100644 (file)
@@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
        case ETH_SS_PRIV_FLAGS:
                return QEDE_PRI_FLAG_LEN;
        case ETH_SS_TEST:
-               return QEDE_ETHTOOL_TEST_MAX;
+               if (!IS_VF(edev))
+                       return QEDE_ETHTOOL_TEST_MAX;
+               else
+                       return 0;
        default:
                DP_VERBOSE(edev, QED_MSG_DEBUG,
                           "Unsupported stringset 0x%08x\n", stringset);
index 337e839ca586724edd7a00245ef689b7ffaf4833..f8e11f953acb060b4501ab03da0743a7dc548dab 100644 (file)
@@ -87,7 +87,9 @@ static const struct pci_device_id qede_pci_tbl[] = {
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
        {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
        { 0 }
 };
 
@@ -1824,7 +1826,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx,
 {
        struct qede_dev *edev = netdev_priv(dev);
 
-       return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
+       return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
                                        max_tx_rate);
 }
 
@@ -2091,6 +2093,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
        edev->accept_any_vlan = false;
 }
 
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       netdev_features_t changes = features ^ dev->features;
+       bool need_reload = false;
+
+       /* No action needed if hardware GRO is disabled during driver load */
+       if (changes & NETIF_F_GRO) {
+               if (dev->features & NETIF_F_GRO)
+                       need_reload = !edev->gro_disable;
+               else
+                       need_reload = edev->gro_disable;
+       }
+
+       if (need_reload && netif_running(edev->ndev)) {
+               dev->features = features;
+               qede_reload(edev, NULL, NULL);
+               return 1;
+       }
+
+       return 0;
+}
+
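qede_set_features() above returns 1 when it had to reload the device. As
far as I recall the contemporary core behaviour, __netdev_update_features()
treats a positive return as "the driver already applied the change itself"
and skips its own dev->features assignment, which is why the function
writes dev->features before calling qede_reload(). Roughly what the core
does with the return value (a paraphrase, not the exact source):

        err = ops->ndo_set_features(dev, features);
        if (err < 0)
                return err;                  /* change rejected */
        if (!err)
                dev->features = features;    /* driver did not apply it */
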
 #ifdef CONFIG_QEDE_VXLAN
 static void qede_add_vxlan_port(struct net_device *dev,
                                sa_family_t sa_family, __be16 port)
@@ -2175,6 +2200,7 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+       .ndo_set_features = qede_set_features,
        .ndo_get_stats64 = qede_get_stats64,
 #ifdef CONFIG_QED_SRIOV
        .ndo_set_vf_link_state = qede_set_vf_link_state,
@@ -3205,7 +3231,7 @@ static int qede_stop_queues(struct qede_dev *edev)
        return rc;
 }
 
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 {
        int rc, tc, i;
        int vlan_removal_en = 1;
@@ -3436,6 +3462,7 @@ out:
 
 enum qede_load_mode {
        QEDE_LOAD_NORMAL,
+       QEDE_LOAD_RELOAD,
 };
 
 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3474,7 +3501,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
                goto err3;
        DP_INFO(edev, "Setup IRQs succeeded\n");
 
-       rc = qede_start_queues(edev);
+       rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
        if (rc)
                goto err4;
        DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@@ -3529,7 +3556,7 @@ void qede_reload(struct qede_dev *edev,
        if (func)
                func(edev, args);
 
-       qede_load(edev, QEDE_LOAD_NORMAL);
+       qede_load(edev, QEDE_LOAD_RELOAD);
 
        mutex_lock(&edev->qede_lock);
        qede_config_rx_mode(edev->ndev);
index 7bd6f25b4625f3bd634f9ac519127edca0a4fbce..87c642d3b075b2bc9845ba2cdbb4204e788028c3 100644 (file)
@@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        tx_ring->tx_stats.tx_bytes += skb->len;
        tx_ring->tx_stats.xmit_called++;
 
+       /* Ensure writes are complete before HW fetches Tx descriptors */
+       wmb();
        qlcnic_update_cmd_producer(tx_ring);
 
        return NETDEV_TX_OK;
@@ -2220,7 +2222,7 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
        if (!opcode)
                return;
 
-       ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+       ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
        qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
        desc = &sds_ring->desc_head[consumer];
        desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
index 83d72106471c1237171f68955a1885df3ed950c1..fd5d1c93b55b7d8f31147b73d51aa562f5f0714f 100644 (file)
@@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev)
        }
 
        /* Disabling the timer */
-       del_timer_sync(&qdev->timer);
        ql_cancel_all_work_sync(qdev);
 
        for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                netif_device_detach(ndev);
+               del_timer_sync(&qdev->timer);
                if (netif_running(ndev))
                        ql_eeh_close(ndev);
                pci_disable_device(pdev);
@@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
        case pci_channel_io_perm_failure:
                dev_err(&pdev->dev,
                        "%s: pci_channel_io_perm_failure.\n", __func__);
+               del_timer_sync(&qdev->timer);
                ql_eeh_close(ndev);
                set_bit(QL_EEH_FATAL, &qdev->flags);
                return PCI_ERS_RESULT_DISCONNECT;
index 1681084cc96f8270d36e55d9ede099a58632566a..1f309127457d24365c611c505f4611659d445209 100644 (file)
@@ -619,6 +619,17 @@ fail:
        return rc;
 }
 
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
+
+       /* All our existing PIO buffers went away */
+       efx_for_each_channel(channel, efx)
+               efx_for_each_channel_tx_queue(tx_queue, channel)
+                       tx_queue->piobuf = NULL;
+}
+
 #else /* !EFX_USE_PIO */
 
 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
@@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
 {
 }
 
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+}
+
 #endif /* EFX_USE_PIO */
 
 static void efx_ef10_remove(struct efx_nic *efx)
@@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
        nic_data->must_realloc_vis = true;
        nic_data->must_restore_filters = true;
        nic_data->must_restore_piobufs = true;
+       efx_ef10_forget_old_piobufs(efx);
        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
        /* Driver-created vswitches and vports must be re-created */
index 0705ec869487921f89252c8ef99420375a83f491..097f363f16309583412b97021e94244b926c9498 100644 (file)
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
 
 #ifdef CONFIG_RFS_ACCEL
        if (efx->type->offload_features & NETIF_F_NTUPLE) {
-               efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
-                                          sizeof(*efx->rps_flow_id),
-                                          GFP_KERNEL);
-               if (!efx->rps_flow_id) {
+               struct efx_channel *channel;
+               int i, success = 1;
+
+               efx_for_each_channel(channel, efx) {
+                       channel->rps_flow_id =
+                               kcalloc(efx->type->max_rx_ip_filters,
+                                       sizeof(*channel->rps_flow_id),
+                                       GFP_KERNEL);
+                       if (!channel->rps_flow_id)
+                               success = 0;
+                       else
+                               for (i = 0;
+                                    i < efx->type->max_rx_ip_filters;
+                                    ++i)
+                                       channel->rps_flow_id[i] =
+                                               RPS_FLOW_ID_INVALID;
+               }
+
+               if (!success) {
+                       efx_for_each_channel(channel, efx)
+                               kfree(channel->rps_flow_id);
                        efx->type->filter_table_remove(efx);
                        rc = -ENOMEM;
                        goto out_unlock;
                }
+
+               efx->rps_expire_index = efx->rps_expire_channel = 0;
        }
 #endif
 out_unlock:
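
The efx hunks above move rps_flow_id from one per-NIC array to one array
per channel, seed every slot with RPS_FLOW_ID_INVALID so a stale zero can
never match a live flow, and allocate all-or-nothing: if any channel's
array fails, everything is freed and the filter table is torn down. One way
to write the same all-or-nothing allocation, with n_filters hypothetical:

        efx_for_each_channel(channel, efx) {
                channel->rps_flow_id = kcalloc(n_filters,
                                               sizeof(*channel->rps_flow_id),
                                               GFP_KERNEL);
                if (!channel->rps_flow_id)
                        goto fail;
        }
        return 0;
        fail:
        efx_for_each_channel(channel, efx)
                kfree(channel->rps_flow_id);  /* kfree(NULL) is a no-op */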
@@ -1744,7 +1763,10 @@ out_unlock:
 static void efx_remove_filters(struct efx_nic *efx)
 {
 #ifdef CONFIG_RFS_ACCEL
-       kfree(efx->rps_flow_id);
+       struct efx_channel *channel;
+
+       efx_for_each_channel(channel, efx)
+               kfree(channel->rps_flow_id);
 #endif
        down_write(&efx->filter_sem);
        efx->type->filter_table_remove(efx);
index 133e9e35be9e9212bf14531bffd1c0cf78fde742..4c83739d158f1d36714b40d4eedfbec34c942882 100644 (file)
@@ -104,7 +104,8 @@ int efx_farch_test_registers(struct efx_nic *efx,
                             const struct efx_farch_register_test *regs,
                             size_t n_regs)
 {
-       unsigned address = 0, i, j;
+       unsigned address = 0;
+       int i, j;
        efx_oword_t mask, imask, original, reg, buf;
 
        for (i = 0; i < n_regs; ++i) {
index 7f295c4d7b80cb866c3f2f10f71aeeaefaf3caa5..2a9228a6e4a08cea22ba5bed9e1eae24cc25f591 100644 (file)
@@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
 
        case MC_CMD_MEDIA_XFP:
        case MC_CMD_MEDIA_SFP_PLUS:
-               result |= SUPPORTED_FIBRE;
-               break;
-
        case MC_CMD_MEDIA_QSFP_PLUS:
                result |= SUPPORTED_FIBRE;
+               if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+                       result |= SUPPORTED_1000baseT_Full;
+               if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+                       result |= SUPPORTED_10000baseT_Full;
                if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
                        result |= SUPPORTED_40000baseCR4_Full;
                break;
index 38c422321cdafe299111746f77e2d5ca906a402f..d13ddf9703ff3a220a2d1e7bb5505741eccb8e8c 100644 (file)
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
  * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ *      indexed by filter ID
  * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
  * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
  * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@ struct efx_channel {
        unsigned int irq_mod_score;
 #ifdef CONFIG_RFS_ACCEL
        unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+       u32 *rps_flow_id;
 #endif
 
        unsigned n_rx_tobe_disc;
@@ -889,9 +893,9 @@ struct vfdi_status;
  * @filter_sem: Filter table rw_semaphore, for freeing the table
  * @filter_lock: Filter table lock, for mere content changes
  * @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- *     indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ *     @rps_expire_channel's @rps_flow_id
  * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
  * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
  *     Decremented when the efx_flush_rx_queue() is called.
@@ -1035,7 +1039,7 @@ struct efx_nic {
        spinlock_t filter_lock;
        void *filter_state;
 #ifdef CONFIG_RFS_ACCEL
-       u32 *rps_flow_id;
+       unsigned int rps_expire_channel;
        unsigned int rps_expire_index;
 #endif
 
index 8956995b2fe713b377f205a55fada36c8a04253a..02b0b5272c14f6148c1999a06a8b71baacf54edc 100644 (file)
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_filter_spec spec;
-       const __be16 *ports;
-       __be16 ether_type;
-       int nhoff;
+       struct flow_keys fk;
        int rc;
 
-       /* The core RPS/RFS code has already parsed and validated
-        * VLAN, IP and transport headers.  We assume they are in the
-        * header area.
-        */
-
-       if (skb->protocol == htons(ETH_P_8021Q)) {
-               const struct vlan_hdr *vh =
-                       (const struct vlan_hdr *)skb->data;
+       if (flow_id == RPS_FLOW_ID_INVALID)
+               return -EINVAL;
 
-               /* We can't filter on the IP 5-tuple and the vlan
-                * together, so just strip the vlan header and filter
-                * on the IP part.
-                */
-               EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
-               ether_type = vh->h_vlan_encapsulated_proto;
-               nhoff = sizeof(struct vlan_hdr);
-       } else {
-               ether_type = skb->protocol;
-               nhoff = 0;
-       }
+       if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+               return -EPROTONOSUPPORT;
 
-       if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+       if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+               return -EPROTONOSUPPORT;
+       if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
                return -EPROTONOSUPPORT;
 
        efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
                EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
                EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
-       spec.ether_type = ether_type;
-
-       if (ether_type == htons(ETH_P_IP)) {
-               const struct iphdr *ip =
-                       (const struct iphdr *)(skb->data + nhoff);
-
-               EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
-               if (ip_is_fragment(ip))
-                       return -EPROTONOSUPPORT;
-               spec.ip_proto = ip->protocol;
-               spec.rem_host[0] = ip->saddr;
-               spec.loc_host[0] = ip->daddr;
-               EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
-               ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+       spec.ether_type = fk.basic.n_proto;
+       spec.ip_proto = fk.basic.ip_proto;
+
+       if (fk.basic.n_proto == htons(ETH_P_IP)) {
+               spec.rem_host[0] = fk.addrs.v4addrs.src;
+               spec.loc_host[0] = fk.addrs.v4addrs.dst;
        } else {
-               const struct ipv6hdr *ip6 =
-                       (const struct ipv6hdr *)(skb->data + nhoff);
-
-               EFX_BUG_ON_PARANOID(skb_headlen(skb) <
-                                   nhoff + sizeof(*ip6) + 4);
-               spec.ip_proto = ip6->nexthdr;
-               memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
-               memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
-               ports = (const __be16 *)(ip6 + 1);
+               memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+               memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
        }
 
-       spec.rem_port = ports[0];
-       spec.loc_port = ports[1];
+       spec.rem_port = fk.ports.src;
+       spec.loc_port = fk.ports.dst;
 
        rc = efx->type->filter_rfs_insert(efx, &spec);
        if (rc < 0)
                return rc;
 
        /* Remember this so we can check whether to expire the filter later */
-       efx->rps_flow_id[rc] = flow_id;
-       channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+       channel = efx_get_channel(efx, rxq_index);
+       channel->rps_flow_id[rc] = flow_id;
        ++channel->rfs_filters_added;
 
-       if (ether_type == htons(ETH_P_IP))
+       if (spec.ether_type == htons(ETH_P_IP))
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-                          spec.rem_host, ntohs(ports[0]), spec.loc_host,
-                          ntohs(ports[1]), rxq_index, flow_id, rc);
+                          spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+                          ntohs(spec.loc_port), rxq_index, flow_id, rc);
        else
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-                          spec.rem_host, ntohs(ports[0]), spec.loc_host,
-                          ntohs(ports[1]), rxq_index, flow_id, rc);
+                          spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+                          ntohs(spec.loc_port), rxq_index, flow_id, rc);
 
        return rc;
 }
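
The rewrite above replaces the hand-rolled VLAN/IP header parsing with the kernel flow dissector. A minimal sketch of consuming struct flow_keys the same way, IPv4 only and with the filter-building step elided:

	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;	/* packet not dissectable */
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;	/* fragments carry no ports */
	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		__be32 src = fk.addrs.v4addrs.src, dst = fk.addrs.v4addrs.dst;
		__be16 sport = fk.ports.src, dport = fk.ports.dst;
		u8 ip_proto = fk.basic.ip_proto;

		/* build and insert a 5-tuple filter from these fields */
	}
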
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
 {
        bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
-       unsigned int index, size;
+       unsigned int channel_idx, index, size;
        u32 flow_id;
 
        if (!spin_trylock_bh(&efx->filter_lock))
                return false;
 
        expire_one = efx->type->filter_rfs_expire_one;
+       channel_idx = efx->rps_expire_channel;
        index = efx->rps_expire_index;
        size = efx->type->max_rx_ip_filters;
        while (quota--) {
-               flow_id = efx->rps_flow_id[index];
-               if (expire_one(efx, flow_id, index))
+               struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+               flow_id = channel->rps_flow_id[index];
+
+               if (flow_id != RPS_FLOW_ID_INVALID &&
+                   expire_one(efx, flow_id, index)) {
                        netif_info(efx, rx_status, efx->net_dev,
-                                  "expired filter %d [flow %u]\n",
-                                  index, flow_id);
-               if (++index == size)
+                                  "expired filter %d [queue %u flow %u]\n",
+                                  index, channel_idx, flow_id);
+                       channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+               }
+               if (++index == size) {
+                       if (++channel_idx == efx->n_channels)
+                               channel_idx = 0;
                        index = 0;
+               }
        }
+       efx->rps_expire_channel = channel_idx;
        efx->rps_expire_index = index;
 
        spin_unlock_bh(&efx->filter_lock);
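
With the per-channel tables, the expiry walk above keeps two cursors, rps_expire_channel and rps_expire_index, so each quota-limited call resumes where the previous one stopped and wraps across all channels. The cursor arithmetic in isolation (the expiry test itself is elided; names mirror the hunk):

	while (quota--) {
		u32 flow_id = table[channel][index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			/* test the filter; on expiry, write
			 * RPS_FLOW_ID_INVALID back into the slot
			 */
		}
		if (++index == table_size) {
			if (++channel == n_channels)
				channel = 0;
			index = 0;
		}
	}
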
index 8af25563f627034e49be2304ad8531ed6c19bd99..b5ab5e120bca3a8631e3634134f740449773e4b2 100644 (file)
@@ -116,7 +116,6 @@ struct smsc911x_data {
 
        struct phy_device *phy_dev;
        struct mii_bus *mii_bus;
-       int phy_irq[PHY_MAX_ADDR];
        unsigned int using_extphy;
        int last_duplex;
        int last_carrier;
@@ -1073,7 +1072,6 @@ static int smsc911x_mii_init(struct platform_device *pdev,
        pdata->mii_bus->priv = pdata;
        pdata->mii_bus->read = smsc911x_mii_read;
        pdata->mii_bus->write = smsc911x_mii_write;
-       memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));
 
        pdata->mii_bus->parent = &pdev->dev;
 
index 4f7283d05588b53be530270b481cc100d05eb63a..44da877d2483806daf07c2c57760774141d635bf 100644 (file)
@@ -156,7 +156,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
                struct netdev_hw_addr *ha;
 
                netdev_for_each_uc_addr(ha, dev) {
-                       dwmac4_set_umac_addr(ioaddr, ha->addr, reg);
+                       dwmac4_set_umac_addr(hw, ha->addr, reg);
                        reg++;
                }
        }
index eac45d0c75e2c0cc3855f69767737bc9fb7c440f..e4071265be76f2240915879f13130fa30e592a39 100644 (file)
@@ -2804,7 +2804,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
                                priv->tx_path_in_lpi_mode = true;
                        if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
                                priv->tx_path_in_lpi_mode = false;
-                       if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+                       if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
                                priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
                                                        priv->rx_tail_addr,
                                                        STMMAC_CHAN0);
@@ -3450,8 +3450,6 @@ int stmmac_resume(struct device *dev)
        if (!netif_running(ndev))
                return 0;
 
-       spin_lock_irqsave(&priv->lock, flags);
-
        /* Power Down bit, into the PM register, is cleared
         * automatically as soon as a magic packet or a Wake-up frame
         * is received. Anyway, it's better to manually clear
@@ -3459,7 +3457,9 @@ int stmmac_resume(struct device *dev)
         * from other devices (e.g. serial console).
         */
        if (device_may_wakeup(priv->device)) {
+               spin_lock_irqsave(&priv->lock, flags);
                priv->hw->mac->pmt(priv->hw, 0);
+               spin_unlock_irqrestore(&priv->lock, flags);
                priv->irq_wake = 0;
        } else {
                pinctrl_pm_select_default_state(priv->device);
@@ -3473,6 +3473,8 @@ int stmmac_resume(struct device *dev)
 
        netif_device_attach(ndev);
 
+       spin_lock_irqsave(&priv->lock, flags);
+
        priv->cur_rx = 0;
        priv->dirty_rx = 0;
        priv->dirty_tx = 0;
index 3f83c369f56c46f306d84916586e872c05b6067e..ec295851812b0fcc299f09aedcbc750369670b53 100644 (file)
@@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev)
                return -ENOMEM;
 
        if (mdio_bus_data->irqs)
-               memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq));
+               memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
 
 #ifdef CONFIG_OF
        if (priv->device->of_node)
index 4b08a2f52b3e6074218fb6ba876a639855818d3d..53190894f17a04d00cd8061ad95dcd2776ffcda6 100644 (file)
@@ -1339,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
        if (priv->coal_intvl != 0) {
                struct ethtool_coalesce coal;
 
-               coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+               coal.rx_coalesce_usecs = priv->coal_intvl;
                cpsw_set_coalesce(ndev, &coal);
        }
 
@@ -2505,8 +2505,6 @@ static int cpsw_probe(struct platform_device *pdev)
 clean_ale_ret:
        cpsw_ale_destroy(priv->ale);
 clean_dma_ret:
-       cpdma_chan_destroy(priv->txch);
-       cpdma_chan_destroy(priv->rxch);
        cpdma_ctlr_destroy(priv->dma);
 clean_runtime_disable_ret:
        pm_runtime_disable(&pdev->dev);
@@ -2534,8 +2532,6 @@ static int cpsw_remove(struct platform_device *pdev)
        unregister_netdev(ndev);
 
        cpsw_ale_destroy(priv->ale);
-       cpdma_chan_destroy(priv->txch);
-       cpdma_chan_destroy(priv->rxch);
        cpdma_ctlr_destroy(priv->dma);
        pm_runtime_disable(&pdev->dev);
        device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
index 0a15acc075b3abfac4ce3ac5eed3f1bbf1f0f76e..11213a38c7952be27cd032933881df9124ed5ac8 100644 (file)
@@ -462,7 +462,7 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
        if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
                struct mpipe_data *md = &mpipe_data[instance];
                struct skb_shared_hwtstamps shhwtstamps;
-               struct timespec ts;
+               struct timespec64 ts;
 
                shtx->tx_flags |= SKBTX_IN_PROGRESS;
                gxio_mpipe_get_timestamp(&md->context, &ts);
@@ -886,9 +886,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
 /* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
 static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
 {
-       struct timespec ts;
+       struct timespec64 ts;
 
-       getnstimeofday(&ts);
+       ktime_get_ts64(&ts);
        gxio_mpipe_set_timestamp(&md->context, &ts);
 
        mutex_init(&md->ptp_lock);
index b0be0234abf6a3bc8a1060434a2b5c6af88112f5..a957a1c7e5babd91b5fa824afa266956a7e8d4fc 100644 (file)
@@ -17,4 +17,4 @@ skfp-objs :=  skfddi.o    hwmtm.o    fplustm.o  smt.o      cfm.o     \
 #   projects. To keep the source common for all those drivers (and
 #   thus simplify fixes to it), please do not clean it up!
 
-ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
+ccflags-y := -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
index cadefe4fdaa2aa71cfab4dc8dc0dd4b326f5745b..9b3dc3c61e00b5839fbd6c98ea6b3383b753df84 100644 (file)
@@ -958,8 +958,8 @@ tx_error:
                dev->stats.collisions++;
        else if (err == -ENETUNREACH)
                dev->stats.tx_carrier_errors++;
-       else
-               dev->stats.tx_errors++;
+
+       dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 
@@ -1048,8 +1048,8 @@ tx_error:
                dev->stats.collisions++;
        else if (err == -ENETUNREACH)
                dev->stats.tx_carrier_errors++;
-       else
-               dev->stats.tx_errors++;
+
+       dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 #endif
@@ -1072,12 +1072,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
+       struct geneve_dev *geneve = netdev_priv(dev);
        /* The max_mtu calculation does not take account of GENEVE
         * options, to avoid excluding potentially valid
         * configurations.
         */
-       int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
-               - dev->hard_header_len;
+       int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
+
+       if (geneve->remote.sa.sa_family == AF_INET6)
+               max_mtu -= sizeof(struct ipv6hdr);
+       else
+               max_mtu -= sizeof(struct iphdr);
 
        if (new_mtu < 68)
                return -EINVAL;
@@ -1508,6 +1513,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
 {
        struct nlattr *tb[IFLA_MAX + 1];
        struct net_device *dev;
+       LIST_HEAD(list_kill);
        int err;
 
        memset(tb, 0, sizeof(tb));
@@ -1519,8 +1525,10 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
        err = geneve_configure(net, dev, &geneve_remote_unspec,
                               0, 0, 0, 0, htons(dst_port), true,
                               GENEVE_F_UDP_ZERO_CSUM6_RX);
-       if (err)
-               goto err;
+       if (err) {
+               free_netdev(dev);
+               return ERR_PTR(err);
+       }
 
        /* openvswitch users expect packet sizes to be unrestricted,
         * so set the largest MTU we can.
@@ -1529,10 +1537,15 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
        if (err)
                goto err;
 
+       err = rtnl_configure_link(dev, NULL);
+       if (err < 0)
+               goto err;
+
        return dev;
 
  err:
-       free_netdev(dev);
+       geneve_dellink(dev, &list_kill);
+       unregister_netdevice_many(&list_kill);
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
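
The error-path change above reflects a general netdev lifetime rule: free_netdev() is only valid while the device is unregistered; once registration (and here rtnl_configure_link()) has happened, teardown must go through the unregister machinery. In outline, with the two labels as hypothetical names:

err_setup:				/* before registration succeeded */
	free_netdev(dev);
	return ERR_PTR(err);

err_registered:				/* after registration succeeded */
	{
		LIST_HEAD(list_kill);

		geneve_dellink(dev, &list_kill);	/* queues the unregister */
		unregister_netdevice_many(&list_kill);	/* and flushes it */
		return ERR_PTR(err);
	}
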
index 47ee2c840b55e730a8913b9736e8fdef073574dc..8bcd78f9496638e30c313abb74d004729d0dae16 100644 (file)
@@ -605,12 +605,41 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
        dev_put(dev);
 }
 
+static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
+                                            unsigned char **iv,
+                                            struct scatterlist **sg)
+{
+       size_t size, iv_offset, sg_offset;
+       struct aead_request *req;
+       void *tmp;
+
+       size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
+       iv_offset = size;
+       size += GCM_AES_IV_LEN;
+
+       size = ALIGN(size, __alignof__(struct scatterlist));
+       sg_offset = size;
+       size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+
+       tmp = kmalloc(size, GFP_ATOMIC);
+       if (!tmp)
+               return NULL;
+
+       *iv = (unsigned char *)(tmp + iv_offset);
+       *sg = (struct scatterlist *)(tmp + sg_offset);
+       req = tmp;
+
+       aead_request_set_tfm(req, tfm);
+
+       return req;
+}
+
 static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                                      struct net_device *dev)
 {
        int ret;
-       struct scatterlist sg[MAX_SKB_FRAGS + 1];
-       unsigned char iv[GCM_AES_IV_LEN];
+       struct scatterlist *sg;
+       unsigned char *iv;
        struct ethhdr *eth;
        struct macsec_eth_header *hh;
        size_t unprotected_len;
@@ -668,8 +697,6 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
        macsec_fill_sectag(hh, secy, pn);
        macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
 
-       macsec_fill_iv(iv, secy->sci, pn);
-
        skb_put(skb, secy->icv_len);
 
        if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
@@ -684,13 +711,15 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                return ERR_PTR(-EINVAL);
        }
 
-       req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
+       req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
        if (!req) {
                macsec_txsa_put(tx_sa);
                kfree_skb(skb);
                return ERR_PTR(-ENOMEM);
        }
 
+       macsec_fill_iv(iv, secy->sci, pn);
+
        sg_init_table(sg, MAX_SKB_FRAGS + 1);
        skb_to_sgvec(skb, sg, 0, skb->len);
 
@@ -861,7 +890,6 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
 out:
        macsec_rxsa_put(rx_sa);
        dev_put(dev);
-       return;
 }
 
 static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
@@ -871,8 +899,8 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
                                      struct macsec_secy *secy)
 {
        int ret;
-       struct scatterlist sg[MAX_SKB_FRAGS + 1];
-       unsigned char iv[GCM_AES_IV_LEN];
+       struct scatterlist *sg;
+       unsigned char *iv;
        struct aead_request *req;
        struct macsec_eth_header *hdr;
        u16 icv_len = secy->icv_len;
@@ -882,7 +910,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
+       req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
        if (!req) {
                kfree_skb(skb);
                return ERR_PTR(-ENOMEM);
@@ -1234,7 +1262,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
        struct crypto_aead *tfm;
        int ret;
 
-       tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (!tfm || IS_ERR(tfm))
                return NULL;
 
@@ -2612,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
                u64_stats_update_begin(&secy_stats->syncp);
                secy_stats->stats.OutPktsUntagged++;
                u64_stats_update_end(&secy_stats->syncp);
+               skb->dev = macsec->real_dev;
                len = skb->len;
                ret = dev_queue_xmit(skb);
                count_tx(dev, ret, len);
@@ -3361,6 +3390,7 @@ static void __exit macsec_exit(void)
        genl_unregister_family(&macsec_fam);
        rtnl_link_unregister(&macsec_link_ops);
        unregister_netdevice_notifier(&macsec_notifier);
+       rcu_barrier();
 }
 
 module_init(macsec_init);
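
Dropping CRYPTO_ALG_ASYNC above means the AEAD implementation may complete asynchronously, so the IV and scatterlist can no longer live on the caller's stack; macsec_alloc_req therefore carves the request, IV and scatterlist out of a single heap block. The carving pattern reduced to its essentials (iv_len and n_entries are illustrative):

	size_t size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	size_t iv_off = size;

	size += iv_len;				/* IV right after the request */
	size = ALIGN(size, __alignof__(struct scatterlist));
	size_t sg_off = size;			/* scatterlist needs alignment */
	size += n_entries * sizeof(struct scatterlist);

	void *buf = kmalloc(size, GFP_ATOMIC);	/* one allocation, one kfree */
	if (!buf)
		return NULL;
	*iv = buf + iv_off;
	*sg = buf + sg_off;

A single kfree(req) in the completion path then releases all three objects at once.
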
index 2afa61b51d411d45bd0b673e6cc93125c2eb9927..91177a4a32ad21c1cbea044424659f5604cef7de 100644 (file)
@@ -57,6 +57,7 @@
 
 /* PHY CTRL bits */
 #define DP83867_PHYCR_FIFO_DEPTH_SHIFT         14
+#define DP83867_PHYCR_FIFO_DEPTH_MASK          (3 << 14)
 
 /* RGMIIDCTL bits */
 #define DP83867_RGMII_TX_CLK_DELAY_SHIFT       4
@@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
 static int dp83867_config_init(struct phy_device *phydev)
 {
        struct dp83867_private *dp83867;
-       int ret;
-       u16 val, delay;
+       int ret, val;
+       u16 delay;
 
        if (!phydev->priv) {
                dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
        }
 
        if (phy_interface_is_rgmii(phydev)) {
-               ret = phy_write(phydev, MII_DP83867_PHYCTRL,
-                       (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+               val = phy_read(phydev, MII_DP83867_PHYCTRL);
+               if (val < 0)
+                       return val;
+               val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
+               val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+               ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
                if (ret)
                        return ret;
        }
index 2d2e4339f0df40b5379be29b12f87587db1944a3..9ec7f735343482eb4db054daa5fb5af4cec236ba 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/gpio.h>
+#include <linux/idr.h>
 
 #define MII_REGS_NUM 29
 
@@ -286,6 +287,8 @@ err_regs:
 }
 EXPORT_SYMBOL_GPL(fixed_phy_add);
 
+static DEFINE_IDA(phy_fixed_ida);
+
 static void fixed_phy_del(int phy_addr)
 {
        struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -297,14 +300,12 @@ static void fixed_phy_del(int phy_addr)
                        if (gpio_is_valid(fp->link_gpio))
                                gpio_free(fp->link_gpio);
                        kfree(fp);
+                       ida_simple_remove(&phy_fixed_ida, phy_addr);
                        return;
                }
        }
 }
 
-static int phy_fixed_addr;
-static DEFINE_SPINLOCK(phy_fixed_addr_lock);
-
 struct phy_device *fixed_phy_register(unsigned int irq,
                                      struct fixed_phy_status *status,
                                      int link_gpio,
@@ -319,17 +320,15 @@ struct phy_device *fixed_phy_register(unsigned int irq,
                return ERR_PTR(-EPROBE_DEFER);
 
        /* Get the next available PHY address, up to PHY_MAX_ADDR */
-       spin_lock(&phy_fixed_addr_lock);
-       if (phy_fixed_addr == PHY_MAX_ADDR) {
-               spin_unlock(&phy_fixed_addr_lock);
-               return ERR_PTR(-ENOSPC);
-       }
-       phy_addr = phy_fixed_addr++;
-       spin_unlock(&phy_fixed_addr_lock);
+       phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
+       if (phy_addr < 0)
+               return ERR_PTR(phy_addr);
 
        ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
-       if (ret < 0)
+       if (ret < 0) {
+               ida_simple_remove(&phy_fixed_ida, phy_addr);
                return ERR_PTR(ret);
+       }
 
        phy = get_phy_device(fmb->mii_bus, phy_addr, false);
        if (IS_ERR(phy)) {
@@ -434,6 +433,7 @@ static void __exit fixed_mdio_bus_exit(void)
                list_del(&fp->node);
                kfree(fp);
        }
+       ida_destroy(&phy_fixed_ida);
 }
 module_exit(fixed_mdio_bus_exit);
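
The fixed-PHY hunks above trade a lock-protected monotonic counter for an IDA, which both drops the lock and lets the address of a deleted PHY be reused by the next registration. The pattern in isolation:

static DEFINE_IDA(example_ida);

	int id = ida_simple_get(&example_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);

	if (id < 0)
		return ERR_PTR(id);	/* -ENOSPC once the range is full */
	/* ... use id; on failure or teardown: */
	ida_simple_remove(&example_ida, id);
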
 
index 280e8795b46367af40d717d9b16d56f748d6f585..ec2c1eee64051919a7623abf160eacf4233b57de 100644 (file)
@@ -285,6 +285,48 @@ static int marvell_config_aneg(struct phy_device *phydev)
        return 0;
 }
 
+static int m88e1111_config_aneg(struct phy_device *phydev)
+{
+       int err;
+
+       /* The Marvell PHY has an erratum which requires
+        * that certain registers get written in order
+        * to restart autonegotiation
+        */
+       err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+       err = marvell_set_polarity(phydev, phydev->mdix);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
+                       MII_M1111_PHY_LED_DIRECT);
+       if (err < 0)
+               return err;
+
+       err = genphy_config_aneg(phydev);
+       if (err < 0)
+               return err;
+
+       if (phydev->autoneg != AUTONEG_ENABLE) {
+               int bmcr;
+
+               /* A write to the speed/duplex bits (performed by the
+                * genphy_config_aneg() call above) must be followed by
+                * a software reset. Otherwise, the write has no effect.
+                */
+               bmcr = phy_read(phydev, MII_BMCR);
+               if (bmcr < 0)
+                       return bmcr;
+
+               err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
 #ifdef CONFIG_OF_MDIO
 /*
  * Set and/or override some configuration registers based on the
@@ -407,15 +449,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
-       phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
-       phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
-       phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
-
-       err = genphy_config_aneg(phydev);
-
-       return err;
+       return genphy_config_aneg(phydev);
 }
 
 static int m88e1318_config_aneg(struct phy_device *phydev)
@@ -636,6 +670,28 @@ static int m88e1111_config_init(struct phy_device *phydev)
        return phy_write(phydev, MII_BMCR, BMCR_RESET);
 }
 
+static int m88e1121_config_init(struct phy_device *phydev)
+{
+       int err, oldpage;
+
+       oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+
+       err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+       if (err < 0)
+               return err;
+
+       /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+       err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
+                       MII_88E1121_PHY_LED_DEF);
+       if (err < 0)
+               return err;
+
+       phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+
+       /* Set marvell,reg-init configuration from device tree */
+       return marvell_config_init(phydev);
+}
+
 static int m88e1510_config_init(struct phy_device *phydev)
 {
        int err;
@@ -668,7 +724,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
                        return err;
        }
 
-       return marvell_config_init(phydev);
+       return m88e1121_config_init(phydev);
 }
 
 static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -1161,7 +1217,7 @@ static struct phy_driver marvell_drivers[] = {
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
                .config_init = &m88e1111_config_init,
-               .config_aneg = &marvell_config_aneg,
+               .config_aneg = &m88e1111_config_aneg,
                .read_status = &marvell_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
                .config_intr = &marvell_config_intr,
@@ -1196,7 +1252,7 @@ static struct phy_driver marvell_drivers[] = {
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
-               .config_init = &marvell_config_init,
+               .config_init = &m88e1121_config_init,
                .config_aneg = &m88e1121_config_aneg,
                .read_status = &marvell_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
@@ -1215,7 +1271,7 @@ static struct phy_driver marvell_drivers[] = {
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .probe = marvell_probe,
-               .config_init = &marvell_config_init,
+               .config_init = &m88e1121_config_init,
                .config_aneg = &m88e1318_config_aneg,
                .read_status = &marvell_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
index 2e21e9366f76a184aa2c8d770557e6d9ff0db235..b62c4aaee40bd8986a80965a615d327e9c270293 100644 (file)
@@ -75,22 +75,13 @@ static int smsc_phy_reset(struct phy_device *phydev)
         * in all capable mode before using it.
         */
        if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
-               int timeout = 50000;
-
-               /* set "all capable" mode and reset the phy */
+               /* set "all capable" mode */
                rc |= MII_LAN83C185_MODE_ALL;
                phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
-               phy_write(phydev, MII_BMCR, BMCR_RESET);
-
-               /* wait end of reset (max 500 ms) */
-               do {
-                       udelay(10);
-                       if (timeout-- == 0)
-                               return -1;
-                       rc = phy_read(phydev, MII_BMCR);
-               } while (rc & BMCR_RESET);
        }
-       return 0;
+
+       /* reset the phy */
+       return genphy_soft_reset(phydev);
 }
 
 static int lan911x_config_init(struct phy_device *phydev)
index 8dedafa1a95d0b2f8e1db526cc64770876488f46..a30ee427efab3330492c9cda8ceb65259c190423 100644 (file)
@@ -2601,8 +2601,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
        spin_lock_bh(&pn->all_channels_lock);
        list_del(&pch->list);
        spin_unlock_bh(&pn->all_channels_lock);
-       put_net(pch->chan_net);
-       pch->chan_net = NULL;
 
        pch->file.dead = 1;
        wake_up_interruptible(&pch->file.rwait);
@@ -3136,6 +3134,9 @@ ppp_disconnect_channel(struct channel *pch)
  */
 static void ppp_destroy_channel(struct channel *pch)
 {
+       put_net(pch->chan_net);
+       pch->chan_net = NULL;
+
        atomic_dec(&channel_count);
 
        if (!pch->file.dead) {
index a0f64cba86badceadeb86cfb6fd11554b6cd27ea..fdee772073236a0542eabc00461957e487fa1370 100644 (file)
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
 #define TEAM_ENC_FEATURES      (NETIF_F_HW_CSUM | NETIF_F_SG | \
                                 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
 
-static void __team_compute_features(struct team *team)
+static void ___team_compute_features(struct team *team)
 {
        struct team_port *port;
        u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team)
        team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
                team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
+}
 
+static void __team_compute_features(struct team *team)
+{
+       ___team_compute_features(team);
        netdev_change_features(team->dev);
 }
 
 static void team_compute_features(struct team *team)
 {
        mutex_lock(&team->lock);
-       __team_compute_features(team);
+       ___team_compute_features(team);
        mutex_unlock(&team->lock);
+       netdev_change_features(team->dev);
 }
 
 static int team_port_enter(struct team *team, struct team_port *port)
@@ -1198,8 +1203,10 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                goto err_dev_open;
        }
 
+       netif_addr_lock_bh(dev);
        dev_uc_sync_multiple(port_dev, dev);
        dev_mc_sync_multiple(port_dev, dev);
+       netif_addr_unlock_bh(dev);
 
        err = vlan_vids_add_by_dev(port_dev, dev);
        if (err) {
index 53759c315b97aeeb54ce5b41fc0e4ec73ee8fac8..877c9516e78174dc41bd1e9d3f4c506d193134a9 100644 (file)
@@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
        if (cdc_ncm_init(dev))
                goto error2;
 
+       /* Some firmwares need a pause here or they will silently fail
+        * to set up the interface properly.  This value was decided
+        * empirically on a Sierra Wireless MC7455 running 02.08.02.00
+        * firmware.
+        */
+       usleep_range(10000, 20000);
+
        /* configure data interface */
        temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
        if (temp) {
index 36cd7f016a8de63490dfcbe89e38f6d9e4e4b7a6..9bbe0161a2f45090fe0de085025fc2c91bf60fc8 100644 (file)
@@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb)
                goto goon;
        }
 
-       if (!count || count < 4)
+       if (count < 4)
                goto goon;
 
        rx_status = buf[count - 2];
index 3f9f6ed3eec4de5c14e43039e08d53b209e52cb4..e9654a6853818933334488c28a98dbb3073a84cf 100644 (file)
 #include <linux/mdio.h>
 #include <linux/usb/cdc.h>
 #include <linux/suspend.h>
+#include <linux/acpi.h>
 
 /* Information for net-next */
 #define NETNEXT_VERSION                "08"
 
 /* Information for net */
-#define NET_VERSION            "3"
+#define NET_VERSION            "5"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define USB_TX_DMA             0xd434
 #define USB_TOLERANCE          0xd490
 #define USB_LPM_CTRL           0xd41a
+#define USB_BMU_RESET          0xd4b0
 #define USB_UPS_CTRL           0xd800
 #define USB_MISC_0             0xd81a
 #define USB_POWER_CUT          0xd80a
 #define TEST_MODE_DISABLE      0x00000001
 #define TX_SIZE_ADJUST1                0x00000100
 
+/* USB_BMU_RESET */
+#define BMU_RESET_EP_IN                0x01
+#define BMU_RESET_EP_OUT       0x02
+
 /* USB_UPS_CTRL */
 #define POWER_CUT              0x0100
 
 /* SRAM_IMPEDANCE */
 #define RX_DRIVING_MASK                0x6000
 
+/* MAC PASSTHRU */
+#define AD_MASK                        0xfee0
+#define EFUSE                  0xcfdb
+#define PASS_THRU_MASK         0x1
+
 enum rtl_register_content {
        _1000bps        = 0x10,
        _100bps         = 0x08,
@@ -619,6 +630,7 @@ struct r8152 {
                int (*eee_get)(struct r8152 *, struct ethtool_eee *);
                int (*eee_set)(struct r8152 *, struct ethtool_eee *);
                bool (*in_nway)(struct r8152 *);
+               void (*autosuspend_en)(struct r8152 *tp, bool enable);
        } rtl_ops;
 
        int intr_interval;
@@ -1030,6 +1042,65 @@ out1:
        return ret;
 }
 
+/* Devices containing RTL8153-AD can support a persistent
+ * host system provided MAC address.
+ * Examples of this are Dell TB15 and Dell WD15 docks
+ */
+static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+{
+       acpi_status status;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       int ret = -EINVAL;
+       u32 ocp_data;
+       unsigned char buf[6];
+
+       /* test for -AD variant of RTL8153 */
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+       if ((ocp_data & AD_MASK) != 0x1000)
+               return -ENODEV;
+
+       /* test for MAC address pass-through bit */
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+       if ((ocp_data & PASS_THRU_MASK) != 1)
+               return -ENODEV;
+
+       /* returns _AUXMAC_#AABBCCDDEEFF# */
+       status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+       obj = (union acpi_object *)buffer.pointer;
+       if (!ACPI_SUCCESS(status))
+               return -ENODEV;
+       if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+               netif_warn(tp, probe, tp->netdev,
+                          "Invalid buffer when reading pass-thru MAC addr: "
+                          "(%d, %d)\n",
+                          obj->type, obj->string.length);
+               goto amacout;
+       }
+       if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
+           strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
+               netif_warn(tp, probe, tp->netdev,
+                          "Invalid header when reading pass-thru MAC addr\n");
+               goto amacout;
+       }
+       ret = hex2bin(buf, obj->string.pointer + 9, 6);
+       if (!(ret == 0 && is_valid_ether_addr(buf))) {
+               netif_warn(tp, probe, tp->netdev,
+                          "Invalid MAC when reading pass-thru MAC addr: "
+                          "%d, %pM\n", ret, buf);
+               ret = -EINVAL;
+               goto amacout;
+       }
+       memcpy(sa->sa_data, buf, 6);
+       ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
+       netif_info(tp, probe, tp->netdev,
+                  "Using pass-thru MAC addr %pM\n", sa->sa_data);
+
+amacout:
+       kfree(obj);
+       return ret;
+}
+
 static int set_ethernet_addr(struct r8152 *tp)
 {
        struct net_device *dev = tp->netdev;
@@ -1038,8 +1109,15 @@ static int set_ethernet_addr(struct r8152 *tp)
 
        if (tp->version == RTL_VER_01)
                ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
-       else
-               ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+       else {
+               /* if this is not an RTL8153-AD, no eFuse MAC pass-through is
+                * set, or the system doesn't provide a valid _SB.AMAC, this
+                * is expected to be non-zero
+                */
+               ret = vendor_mac_passthru_addr_read(tp, &sa);
+               if (ret < 0)
+                       ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+       }
 
        if (ret < 0) {
                netif_err(tp, probe, dev, "Get ether addr fail\n");
@@ -2169,7 +2247,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
 static void r8153_set_rx_early_size(struct r8152 *tp)
 {
        u32 mtu = tp->netdev->mtu;
-       u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
+       u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
 
        ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
 }
@@ -2290,10 +2368,6 @@ static u32 __rtl_get_wol(struct r8152 *tp)
        u32 ocp_data;
        u32 wolopts = 0;
 
-       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
-       if (!(ocp_data & LAN_WAKE_EN))
-               return 0;
-
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
        if (ocp_data & LINK_ON_WAKE_EN)
                wolopts |= WAKE_PHY;
@@ -2326,15 +2400,13 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
-       ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+       ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
        if (wolopts & WAKE_UCAST)
                ocp_data |= UWF_EN;
        if (wolopts & WAKE_BCAST)
                ocp_data |= BWF_EN;
        if (wolopts & WAKE_MCAST)
                ocp_data |= MWF_EN;
-       if (wolopts & WAKE_ANY)
-               ocp_data |= LAN_WAKE_EN;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
 
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
@@ -2403,9 +2475,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
        if (enable) {
                u32 ocp_data;
 
-               r8153_u1u2en(tp, false);
-               r8153_u2p3en(tp, false);
-
                __rtl_set_wol(tp, WAKE_ANY);
 
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2416,7 +2485,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
 
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
        } else {
+               u32 ocp_data;
+
                __rtl_set_wol(tp, tp->saved_wolopts);
+
+               ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+               ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+               ocp_data &= ~LINK_OFF_WAKE_EN;
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+               ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+       }
+}
+
+static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
+{
+       rtl_runtime_suspend_enable(tp, enable);
+
+       if (enable) {
+               r8153_u1u2en(tp, false);
+               r8153_u2p3en(tp, false);
+       } else {
                r8153_u2p3en(tp, true);
                r8153_u1u2en(tp, true);
        }
@@ -2456,6 +2546,17 @@ static void r8153_teredo_off(struct r8152 *tp)
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
 }
 
+static void rtl_reset_bmu(struct r8152 *tp)
+{
+       u32 ocp_data;
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET);
+       ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
+       ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+       ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
+       ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+}
+
 static void r8152_aldps_en(struct r8152 *tp, bool enable)
 {
        if (enable) {
@@ -2681,6 +2782,7 @@ static void r8153_first_init(struct r8152 *tp)
        r8153_hw_phy_cfg(tp);
 
        rtl8152_nic_reset(tp);
+       rtl_reset_bmu(tp);
 
        ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
        ocp_data &= ~NOW_IS_OOB;
@@ -2742,6 +2844,7 @@ static void r8153_enter_oob(struct r8152 *tp)
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
        rtl_disable(tp);
+       rtl_reset_bmu(tp);
 
        for (i = 0; i < 1000; i++) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2803,6 +2906,7 @@ static void rtl8153_disable(struct r8152 *tp)
 {
        r8153_aldps_en(tp, false);
        rtl_disable(tp);
+       rtl_reset_bmu(tp);
        r8153_aldps_en(tp, true);
        usb_enable_lpm(tp->udev);
 }
@@ -3382,15 +3486,11 @@ static void r8153_init(struct r8152 *tp)
        r8153_power_cut_en(tp, false);
        r8153_u1u2en(tp, true);
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
-                      PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
-                      U1U2_SPDWN_EN | L1_SPDWN_EN);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
-                      PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
-                      TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
-                      EEE_SPDWN_EN);
+       /* MAC clock speed down */
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
 
        r8153_enable_eee(tp);
        r8153_aldps_en(tp, true);
@@ -3497,7 +3597,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
                napi_disable(&tp->napi);
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                        rtl_stop_rx(tp);
-                       rtl_runtime_suspend_enable(tp, true);
+                       tp->rtl_ops.autosuspend_en(tp, true);
                } else {
                        cancel_delayed_work_sync(&tp->schedule);
                        tp->rtl_ops.down(tp);
@@ -3523,7 +3623,7 @@ static int rtl8152_resume(struct usb_interface *intf)
 
        if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-                       rtl_runtime_suspend_enable(tp, false);
+                       tp->rtl_ops.autosuspend_en(tp, false);
                        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
                        napi_disable(&tp->napi);
                        set_bit(WORK_ENABLE, &tp->flags);
@@ -3542,7 +3642,7 @@ static int rtl8152_resume(struct usb_interface *intf)
                usb_submit_urb(tp->intr_urb, GFP_KERNEL);
        } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                if (tp->netdev->flags & IFF_UP)
-                       rtl_runtime_suspend_enable(tp, false);
+                       tp->rtl_ops.autosuspend_en(tp, false);
                clear_bit(SELECTIVE_SUSPEND, &tp->flags);
        }
 
@@ -4122,6 +4222,7 @@ static int rtl_ops_init(struct r8152 *tp)
                ops->eee_get            = r8152_get_eee;
                ops->eee_set            = r8152_set_eee;
                ops->in_nway            = rtl8152_in_nway;
+               ops->autosuspend_en     = rtl_runtime_suspend_enable;
                break;
 
        case RTL_VER_03:
@@ -4137,6 +4238,7 @@ static int rtl_ops_init(struct r8152 *tp)
                ops->eee_get            = r8153_get_eee;
                ops->eee_set            = r8153_set_eee;
                ops->in_nway            = rtl8153_in_nway;
+               ops->autosuspend_en     = rtl8153_runtime_enable;
                break;
 
        default:
@@ -4323,3 +4425,4 @@ module_usb_driver(rtl8152_driver);
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
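
The pass-through lookup added above expects ACPI's \_SB.AMAC method to return the string _AUXMAC_#AABBCCDDEEFF#. A sketch of just the validation and decoding, where decode_auxmac is a hypothetical helper (the driver does this inline):

static int decode_auxmac(const char *s, size_t len, u8 mac[6])
{
	/* "_AUXMAC_#" + 12 hex digits + "#": 22 chars, 0x17 counting the NUL */
	if (len != 0x17 || strncmp(s, "_AUXMAC_#", 9) != 0 || s[21] != '#')
		return -EINVAL;
	return hex2bin(mac, s + 9, 6);	/* nonzero on a malformed digit */
}
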
index d9d2806a47b12aa0e7a7e7838e31c100fe22f1d7..dc989a8b5afbd62b07cd65a2fa5edde67d3cc5fb 100644 (file)
@@ -61,6 +61,8 @@
 #define SUSPEND_ALLMODES               (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
                                         SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
 
+#define CARRIER_CHECK_DELAY (2 * HZ)
+
 struct smsc95xx_priv {
        u32 mac_cr;
        u32 hash_hi;
@@ -69,6 +71,9 @@ struct smsc95xx_priv {
        spinlock_t mac_cr_lock;
        u8 features;
        u8 suspend_flags;
+       bool link_ok;
+       struct delayed_work carrier_check;
+       struct usbnet *dev;
 };
 
 static bool turbo_mode = true;
@@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
                            intdata);
 }
 
+static void set_carrier(struct usbnet *dev, bool link)
+{
+       struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+       if (pdata->link_ok == link)
+               return;
+
+       pdata->link_ok = link;
+
+       if (link)
+               usbnet_link_change(dev, 1, 0);
+       else
+               usbnet_link_change(dev, 0, 0);
+}
+
+static void check_carrier(struct work_struct *work)
+{
+       struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
+                                               carrier_check.work);
+       struct usbnet *dev = pdata->dev;
+       int ret;
+
+       if (pdata->suspend_flags != 0)
+               return;
+
+       ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
+       if (ret < 0) {
+               netdev_warn(dev->net, "Failed to read MII_BMSR\n");
+               return;
+       }
+       if (ret & BMSR_LSTATUS)
+               set_carrier(dev, 1);
+       else
+               set_carrier(dev, 0);
+
+       schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+}
+
 /* Enable or disable Tx & Rx checksum offload engines */
 static int smsc95xx_set_features(struct net_device *netdev,
        netdev_features_t features)
@@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->flags |= IFF_MULTICAST;
        dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+       pdata->dev = dev;
+       INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
+       schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+
        return 0;
 }
 
 static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
        if (pdata) {
+               cancel_delayed_work(&pdata->carrier_check);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
                pdata = NULL;
@@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
 
        /* do this first to ensure it's cleared even in error case */
        pdata->suspend_flags = 0;
+       schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
 
        if (suspend_flags & SUSPEND_ALLMODES) {
                /* clear wake-up sources */
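
The carrier polling added to smsc95xx follows the usual self-rescheduling delayed-work shape: bind() and resume() schedule the first iteration, the work re-arms itself while the device is active, and suspend or unbind breaks the chain. In outline, with my_priv and the link sampling as placeholders:

static void my_poll(struct work_struct *work)
{
	struct my_priv *p = container_of(work, struct my_priv, poll.work);

	if (p->suspended)
		return;		/* suspend path lets the chain die */
	/* sample BMSR here; call usbnet_link_change() on a transition */
	schedule_delayed_work(&p->poll, 2 * HZ);	/* else re-arm */
}
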
index 61ba464049374593316e1c0b869774861ee79b5e..6086a0163249c5e05d8cdf92b1b834c3da4a71a7 100644 (file)
@@ -395,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
        dev->hard_mtu = net->mtu + net->hard_header_len;
        if (dev->rx_urb_size == old_hard_mtu) {
                dev->rx_urb_size = dev->hard_mtu;
-               if (dev->rx_urb_size > old_rx_urb_size)
+               if (dev->rx_urb_size > old_rx_urb_size) {
+                       usbnet_pause_rx(dev);
                        usbnet_unlink_rx_urbs(dev);
+                       usbnet_resume_rx(dev);
+               }
        }
 
        /* max qlen depend on hard_mtu and rx_urb_size */
@@ -1508,8 +1511,9 @@ static void usbnet_bh (unsigned long param)
        } else if (netif_running (dev->net) &&
                   netif_device_present (dev->net) &&
                   netif_carrier_ok(dev->net) &&
-                  !timer_pending (&dev->delay) &&
-                  !test_bit (EVENT_RX_HALT, &dev->flags)) {
+                  !timer_pending(&dev->delay) &&
+                  !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
+                  !test_bit(EVENT_RX_HALT, &dev->flags)) {
                int     temp = dev->rxq.qlen;
 
                if (temp < RX_QLEN(dev)) {
index 49d84e54034338ad230183eb9c414c5802c2944f..e0638e556fe7d4a8d5e7dede9a474717dc4ac646 100644 (file)
@@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev)
 
        virtio_device_ready(vdev);
 
-       /* Last of all, set up some receive buffers. */
-       for (i = 0; i < vi->curr_queue_pairs; i++) {
-               try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
-
-               /* If we didn't even get one input buffer, we're useless. */
-               if (vi->rq[i].vq->num_free ==
-                   virtqueue_get_vring_size(vi->rq[i].vq)) {
-                       free_unused_bufs(vi);
-                       err = -ENOMEM;
-                       goto free_recv_bufs;
-               }
-       }
-
        vi->nb.notifier_call = &virtnet_cpu_callback;
        err = register_hotcpu_notifier(&vi->nb);
        if (err) {
                pr_debug("virtio_net: registering cpu notifier failed\n");
-               goto free_recv_bufs;
+               goto free_unregister_netdev;
        }
 
        /* Assume link up if device can't report link status,
@@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev)
 
        return 0;
 
-free_recv_bufs:
+free_unregister_netdev:
        vi->vdev->config->reset(vdev);
 
-       free_receive_bufs(vi);
        unregister_netdev(dev);
 free_vqs:
        cancel_delayed_work_sync(&vi->refill);
index db8022ae415bd234c19c8348dc6f90697d6ad840..08885bc8d6db6a11a775b30d3a39b4e9bc826f8e 100644 (file)
@@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                                rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
 
                                segCnt = rcdlro->segCnt;
-                               BUG_ON(segCnt <= 1);
+                               WARN_ON_ONCE(segCnt == 0);
                                mss = rcdlro->mss;
                                if (unlikely(segCnt <= 1))
                                        segCnt = 0;
index c4825392d64b6c8de17822532d113689f88d8bb7..3d2b64e63408d69ae86fb15b2b18368ab01b5025 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.8.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040700
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040800
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index dff08842f26d034dc21a7a73b8a1c9b0cb72236a..8bd8c7e1ee8724c56c670850c9ecc20590a68707 100644 (file)
@@ -304,7 +304,7 @@ static int vrf_rt6_create(struct net_device *dev)
        dst_hold(&rt6->dst);
 
        rt6->rt6i_table = rt6i_table;
-       rt6->dst.output = vrf_output6;
+       rt6->dst.output = vrf_output6;
        rcu_assign_pointer(vrf->rt6, rt6);
 
        rc = 0;
@@ -403,7 +403,7 @@ static int vrf_rtable_create(struct net_device *dev)
        if (!rth)
                return -ENOMEM;
 
-       rth->dst.output = vrf_output;
+       rth->dst.output = vrf_output;
        rth->rt_table_id = vrf->tb_id;
 
        rcu_assign_pointer(vrf->rth, rth);
index 8ff30c3bdfceab5e5aca07bfaeb8635bec96ae0e..b3b9db68f758a2062a2f6bde2bfb9805d271cc6c 100644 (file)
@@ -2952,30 +2952,6 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        return 0;
 }
 
-struct net_device *vxlan_dev_create(struct net *net, const char *name,
-                                   u8 name_assign_type, struct vxlan_config *conf)
-{
-       struct nlattr *tb[IFLA_MAX+1];
-       struct net_device *dev;
-       int err;
-
-       memset(&tb, 0, sizeof(tb));
-
-       dev = rtnl_create_link(net, name, name_assign_type,
-                              &vxlan_link_ops, tb);
-       if (IS_ERR(dev))
-               return dev;
-
-       err = vxlan_dev_configure(net, dev, conf);
-       if (err < 0) {
-               free_netdev(dev);
-               return ERR_PTR(err);
-       }
-
-       return dev;
-}
-EXPORT_SYMBOL_GPL(vxlan_dev_create);
-
 static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
@@ -3086,6 +3062,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
                conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
 
+       if (tb[IFLA_MTU])
+               conf.mtu = nla_get_u32(tb[IFLA_MTU]);
+
        err = vxlan_dev_configure(src_net, dev, &conf);
        switch (err) {
        case -ENODEV:
@@ -3265,6 +3244,40 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
        .get_link_net   = vxlan_get_link_net,
 };
 
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+                                   u8 name_assign_type,
+                                   struct vxlan_config *conf)
+{
+       struct nlattr *tb[IFLA_MAX + 1];
+       struct net_device *dev;
+       int err;
+
+       memset(&tb, 0, sizeof(tb));
+
+       dev = rtnl_create_link(net, name, name_assign_type,
+                              &vxlan_link_ops, tb);
+       if (IS_ERR(dev))
+               return dev;
+
+       err = vxlan_dev_configure(net, dev, conf);
+       if (err < 0) {
+               free_netdev(dev);
+               return ERR_PTR(err);
+       }
+
+       err = rtnl_configure_link(dev, NULL);
+       if (err < 0) {
+               LIST_HEAD(list_kill);
+
+               vxlan_dellink(dev, &list_kill);
+               unregister_netdevice_many(&list_kill);
+               return ERR_PTR(err);
+       }
+
+       return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
 static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
                                             struct net_device *dev)
 {
index 49af62428c8864571593a98c0682f97170cfecd2..a92a0ba829f5c19c8237b539be0b9db184efe31b 100644 (file)
@@ -1083,7 +1083,7 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
                        }
 
                        ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
-                                       ar->running_fw->fw_file.fw_features,
+                                       fw_file->fw_features,
                                        sizeof(fw_file->fw_features));
                        break;
                case ATH10K_FW_IE_FW_IMAGE:
index cc979a4faeb02621b230842dc8cef71a55783bdf..813cdd2621a168ff5696b723ca924f874b2a3a57 100644 (file)
@@ -1904,7 +1904,6 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
                        return;
                }
        }
-       ath10k_htt_rx_msdu_buff_replenish(htt);
 }
 
 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
index 6dd1d26b357f5dc8076c72ef3f2c05f3594ff2ce..4040f9413e86e8d06df8e94e31759cd1c8096c90 100644 (file)
@@ -679,10 +679,10 @@ static int ath10k_peer_create(struct ath10k *ar,
 
        peer = ath10k_peer_find(ar, vdev_id, addr);
        if (!peer) {
+               spin_unlock_bh(&ar->data_lock);
                ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
                            addr, vdev_id);
                ath10k_wmi_peer_delete(ar, vdev_id, addr);
-               spin_unlock_bh(&ar->data_lock);
                return -ENOENT;
        }
 
index 9272ca90632b983b5eec1a57612ac29307f15973..80ff69f9922977e6810e34efa62641ea72316318 100644 (file)
@@ -1122,12 +1122,12 @@ enum {
 #define AR9300_NUM_GPIO                          16
 #define AR9330_NUM_GPIO                                 16
 #define AR9340_NUM_GPIO                                 23
-#define AR9462_NUM_GPIO                                 10
+#define AR9462_NUM_GPIO                                 14
 #define AR9485_NUM_GPIO                                 12
 #define AR9531_NUM_GPIO                                 18
 #define AR9550_NUM_GPIO                                 24
 #define AR9561_NUM_GPIO                                 23
-#define AR9565_NUM_GPIO                                 12
+#define AR9565_NUM_GPIO                                 14
 #define AR9580_NUM_GPIO                                 16
 #define AR7010_NUM_GPIO                          16
 
@@ -1139,12 +1139,12 @@ enum {
 #define AR9300_GPIO_MASK                        0x0000F4FF
 #define AR9330_GPIO_MASK                        0x0000F4FF
 #define AR9340_GPIO_MASK                        0x0000000F
-#define AR9462_GPIO_MASK                        0x000003FF
+#define AR9462_GPIO_MASK                        0x00003FFF
 #define AR9485_GPIO_MASK                        0x00000FFF
 #define AR9531_GPIO_MASK                        0x0000000F
 #define AR9550_GPIO_MASK                        0x0000000F
 #define AR9561_GPIO_MASK                        0x0000000F
-#define AR9565_GPIO_MASK                        0x00000FFF
+#define AR9565_GPIO_MASK                        0x00003FFF
 #define AR9580_GPIO_MASK                        0x0000F4FF
 #define AR7010_GPIO_MASK                        0x0000FFFF
 
index d0631b6cfd5302ee7d73c25937f29f8184e0be83..62f475e31077ca1fa1df15863f02ee0911c46997 100644 (file)
@@ -2540,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                           const u8 *mac, struct station_info *sinfo)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
+       struct brcmf_scb_val_le scb_val;
        s32 err = 0;
        struct brcmf_sta_info_le sta_info_le;
        u32 sta_flags;
        u32 is_tdls_peer;
        s32 total_rssi;
        s32 count_rssi;
+       int rssi;
        u32 i;
 
        brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
@@ -2629,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                        sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
                        total_rssi /= count_rssi;
                        sinfo->signal = total_rssi;
+               } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+                       &ifp->vif->sme_state)) {
+                       memset(&scb_val, 0, sizeof(scb_val));
+                       err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+                                                    &scb_val, sizeof(scb_val));
+                       if (err) {
+                               brcmf_err("Could not get rssi (%d)\n", err);
+                               goto done;
+                       } else {
+                               rssi = le32_to_cpu(scb_val.val);
+                               sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+                               sinfo->signal = rssi;
+                               brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+                       }
                }
        }
 done:
index 68f1ce02f4bf83d8d787e256cb5d74628cc4040e..2b9a2bc429d6fc23a49aecad27e36e14ac5408ce 100644 (file)
@@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
                brcmu_pkt_buf_free_skb(skb);
                return;
        }
+
+       skb->protocol = eth_type_trans(skb, ifp->ndev);
        brcmf_netif_rx(ifp, skb);
 }
 
index e5f267b21316ca413f41ed0741a9144c4e7d0f17..18a8474b5760ca6ce6c5636a0e39770232ae818a 100644 (file)
@@ -3851,8 +3851,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
        if (idx != 0)
                return -ENOENT;
 
-       if (fw_has_capa(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+       if (!fw_has_capa(&mvm->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return -ENOENT;
 
        mutex_lock(&mvm->mutex);
@@ -3898,8 +3898,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-       if (fw_has_capa(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+       if (!fw_has_capa(&mvm->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return;
 
        /* if beacon filtering isn't on mac80211 does it anyway */
index ac2c5718e454d732e0ac3ed42a716e07ec8df402..2c61516d06fff52318d972feb0defbab41654d62 100644 (file)
@@ -581,7 +581,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
                            struct iwl_rx_mpdu_desc *desc)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_sta *mvm_sta;
        struct iwl_mvm_baid_data *baid_data;
        struct iwl_mvm_reorder_buffer *buffer;
        struct sk_buff *tail;
@@ -604,6 +604,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
        if (WARN_ON(IS_ERR_OR_NULL(sta)))
                return false;
 
+       mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
        /* not a data packet */
        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1))
index 6f609dd5c2220dc3b038e4d74b6407cee27a990e..e78fc567ff7d83b26161fd709092493107107cf9 100644 (file)
@@ -1222,7 +1222,7 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
        return -EIO;
 }
 
-#define SCAN_TIMEOUT (16 * HZ)
+#define SCAN_TIMEOUT (20 * HZ)
 
 void iwl_mvm_scan_timeout(unsigned long data)
 {
index fea4d3437e2f29543a2322af44d7892817125fe2..b23ab4a4504f7f140877ff734682b92607e276b6 100644 (file)
@@ -1852,12 +1852,18 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
            mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
                u8 sta_id = mvmvif->ap_sta_id;
 
+               sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+                                           lockdep_is_held(&mvm->mutex));
+
                /*
                 * It is possible that the 'sta' parameter is NULL,
                 * for example when a GTK is removed - the sta_id will then
                 * be the AP ID, and no station was passed by mac80211.
                 */
-               return iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+               if (IS_ERR_OR_NULL(sta))
+                       return NULL;
+
+               return iwl_mvm_sta_from_mac80211(sta);
        }
 
        return NULL;
@@ -1955,6 +1961,14 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
                struct ieee80211_key_seq seq;
                const u8 *pn;
 
+               switch (keyconf->cipher) {
+               case WLAN_CIPHER_SUITE_AES_CMAC:
+                       igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
                memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                pn = seq.aes_cmac.pn;
index 9ed0ed1bf51481e4458dbeae2d105731bbf0f575..4dd5adcdd29bafae3f1432b87758ea2468189dbe 100644 (file)
@@ -2776,6 +2776,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
        if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
            !info->attrs[HWSIM_ATTR_FLAGS] ||
            !info->attrs[HWSIM_ATTR_COOKIE] ||
+           !info->attrs[HWSIM_ATTR_SIGNAL] ||
            !info->attrs[HWSIM_ATTR_TX_INFO])
                goto out;
 
index fe19ace0d6a01bf75ff0f6e2d9ba38b6c32b2de3..b04cf30f3959221c99a4c53cd5f12345fee27017 100644 (file)
@@ -1149,7 +1149,7 @@ static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
 
                for (i = 0; i < retry; i++) {
                        path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
-                       if (path_a_ok == 0x03) {
+                       if (path_b_ok == 0x03) {
                                val32 = rtl8xxxu_read32(priv,
                                                        REG_RX_POWER_BEFORE_IQK_B_2);
                                result[t][6] = (val32 >> 16) & 0x3ff;
index 0f48048b865407fe8d984b1057bd262ccd28dc7f..3a0faa8fe9d46deef63514afeba5dbbb9ecd7d14 100644 (file)
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m);
 void rtl_addr_delay(u32 addr)
 {
        if (addr == 0xfe)
-               msleep(50);
+               mdelay(50);
        else if (addr == 0xfd)
                msleep(5);
        else if (addr == 0xfc)
@@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
                rtl_addr_delay(addr);
        } else {
                rtl_set_rfreg(hw, rfpath, addr, mask, data);
-               usleep_range(1, 2);
+               udelay(1);
        }
 }
 EXPORT_SYMBOL(rtl_rfreg_delay);
@@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
                rtl_addr_delay(addr);
        } else {
                rtl_set_bbreg(hw, addr, MASKDWORD, data);
-               usleep_range(1, 2);
+               udelay(1);
        }
 }
 EXPORT_SYMBOL(rtl_bb_delay);
index f7718ec685fadaee4e1460210b3733b5950acfc5..cea8350fbc7ec2f2e617199fd2bc8b8cb1df2fbb 100644 (file)
@@ -344,6 +344,8 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 {
        u64 checksum, offset;
+       unsigned long align;
+       enum nd_pfn_mode mode;
        struct nd_namespace_io *nsio;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
@@ -386,22 +388,50 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
                return -ENXIO;
        }
 
+       align = le32_to_cpu(pfn_sb->align);
+       offset = le64_to_cpu(pfn_sb->dataoff);
+       if (align == 0)
+               align = 1UL << ilog2(offset);
+       mode = le32_to_cpu(pfn_sb->mode);
+
        if (!nd_pfn->uuid) {
-               /* from probe we allocate */
+               /*
+                * When probing a namespace via nd_pfn_probe() the uuid
+                * is NULL (see: nd_pfn_devinit()), so we initialize the
+                * settings from the pfn_sb.
+                */
                nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
                if (!nd_pfn->uuid)
                        return -ENOMEM;
+               nd_pfn->align = align;
+               nd_pfn->mode = mode;
        } else {
-               /* from init we validate */
+               /*
+                * When probing a pfn / dax instance we validate the
+                * live settings against the pfn_sb
+                */
                if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
                        return -ENODEV;
+
+               /*
+                * If the uuid validates but other settings mismatch,
+                * return -EINVAL because userspace has managed to change
+                * the configuration without specifying new
+                * identification.
+                */
+               if (nd_pfn->align != align || nd_pfn->mode != mode) {
+                       dev_err(&nd_pfn->dev,
+                                       "init failed, settings mismatch\n");
+                       dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
+                                       nd_pfn->align, align, nd_pfn->mode,
+                                       mode);
+                       return -EINVAL;
+               }
        }
 
-       if (nd_pfn->align == 0)
-               nd_pfn->align = le32_to_cpu(pfn_sb->align);
-       if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
+       if (align > nvdimm_namespace_capacity(ndns)) {
                dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
-                               nd_pfn->align, nvdimm_namespace_capacity(ndns));
+                               align, nvdimm_namespace_capacity(ndns));
                return -EINVAL;
        }
 
@@ -411,7 +441,6 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
         * namespace has changed since the pfn superblock was
         * established.
         */
-       offset = le64_to_cpu(pfn_sb->dataoff);
        nsio = to_nd_namespace_io(&ndns->dev);
        if (offset >= resource_size(&nsio->res)) {
                dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
@@ -419,10 +448,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
                return -EBUSY;
        }
 
-       if ((nd_pfn->align && !IS_ALIGNED(offset, nd_pfn->align))
+       if ((align && !IS_ALIGNED(offset, align))
                        || !IS_ALIGNED(offset, PAGE_SIZE)) {
-               dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
-                               offset);
+               dev_err(&nd_pfn->dev,
+                               "bad offset: %#llx dax disabled align: %#lx\n",
+                               offset, align);
                return -ENXIO;
        }
 
@@ -502,7 +532,6 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
        res->start += start_pad;
        res->end -= end_trunc;
 
-       nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
        if (nd_pfn->mode == PFN_MODE_RAM) {
                if (offset < SZ_8K)
                        return ERR_PTR(-EINVAL);
index 1a51584a382bf28dcf567a3fb039bc1a571cc144..d5fb55c0a9d95cdd2e6ac9cc99ca17f44b2d6ef7 100644 (file)
@@ -1394,19 +1394,22 @@ static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
        return nsa->ns_id - nsb->ns_id;
 }
 
-static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
-       struct nvme_ns *ns;
-
-       lockdep_assert_held(&ctrl->namespaces_mutex);
+       struct nvme_ns *ns, *ret = NULL;
 
+       mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               if (ns->ns_id == nsid)
-                       return ns;
+               if (ns->ns_id == nsid) {
+                       kref_get(&ns->kref);
+                       ret = ns;
+                       break;
+               }
                if (ns->ns_id > nsid)
                        break;
        }
-       return NULL;
+       mutex_unlock(&ctrl->namespaces_mutex);
+       return ret;
 }
 
 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
@@ -1415,8 +1418,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        struct gendisk *disk;
        int node = dev_to_node(ctrl->dev);
 
-       lockdep_assert_held(&ctrl->namespaces_mutex);
-
        ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
        if (!ns)
                return;
@@ -1457,7 +1458,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        if (nvme_revalidate_disk(ns->disk))
                goto out_free_disk;
 
-       list_add_tail_rcu(&ns->list, &ctrl->namespaces);
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_add_tail(&ns->list, &ctrl->namespaces);
+       mutex_unlock(&ctrl->namespaces_mutex);
+
        kref_get(&ctrl->kref);
        if (ns->type == NVME_NS_LIGHTNVM)
                return;
@@ -1480,8 +1484,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-       lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
        if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
                return;
 
@@ -1494,8 +1496,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
        }
+
+       mutex_lock(&ns->ctrl->namespaces_mutex);
        list_del_init(&ns->list);
-       synchronize_rcu();
+       mutex_unlock(&ns->ctrl->namespaces_mutex);
+
        nvme_put_ns(ns);
 }
 
@@ -1503,10 +1508,11 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
        struct nvme_ns *ns;
 
-       ns = nvme_find_ns(ctrl, nsid);
+       ns = nvme_find_get_ns(ctrl, nsid);
        if (ns) {
                if (revalidate_disk(ns->disk))
                        nvme_ns_remove(ns);
+               nvme_put_ns(ns);
        } else
                nvme_alloc_ns(ctrl, nsid);
 }
@@ -1535,9 +1541,11 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
                        nvme_validate_ns(ctrl, nsid);
 
                        while (++prev < nsid) {
-                               ns = nvme_find_ns(ctrl, prev);
-                               if (ns)
+                               ns = nvme_find_get_ns(ctrl, prev);
+                               if (ns) {
                                        nvme_ns_remove(ns);
+                                       nvme_put_ns(ns);
+                               }
                        }
                }
                nn -= j;
@@ -1552,8 +1560,6 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
        struct nvme_ns *ns, *next;
        unsigned i;
 
-       lockdep_assert_held(&ctrl->namespaces_mutex);
-
        for (i = 1; i <= nn; i++)
                nvme_validate_ns(ctrl, i);
 
@@ -1576,7 +1582,6 @@ static void nvme_scan_work(struct work_struct *work)
        if (nvme_identify_ctrl(ctrl, &id))
                return;
 
-       mutex_lock(&ctrl->namespaces_mutex);
        nn = le32_to_cpu(id->nn);
        if (ctrl->vs >= NVME_VS(1, 1) &&
            !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -1585,6 +1590,7 @@ static void nvme_scan_work(struct work_struct *work)
        }
        nvme_scan_ns_sequential(ctrl, nn);
  done:
+       mutex_lock(&ctrl->namespaces_mutex);
        list_sort(NULL, &ctrl->namespaces, ns_cmp);
        mutex_unlock(&ctrl->namespaces_mutex);
        kfree(id);
@@ -1604,6 +1610,11 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_queue_scan);
 
+/*
+ * This function iterates the namespace list unlocked to allow recovery from
+ * controller failure. It is up to the caller to ensure the namespace list is
+ * not modified by scan work while this function is executing.
+ */
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns, *next;
@@ -1617,10 +1628,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
        if (ctrl->state == NVME_CTRL_DEAD)
                nvme_kill_queues(ctrl);
 
-       mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
                nvme_ns_remove(ns);
-       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
 
@@ -1791,11 +1800,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
-               if (!kref_get_unless_zero(&ns->kref))
-                       continue;
-
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
                /*
                 * Revalidating a dead namespace sets capacity to 0. This will
                 * end buffered writers dirtying pages that can't be synced.
@@ -1806,10 +1812,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
                blk_set_queue_dying(ns->queue);
                blk_mq_abort_requeue_list(ns->queue);
                blk_mq_start_stopped_hw_queues(ns->queue, true);
-
-               nvme_put_ns(ns);
        }
-       rcu_read_unlock();
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
 
@@ -1817,8 +1821,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
                spin_lock_irq(ns->queue->queue_lock);
                queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
                spin_unlock_irq(ns->queue->queue_lock);
@@ -1826,7 +1830,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
                blk_mq_cancel_requeue_work(ns->queue);
                blk_mq_stop_hw_queues(ns->queue);
        }
-       rcu_read_unlock();
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_queues);
 
@@ -1834,13 +1838,13 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
                queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
                blk_mq_start_stopped_hw_queues(ns->queue, true);
                blk_mq_kick_requeue_list(ns->queue);
        }
-       rcu_read_unlock();
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
 
index 78dca3193ca4ccc61df9784ff7699834c61e08b2..befac5b19490ee91e13dec00c471931005b871b7 100644 (file)
@@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
 static void nvme_dev_unmap(struct nvme_dev *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev->dev);
+       int bars;
+
        if (dev->bar)
                iounmap(dev->bar);
-       pci_release_regions(to_pci_dev(dev->dev));
+
+       bars = pci_select_bars(pdev, IORESOURCE_MEM);
+       pci_release_selected_regions(pdev, bars);
 }
 
 static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 
        return 0;
   release:
-       pci_release_regions(pdev);
+       pci_release_selected_regions(pdev, bars);
        return -ENODEV;
 }
 
index 14f2f8c7c2607e603337b30a37117195f95f0880..33daffc4392c646a25455ebe5bf7f2ecdcc261b9 100644 (file)
@@ -395,7 +395,7 @@ static int unflatten_dt_nodes(const void *blob,
                              struct device_node **nodepp)
 {
        struct device_node *root;
-       int offset = 0, depth = 0;
+       int offset = 0, depth = 0, initial_depth = 0;
 #define FDT_MAX_DEPTH  64
        unsigned int fpsizes[FDT_MAX_DEPTH];
        struct device_node *nps[FDT_MAX_DEPTH];
@@ -405,11 +405,22 @@ static int unflatten_dt_nodes(const void *blob,
        if (nodepp)
                *nodepp = NULL;
 
+       /*
+        * We're unflattening a device sub-tree if @dad is valid. There may
+        * be multiple nodes in the first level of depth. We need to set
+        * @depth to 1 to make fdt_next_node() happy, as it bails out
+        * immediately when a negative @depth is found. Otherwise, the
+        * device nodes except the first one won't be unflattened.
+        */
+       if (dad)
+               depth = initial_depth = 1;
+
        root = dad;
        fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
        nps[depth] = dad;
+
        for (offset = 0;
-            offset >= 0 && depth >= 0;
+            offset >= 0 && depth >= initial_depth;
             offset = fdt_next_node(blob, offset, &depth)) {
                if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
                        continue;
index e7bfc175b8e1e9a0a171a3173191861399d21b1d..6ec743faabe8af781294eb2e275fb8eb16fd4344 100644 (file)
@@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 EXPORT_SYMBOL_GPL(of_irq_to_resource);
 
 /**
- * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
  * @dev: pointer to device tree node
- * @index: zero-based index of the irq
- *
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created.
+ * @index: zero-based index of the IRQ
  *
+ * Returns the Linux IRQ number on success, or 0 on IRQ mapping failure, or
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code in
+ * case of any other failure.
  */
 int of_irq_get(struct device_node *dev, int index)
 {
@@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index)
 EXPORT_SYMBOL_GPL(of_irq_get);
 
 /**
- * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
+ * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
  * @dev: pointer to device tree node
- * @name: irq name
+ * @name: IRQ name
  *
- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
- * is not yet created, or error code in case of any other failure.
+ * Returns the Linux IRQ number on success, or 0 on IRQ mapping failure, or
+ * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code in
+ * case of any other failure.
  */
 int of_irq_get_byname(struct device_node *dev, const char *name)
 {
index 2b2291bfc38f8409271c735d21ab0a62adcd9179..06af99f64ad87a4cbdab10b6e531a67bad94d82e 100644 (file)
@@ -128,8 +128,15 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
        }
 
        /* Need to adjust the alignment to satisfy the CMA requirement */
-       if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
-               align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+       if (IS_ENABLED(CONFIG_CMA)
+           && of_flat_dt_is_compatible(node, "shared-dma-pool")
+           && of_get_flat_dt_prop(node, "reusable", NULL)
+           && !of_get_flat_dt_prop(node, "no-map", NULL)) {
+               unsigned long order =
+                       max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+
+               align = max(align, (phys_addr_t)PAGE_SIZE << order);
+       }
 
        prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
        if (prop) {
index dfbab61a1b473d72cb6e631c6738d1c60fb99d96..1fa3a3219c454e23e0081b7073acd342909aacda 100644 (file)
@@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
                else
                        pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
                                              *(u16 *)buf);
-               buf += 2;
+               buf += 4;
        }
-       len += 2;
+       len += 4;
 
        /*
         * If we have any Low Priority VCs and a VC Arbitration Table Offset
index f2d01d4d93645a0b029561ba2febf7fed56ec276..140436a046c03f992c3fa3fd71ed485bfdb7d109 100644 (file)
@@ -950,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
 
                /* For SPIs, we need to track the affinity per IRQ */
                if (using_spi) {
-                       if (i >= pdev->num_resources) {
-                               of_node_put(dn);
+                       if (i >= pdev->num_resources)
                                break;
-                       }
 
                        irqs[i] = cpu;
                }
 
                /* Keep track of the CPUs containing this PMU type */
                cpumask_set_cpu(cpu, &pmu->supported_cpus);
-               of_node_put(dn);
                i++;
        } while (1);
 
@@ -995,9 +992,6 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 
        armpmu_init(pmu);
 
-       if (!__oprofile_cpu_pmu)
-               __oprofile_cpu_pmu = pmu;
-
        pmu->plat_device = pdev;
 
        if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
@@ -1016,8 +1010,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
                if (!ret)
                        ret = init_fn(pmu);
        } else {
-               ret = probe_current_pmu(pmu, probe_table);
                cpumask_setall(&pmu->supported_cpus);
+               ret = probe_current_pmu(pmu, probe_table);
        }
 
        if (ret) {
@@ -1033,6 +1027,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
        if (ret)
                goto out_destroy;
 
+       if (!__oprofile_cpu_pmu)
+               __oprofile_cpu_pmu = pmu;
+
        pr_info("enabled with %s PMU driver, %d counters available\n",
                        pmu->name, pmu->num_events);
 
@@ -1043,6 +1040,7 @@ out_destroy:
 out_free:
        pr_info("%s: failed to register PMU devices!\n",
                of_node_full_name(node));
+       kfree(pmu->irq_affinity);
        kfree(pmu);
        return ret;
 }
index 95ab6b2a0de5e1bc8dda4bdd640d2ff725f436ae..58dff80e9386d67d43ac5a1b9f67ba55f7c49788 100644 (file)
@@ -109,8 +109,8 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev)
        }
 
        usb2->phy = devm_phy_create(dev, NULL, &ops);
-       if (IS_ERR(dev))
-               return PTR_ERR(dev);
+       if (IS_ERR(usb2->phy))
+               return PTR_ERR(usb2->phy);
 
        phy_set_drvdata(usb2->phy, usb2);
        platform_set_drvdata(pdev, usb2);
index cc093ebfda9435a28ec627ca1f022b877c42b342..8b851f718123a1110016cfc462385b85a9da3bac 100644 (file)
@@ -233,8 +233,12 @@ static inline int __is_running(const struct exynos_mipi_phy_desc *data,
                        struct exynos_mipi_video_phy *state)
 {
        u32 val;
+       int ret;
+
+       ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
+       if (ret)
+               return 0;
 
-       regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
        return val & data->resetn_val;
 }
 
index 3acd2a1808dfbf9ccfa5d58467b6defdb793c2bb..213e2e15339c44dff742b2394455d7138e0d524e 100644 (file)
@@ -1143,7 +1143,8 @@ static int miphy28lp_probe_resets(struct device_node *node,
        struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
        int err;
 
-       miphy_phy->miphy_rst = of_reset_control_get(node, "miphy-sw-rst");
+       miphy_phy->miphy_rst =
+               of_reset_control_get_shared(node, "miphy-sw-rst");
 
        if (IS_ERR(miphy_phy->miphy_rst)) {
                dev_err(miphy_dev->dev,
index 76bb88f0700a72b8d06480daf4d8e2aa8b0b51de..4be3f5dbbc9f1d8b9dd416e8b930cb24b6ee3919 100644 (file)
@@ -144,12 +144,6 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
        extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
 }
 
-static bool rcar_gen3_check_vbus(struct rcar_gen3_chan *ch)
-{
-       return !!(readl(ch->base + USB2_ADPCTRL) &
-                 USB2_ADPCTRL_OTGSESSVLD);
-}
-
 static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
 {
        return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
@@ -157,13 +151,7 @@ static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
 
 static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
 {
-       bool is_host = true;
-
-       /* B-device? */
-       if (rcar_gen3_check_id(ch) && rcar_gen3_check_vbus(ch))
-               is_host = false;
-
-       if (is_host)
+       if (!rcar_gen3_check_id(ch))
                rcar_gen3_init_for_host(ch);
        else
                rcar_gen3_init_for_peri(ch);
index 793ecb6d87bcaa2a56a914b2745bea6453929637..8b267a746576f03be24adf5dec028d45289e31ea 100644 (file)
@@ -90,7 +90,7 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
                return -ENODEV;
 
        dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
-       if (IS_ERR(dp))
+       if (!dp)
                return -ENOMEM;
 
        dp->dev = dev;
index 1d5ae5f8ef694cf9ca389dd3a0742bec189a6955..b1f44ab669fb5cef099a54daf07eeb26f76fc770 100644 (file)
@@ -105,13 +105,13 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
        phy_dev->dev = dev;
        dev_set_drvdata(dev, phy_dev);
 
-       phy_dev->rstc = devm_reset_control_get(dev, "global");
+       phy_dev->rstc = devm_reset_control_get_shared(dev, "global");
        if (IS_ERR(phy_dev->rstc)) {
                dev_err(dev, "failed to ctrl picoPHY reset\n");
                return PTR_ERR(phy_dev->rstc);
        }
 
-       phy_dev->rstport = devm_reset_control_get(dev, "port");
+       phy_dev->rstport = devm_reset_control_get_exclusive(dev, "port");
        if (IS_ERR(phy_dev->rstport)) {
                dev_err(dev, "failed to ctrl picoPHY reset\n");
                return PTR_ERR(phy_dev->rstport);
index bae54f7a1f4877b88297858783b0b864783ec1fe..de3101fbbf40e1f3c77e7a1f22c6929c2db8fb01 100644 (file)
@@ -175,7 +175,7 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
 {
        struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
        u32 temp, usbc_bit = BIT(phy->index * 2);
-       void *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
+       void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
        int i;
 
        mutex_lock(&phy_data->mutex);
@@ -514,9 +514,9 @@ static int sun4i_usb_phy_remove(struct platform_device *pdev)
 
        if (data->vbus_power_nb_registered)
                power_supply_unreg_notifier(&data->vbus_power_nb);
-       if (data->id_det_irq >= 0)
+       if (data->id_det_irq > 0)
                devm_free_irq(dev, data->id_det_irq, data);
-       if (data->vbus_det_irq >= 0)
+       if (data->vbus_det_irq > 0)
                devm_free_irq(dev, data->vbus_det_irq, data);
 
        cancel_delayed_work_sync(&data->detect);
@@ -645,11 +645,11 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
 
        data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
        data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
-       if ((data->id_det_gpio && data->id_det_irq < 0) ||
-           (data->vbus_det_gpio && data->vbus_det_irq < 0))
+       if ((data->id_det_gpio && data->id_det_irq <= 0) ||
+           (data->vbus_det_gpio && data->vbus_det_irq <= 0))
                data->phy0_poll = true;
 
-       if (data->id_det_irq >= 0) {
+       if (data->id_det_irq > 0) {
                ret = devm_request_irq(dev, data->id_det_irq,
                                sun4i_usb_phy0_id_vbus_det_irq,
                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
@@ -660,7 +660,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
                }
        }
 
-       if (data->vbus_det_irq >= 0) {
+       if (data->vbus_det_irq > 0) {
                ret = devm_request_irq(dev, data->vbus_det_irq,
                                sun4i_usb_phy0_id_vbus_det_irq,
                                IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
index 0a477d24cf763326abace4ae8d20e1a834bfee8a..bf46844dc387c69f9c52d9cec3da2f5f0c7f16fe 100644 (file)
@@ -293,11 +293,18 @@ static int ti_pipe3_init(struct phy *x)
                ret = ti_pipe3_dpll_wait_lock(phy);
        }
 
-       /* Program the DPLL only if not locked */
+       /* SATA has issues if re-programmed when locked */
        val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
-       if (!(val & PLL_LOCK))
-               if (ti_pipe3_dpll_program(phy))
-                       return -EINVAL;
+       if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node,
+                                                       "ti,phy-pipe3-sata"))
+               return ret;
+
+       /* Program the DPLL */
+       ret = ti_pipe3_dpll_program(phy);
+       if (ret) {
+               ti_pipe3_disable_clocks(phy);
+               return -EINVAL;
+       }
 
        return ret;
 }
index 6b6af6cba45401ee98d1c7b967796157d2f9fcdc..d9b10a39a2cf77c6f67b3655b3a1f56143f0083e 100644 (file)
@@ -463,7 +463,8 @@ static int twl4030_phy_power_on(struct phy *phy)
        twl4030_usb_set_mode(twl, twl->usb_mode);
        if (twl->usb_mode == T2_USB_MODE_ULPI)
                twl4030_i2c_access(twl, 0);
-       schedule_delayed_work(&twl->id_workaround_work, 0);
+       twl->linkstat = MUSB_UNKNOWN;
+       schedule_delayed_work(&twl->id_workaround_work, HZ);
 
        return 0;
 }
@@ -537,6 +538,7 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
        struct twl4030_usb *twl = _twl;
        enum musb_vbus_id_status status;
        bool status_changed = false;
+       int err;
 
        status = twl4030_usb_linkstat(twl);
 
@@ -567,7 +569,9 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
                        pm_runtime_mark_last_busy(twl->dev);
                        pm_runtime_put_autosuspend(twl->dev);
                }
-               musb_mailbox(status);
+               err = musb_mailbox(status);
+               if (err)
+                       twl->linkstat = MUSB_UNKNOWN;
        }
 
        /* don't schedule during sleep - irq works right then */
@@ -595,7 +599,8 @@ static int twl4030_phy_init(struct phy *phy)
        struct twl4030_usb *twl = phy_get_drvdata(phy);
 
        pm_runtime_get_sync(twl->dev);
-       schedule_delayed_work(&twl->id_workaround_work, 0);
+       twl->linkstat = MUSB_UNKNOWN;
+       schedule_delayed_work(&twl->id_workaround_work, HZ);
        pm_runtime_mark_last_busy(twl->dev);
        pm_runtime_put_autosuspend(twl->dev);
 
@@ -763,7 +768,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
        if (cable_present(twl->linkstat))
                pm_runtime_put_noidle(twl->dev);
        pm_runtime_mark_last_busy(twl->dev);
-       pm_runtime_put_sync_suspend(twl->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
+       pm_runtime_put_sync(twl->dev);
        pm_runtime_disable(twl->dev);
 
        /* autogate 60MHz ULPI clock,
index e4bc1151e04f3c2d7ed0eac8b659ae27244432ba..42a5c1dddfefaf7414eca1809ef7d1bf75a11628 100644 (file)
@@ -23,7 +23,7 @@ obj-$(CONFIG_PINCTRL_PISTACHIO)       += pinctrl-pistachio.o
 obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
 obj-$(CONFIG_PINCTRL_SINGLE)   += pinctrl-single.o
 obj-$(CONFIG_PINCTRL_SIRF)     += sirf/
-obj-$(CONFIG_PINCTRL_TEGRA)    += tegra/
+obj-$(CONFIG_ARCH_TEGRA)       += tegra/
 obj-$(CONFIG_PINCTRL_TZ1090)   += pinctrl-tz1090.o
 obj-$(CONFIG_PINCTRL_TZ1090_PDC)       += pinctrl-tz1090-pdc.o
 obj-$(CONFIG_PINCTRL_U300)     += pinctrl-u300.o
index 47ccfcc8a647ccacdd0880ab770dc2c102d2ba9c..eccb47480e1db12d0212ea54c47b94d4b79d5cc8 100644 (file)
@@ -209,9 +209,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
                pin_reg = &info->pin_regs[pin_id];
 
                if (pin_reg->mux_reg == -1) {
-                       dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
+                       dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
                                info->pins[pin_id].name);
-                       return -EINVAL;
+                       continue;
                }
 
                if (info->flags & SHARE_MUX_CONF_REG) {
index 677a811b3a6ffcb604d8a6c4cb836ec8dd81575e..7abfd42e8ffdf86953764ad2fa2c1c9bf70fcc57 100644 (file)
@@ -401,9 +401,9 @@ static const struct byt_simple_func_mux byt_score_sata_mux[] = {
 static const unsigned int byt_score_plt_clk0_pins[] = { 96 };
 static const unsigned int byt_score_plt_clk1_pins[] = { 97 };
 static const unsigned int byt_score_plt_clk2_pins[] = { 98 };
-static const unsigned int byt_score_plt_clk4_pins[] = { 99 };
-static const unsigned int byt_score_plt_clk5_pins[] = { 100 };
-static const unsigned int byt_score_plt_clk3_pins[] = { 101 };
+static const unsigned int byt_score_plt_clk3_pins[] = { 99 };
+static const unsigned int byt_score_plt_clk4_pins[] = { 100 };
+static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
 static const struct byt_simple_func_mux byt_score_plt_clk_mux[] = {
        SIMPLE_FUNC("plt_clk", 1),
 };
index 207b13b618cf16c2b7c9ff863481580879d58ced..a607655d78309c23c1b1373103f93f307311248f 100644 (file)
@@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
        const struct mtk_desc_pin *pin;
 
        chained_irq_enter(chip, desc);
-       for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
+       for (eint_num = 0;
+            eint_num < pctl->devdata->ap_num;
+            eint_num += 32, reg += 4) {
                status = readl(reg);
-               reg += 4;
                while (status) {
                        offset = __ffs(status);
                        index = eint_num + offset;
index ccbfc325c7789e05ff2072fb021d73118990f1c1..38faceff2f084202e035e1a0c122073eb5f456c9 100644 (file)
@@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
 
        clk_enable(nmk_chip->clk);
 
-       dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+       dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
 
        clk_disable(nmk_chip->clk);
 
index d5bf9fae2ddd425cf32570b668a2c0a5b3aec59f..a87439ee4cdd1da81d40926560c8d9ca89b1285f 100644 (file)
@@ -391,4 +391,12 @@ exit:
 }
 EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map);
 
+void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
+                                struct pinctrl_map *map,
+                                unsigned num_maps)
+{
+       pinctrl_utils_free_map(pctldev, map, num_maps);
+}
+EXPORT_SYMBOL_GPL(pinconf_generic_dt_free_map);
+
 #endif
index cf9bafa10acfb51ad42af96cf6580d6de48c1b1d..bfdf720db270d8b2c47007455406d9887ed028c6 100644 (file)
@@ -1580,6 +1580,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
                else
                        mask &= ~soc_mask;
                pcs->write(mask, pcswi->reg);
+
+               /* flush posted write */
+               mask = pcs->read(pcswi->reg);
                raw_spin_unlock(&pcs->lock);
        }
 
index a927379b67949d1603eaf20e180febbc4e1c0823..d9ea2be69cc4a3a682045c345e0ce88800717a58 100644 (file)
@@ -1,4 +1,4 @@
-obj-y                                  += pinctrl-tegra.o
+obj-$(CONFIG_PINCTRL_TEGRA)            += pinctrl-tegra.o
 obj-$(CONFIG_PINCTRL_TEGRA20)          += pinctrl-tegra20.o
 obj-$(CONFIG_PINCTRL_TEGRA30)          += pinctrl-tegra30.o
 obj-$(CONFIG_PINCTRL_TEGRA114)         += pinctrl-tegra114.o
index 6d8ee3b1587276494274617d65029534df1d1af6..8abd80dbcbed7974b4ace4265dd9dad1bca89edd 100644 (file)
@@ -151,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
                goto exit;
        }
 
+       if (u_cmd.outsize != s_cmd->outsize ||
+           u_cmd.insize != s_cmd->insize) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
        s_cmd->command += ec->cmd_offset;
        ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
        /* Only copy data to userland if data was received. */
        if (ret < 0)
                goto exit;
 
-       if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
+       if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
                ret = -EFAULT;
 exit:
        kfree(s_cmd);
index c06bb85c283912590d306e3336e28419f32a0c7c..3ec0025d19e7e0e069a793cfe11e5452c2661831 100644 (file)
@@ -103,7 +103,6 @@ config DELL_SMBIOS
 
 config DELL_LAPTOP
        tristate "Dell Laptop Extras"
-       depends on X86
        depends on DELL_SMBIOS
        depends on DMI
        depends on BACKLIGHT_CLASS_DEVICE
@@ -505,7 +504,7 @@ config THINKPAD_ACPI_HOTKEY_POLL
 
 config SENSORS_HDAPS
        tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
-       depends on INPUT && X86
+       depends on INPUT
        select INPUT_POLLDEV
        default n
        help
@@ -749,7 +748,7 @@ config TOSHIBA_WMI
 
 config ACPI_CMPC
        tristate "CMPC Laptop Extras"
-       depends on X86 && ACPI
+       depends on ACPI
        depends on RFKILL || RFKILL=n
        select INPUT
        select BACKLIGHT_CLASS_DEVICE
@@ -848,7 +847,7 @@ config INTEL_IMR
 
 config INTEL_PMC_CORE
        bool "Intel PMC Core driver"
-       depends on X86 && PCI
+       depends on PCI
        ---help---
          The Intel Platform Controller Hub for Intel Core SoCs provides access
          to Power Management Controller registers via a PCI interface. This
@@ -860,7 +859,7 @@ config INTEL_PMC_CORE
 
 config IBM_RTL
        tristate "Device driver to enable PRTL support"
-       depends on X86 && PCI
+       depends on PCI
        ---help---
         Enable support for IBM Premium Real Time Mode (PRTM).
         This module will allow you to enter and exit PRTM in the BIOS via
@@ -894,7 +893,6 @@ config XO15_EBOOK
 
 config SAMSUNG_LAPTOP
        tristate "Samsung Laptop driver"
-       depends on X86
        depends on RFKILL || RFKILL = n
        depends on ACPI_VIDEO || ACPI_VIDEO = n
        depends on BACKLIGHT_CLASS_DEVICE
index 4a23fbc66b715bcc4fce2a6ddccb2c8b743ae7b2..d1a091b93192c7dc4a20d8dbf9901d65d918e1e1 100644 (file)
@@ -567,6 +567,7 @@ static void ideapad_sysfs_exit(struct ideapad_private *priv)
 static const struct key_entry ideapad_keymap[] = {
        { KE_KEY, 6,  { KEY_SWITCHVIDEOMODE } },
        { KE_KEY, 7,  { KEY_CAMERA } },
+       { KE_KEY, 8,  { KEY_MICMUTE } },
        { KE_KEY, 11, { KEY_F16 } },
        { KE_KEY, 13, { KEY_WLAN } },
        { KE_KEY, 16, { KEY_PROG1 } },
@@ -809,6 +810,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
                                break;
                        case 13:
                        case 11:
+                       case 8:
                        case 7:
                        case 6:
                                ideapad_input_report(priv, vpc_bit);
index c3bfa1fe95bf3fb9c61d36c1f42012547ee07580..b65ce7519411a87f2d279360df1b8fe252efce0b 100644 (file)
@@ -2043,6 +2043,7 @@ static int hotkey_autosleep_ack;
 
 static u32 hotkey_orig_mask;           /* events the BIOS had enabled */
 static u32 hotkey_all_mask;            /* all events supported in fw */
+static u32 hotkey_adaptive_all_mask;   /* all adaptive events supported in fw */
 static u32 hotkey_reserved_mask;       /* events better left disabled */
 static u32 hotkey_driver_mask;         /* events needed by the driver */
 static u32 hotkey_user_mask;           /* events visible to userspace */
@@ -2742,6 +2743,17 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
 
 static DEVICE_ATTR_RO(hotkey_all_mask);
 
+/* sysfs hotkey adaptive_all_mask -------------------------------------- */
+static ssize_t hotkey_adaptive_all_mask_show(struct device *dev,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+                       hotkey_adaptive_all_mask | hotkey_source_mask);
+}
+
+static DEVICE_ATTR_RO(hotkey_adaptive_all_mask);
+
 /* sysfs hotkey recommended_mask --------------------------------------- */
 static ssize_t hotkey_recommended_mask_show(struct device *dev,
                                            struct device_attribute *attr,
@@ -2985,6 +2997,7 @@ static struct attribute *hotkey_attributes[] __initdata = {
        &dev_attr_wakeup_hotunplug_complete.attr,
        &dev_attr_hotkey_mask.attr,
        &dev_attr_hotkey_all_mask.attr,
+       &dev_attr_hotkey_adaptive_all_mask.attr,
        &dev_attr_hotkey_recommended_mask.attr,
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
        &dev_attr_hotkey_source_mask.attr,
@@ -3321,20 +3334,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        if (!tp_features.hotkey)
                return 1;
 
-       /*
-        * Check if we have an adaptive keyboard, like on the
-        * Lenovo Carbon X1 2014 (2nd Gen).
-        */
-       if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
-               if ((hkeyv >> 8) == 2) {
-                       tp_features.has_adaptive_kbd = true;
-                       res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
-                                       &adaptive_kbd_attr_group);
-                       if (res)
-                               goto err_exit;
-               }
-       }
-
        quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
                                     ARRAY_SIZE(tpacpi_hotkey_qtable));
 
@@ -3357,30 +3356,70 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
           A30, R30, R31, T20-22, X20-21, X22-24.  Detected by checking
           for HKEY interface version 0x100 */
        if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
-               if ((hkeyv >> 8) != 1) {
-                       pr_err("unknown version of the HKEY interface: 0x%x\n",
-                              hkeyv);
-                       pr_err("please report this to %s\n", TPACPI_MAIL);
-               } else {
+               vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+                           "firmware HKEY interface version: 0x%x\n",
+                           hkeyv);
+
+               switch (hkeyv >> 8) {
+               case 1:
                        /*
                         * MHKV 0x100 in A31, R40, R40e,
                         * T4x, X31, and later
                         */
-                       vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
-                               "firmware HKEY interface version: 0x%x\n",
-                               hkeyv);
 
                        /* Paranoia check AND init hotkey_all_mask */
                        if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
                                        "MHKA", "qd")) {
-                               pr_err("missing MHKA handler, "
-                                      "please report this to %s\n",
+                               pr_err("missing MHKA handler, please report this to %s\n",
                                       TPACPI_MAIL);
                                /* Fallback: pre-init for FN+F3,F4,F12 */
                                hotkey_all_mask = 0x080cU;
                        } else {
                                tp_features.hotkey_mask = 1;
                        }
+                       break;
+
+               case 2:
+                       /*
+                        * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016)
+                        */
+
+                       /* Paranoia check AND init hotkey_all_mask */
+                       if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+                                       "MHKA", "dd", 1)) {
+                               pr_err("missing MHKA handler, please report this to %s\n",
+                                      TPACPI_MAIL);
+                               /* Fallback: pre-init for FN+F3,F4,F12 */
+                               hotkey_all_mask = 0x080cU;
+                       } else {
+                               tp_features.hotkey_mask = 1;
+                       }
+
+                       /*
+                        * Check if we have an adaptive keyboard, like on the
+                        * Lenovo Carbon X1 2014 (2nd Gen).
+                        */
+                       if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,
+                                      "MHKA", "dd", 2)) {
+                               if (hotkey_adaptive_all_mask != 0) {
+                                       tp_features.has_adaptive_kbd = true;
+                                       res = sysfs_create_group(
+                                               &tpacpi_pdev->dev.kobj,
+                                               &adaptive_kbd_attr_group);
+                                       if (res)
+                                               goto err_exit;
+                               }
+                       } else {
+                               tp_features.has_adaptive_kbd = false;
+                               hotkey_adaptive_all_mask = 0x0U;
+                       }
+                       break;
+
+               default:
+                       pr_err("unknown version of the HKEY interface: 0x%x\n",
+                              hkeyv);
+                       pr_err("please report this to %s\n", TPACPI_MAIL);
+                       break;
                }
        }
 
index 456987c88baab9f4b8a0d2b7224b10d659b8bddc..b13cd074c52afc90c086bc16cc011b7adb0ac91f 100644 (file)
@@ -565,11 +565,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
 
        WARN_ON(tzd == NULL);
        psy = tzd->devdata;
-       ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+       ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+       if (ret)
+               return ret;
 
        /* Convert tenths of a degree Celsius to millidegrees Celsius. */
-       if (!ret)
-               *temp = val.intval * 100;
+       *temp = val.intval * 100;
 
        return ret;
 }
@@ -612,10 +613,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
        int ret;
 
        psy = tcd->devdata;
-       ret = psy->desc->get_property(psy,
-               POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
-       if (!ret)
-               *state = val.intval;
+       ret = power_supply_get_property(psy,
+                       POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+       if (ret)
+               return ret;
+
+       *state = val.intval;
 
        return ret;
 }
@@ -628,10 +631,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
        int ret;
 
        psy = tcd->devdata;
-       ret = psy->desc->get_property(psy,
-               POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
-       if (!ret)
-               *state = val.intval;
+       ret = power_supply_get_property(psy,
+                       POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+       if (ret)
+               return ret;
+
+       *state = val.intval;
 
        return ret;
 }
index d9f56730c735819010e9efff4f8499814c7fbe58..73dfae41def8a659978eec9a738448f604cb1973 100644 (file)
@@ -197,6 +197,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
 {
        struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
        struct tps65217_charger *charger;
+       struct power_supply_config cfg = {};
        int ret;
 
        dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -208,9 +209,12 @@ static int tps65217_charger_probe(struct platform_device *pdev)
        charger->tps = tps;
        charger->dev = &pdev->dev;
 
+       cfg.of_node = pdev->dev.of_node;
+       cfg.drv_data = charger;
+
        charger->ac = devm_power_supply_register(&pdev->dev,
                                                 &tps65217_charger_desc,
-                                                NULL);
+                                                &cfg);
        if (IS_ERR(charger->ac)) {
                dev_err(&pdev->dev, "failed: power supply register\n");
                return PTR_ERR(charger->ac);
index 38a8bbe7481008232e7336777850d683afb765be..83797d89c30fe88665c5d7214c391a0a571cfe88 100644 (file)
@@ -195,7 +195,7 @@ static void parport_detach(struct parport *port)
        struct pps_client_pp *device;
 
        /* FIXME: oooh, this is ugly! */
-       if (strcmp(pardev->name, KBUILD_MODNAME))
+       if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
                /* not our port */
                return;
 
index 579fd65299a0cfc18b98e8e51a619e26bf82945f..d637c933c8a90655ffe152101dda2b9c71fd3836 100644 (file)
@@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                break;
 
        case PTP_SYS_OFFSET:
-               sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
-               if (!sysoff) {
-                       err = -ENOMEM;
-                       break;
-               }
-               if (copy_from_user(sysoff, (void __user *)arg,
-                                  sizeof(*sysoff))) {
-                       err = -EFAULT;
+               sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
+               if (IS_ERR(sysoff)) {
+                       err = PTR_ERR(sysoff);
+                       sysoff = NULL;
                        break;
                }
                if (sysoff->n_samples > PTP_MAX_SAMPLES) {
index dba3843c53b8d2162d10100d2473be79014d4b4b..ed337a8c34ab4bb231814136fe5cd9628962eff1 100644 (file)
@@ -457,7 +457,8 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
 {
        int err;
 
-       if (!pwm)
+       if (!pwm || !state || !state->period ||
+           state->duty_cycle > state->period)
                return -EINVAL;
 
        if (!memcmp(state, &pwm->state, sizeof(*state)))
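
With the added checks, pwm_apply_state() now rejects a missing state, a zero period, and a duty cycle longer than the period before touching any hardware. A hypothetical caller showing what passes and what now fails (pwm and err come from the caller's context):

    struct pwm_state state = {
            .period = 1000000,      /* 1 ms, in nanoseconds */
            .duty_cycle = 250000,   /* 25%: accepted */
            .polarity = PWM_POLARITY_NORMAL,
            .enabled = true,
    };

    err = pwm_apply_state(pwm, &state);     /* 0 on success */

    state.duty_cycle = 2000000;             /* > period: now -EINVAL */
    err = pwm_apply_state(pwm, &state);
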
index f994c7eaf41c6c3d67a0c688247561dadee49863..14fc011faa32fee990c7ab9253d5faabe67465d4 100644 (file)
@@ -272,7 +272,7 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
        chip->chip.of_pwm_n_cells = 3;
        chip->chip.can_sleep = 1;
 
-       ret = pwmchip_add(&chip->chip);
+       ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED);
        if (ret) {
                clk_disable_unprepare(hlcdc->periph_clk);
                return ret;
index d98599249a05688137325fcf906415e14aa7aed4..01695d48dd543152624b9ed40a9c3afd9de45d0d 100644 (file)
@@ -152,7 +152,7 @@ static ssize_t enable_store(struct device *child,
                goto unlock;
        }
 
-       pwm_apply_state(pwm, &state);
+       ret = pwm_apply_state(pwm, &state);
 
 unlock:
        mutex_unlock(&export->lock);
index 63cd5e68c86494724eae44502bed2d7f590cbf78..3a6d0290c54c0fbd0f1c82ffcd5329c2facc56ae 100644 (file)
@@ -296,7 +296,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
                if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
                        sreg->sel = 22;
 
-               if (!sreg->sel) {
+               if (!sreg->bypass && !sreg->sel) {
                        dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
                        return -EINVAL;
                }
index 321e804aeab0a33c075caafb95ccc7bff4bf417f..a1b49a6d538f1493489f2e45092feb6dd0b96219 100644 (file)
@@ -123,6 +123,9 @@ static int max77620_regulator_set_fps_src(struct max77620_regulator *pmic,
        unsigned int val;
        int ret;
 
+       if (!rinfo)
+               return 0;
+
        switch (fps_src) {
        case MAX77620_FPS_SRC_0:
        case MAX77620_FPS_SRC_1:
@@ -171,6 +174,9 @@ static int max77620_regulator_set_fps_slots(struct max77620_regulator *pmic,
        int pd = rpdata->active_fps_pd_slot;
        int ret = 0;
 
+       if (!rinfo)
+               return 0;
+
        if (is_suspend) {
                pu = rpdata->suspend_fps_pu_slot;
                pd = rpdata->suspend_fps_pd_slot;
@@ -680,7 +686,6 @@ static struct max77620_regulator_info max77620_regs_info[MAX77620_NUM_REGS] = {
        RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1),
        RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
        RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
-       RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
 
        RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
        RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
index 56a17ec5b5efe2432937351a7f45d441a9e48722..6c7fe4778793758478d9a27e3f9d11f6d4b4a1b2 100644 (file)
@@ -140,6 +140,18 @@ static const struct regulator_ops rpm_smps_ldo_ops = {
        .enable = rpm_reg_enable,
        .disable = rpm_reg_disable,
        .is_enabled = rpm_reg_is_enabled,
+       .list_voltage = regulator_list_voltage_linear_range,
+
+       .get_voltage = rpm_reg_get_voltage,
+       .set_voltage = rpm_reg_set_voltage,
+
+       .set_load = rpm_reg_set_load,
+};
+
+static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
+       .enable = rpm_reg_enable,
+       .disable = rpm_reg_disable,
+       .is_enabled = rpm_reg_is_enabled,
 
        .get_voltage = rpm_reg_get_voltage,
        .set_voltage = rpm_reg_set_voltage,
@@ -247,7 +259,7 @@ static const struct regulator_desc pm8941_nldo = {
 static const struct regulator_desc pm8941_lnldo = {
        .fixed_uV = 1740000,
        .n_voltages = 1,
-       .ops = &rpm_smps_ldo_ops,
+       .ops = &rpm_smps_ldo_ops_fixed,
 };
 
 static const struct regulator_desc pm8941_switch = {
index 572816e30095aebac1025e5a97343c4272cde2d5..c139890c1514401b38ff02367c67c2475f3d2739 100644 (file)
@@ -94,11 +94,14 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
                int ramp_delay)
 {
        struct tps51632_chip *tps = rdev_get_drvdata(rdev);
-       int bit = ramp_delay/6000;
+       int bit;
        int ret;
 
-       if (bit)
-               bit--;
+       if (ramp_delay == 0)
+               bit = 0;
+       else
+               bit = DIV_ROUND_UP(ramp_delay, 6000) - 1;
+
        ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit));
        if (ret < 0)
                dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret);
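
The rewrite replaces a truncating divide-then-decrement (which mapped e.g. a requested 7000 to bit 0, the same as 6000) with DIV_ROUND_UP(), so any request that exceeds a whole 6000-unit step moves up to the next bit. A standalone check of the new mapping (slew_bit() is just the arithmetic, not the driver function):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static int slew_bit(int ramp_delay)
    {
            return ramp_delay ? DIV_ROUND_UP(ramp_delay, 6000) - 1 : 0;
    }

    int main(void)
    {
            /* 6000 -> 0, 7000 -> 1, 12000 -> 1, 12001 -> 2 */
            printf("%d %d %d %d\n",
                   slew_bit(6000), slew_bit(7000),
                   slew_bit(12000), slew_bit(12001));
            return 0;
    }
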
index 80b1979e8d955f6022e7810a4b65a62f13ec1224..df036b872b050b835d0fea6f620726dc8876225b 100644 (file)
@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
                qeth_l2_set_offline(cgdev);
 
        if (card->dev) {
+               netif_napi_del(&card->napi);
                unregister_netdev(card->dev);
                card->dev = NULL;
        }
index ac544330daeb7cffaccc37306f041deffd1475bb..709b52339ff9a5907812d315e65bca964ddd11be 100644 (file)
@@ -3226,6 +3226,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
                qeth_l3_set_offline(cgdev);
 
        if (card->dev) {
+               netif_napi_del(&card->napi);
                unregister_netdev(card->dev);
                card->dev = NULL;
        }
index d4c285688ce98b8222dcc778ee646f6581e48df3..3ddc85e6efd65c845fa3a306c832ad2b1a48ac1d 100644 (file)
@@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
                } else {
                        struct scsi_cmnd *SCp;
 
-                       SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
+                       SCp = SDp->current_cmnd;
                        if(unlikely(SCp == NULL)) {
                                sdev_printk(KERN_ERR, SDp,
                                        "no saved request for untagged cmd\n");
@@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
                       slot->tag, slot);
        } else {
                slot->tag = SCSI_NO_TAG;
-               /* must populate current_cmnd for scsi_host_find_tag to work */
+               /* save current command for reselection */
                SCp->device->current_cmnd = SCp;
        }
        /* sanity check: some of the commands generated by the mid-layer
index 8f90d9e77104968bfb0e7a36cadf5227526932d8..969c312de1be38b9f27f5c5746c0eaf8e93865d0 100644 (file)
@@ -620,6 +620,11 @@ struct aac_driver_ident
  */
 #define AAC_QUIRK_SCSI_32      0x0020
 
+/*
+ * SRC based adapters support the AifReqEvent functions
+ */
+#define AAC_QUIRK_SRC 0x0040
+
 /*
  *     The adapter interface specs all queues to be located in the same
  *     physically contiguous block. The host structure that defines the
index a943bd230bc2fdb6cb5272c911c30fe0eea2e2b8..79871f3519ffc923ed9c41848d70b17120c37844 100644 (file)
@@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = {
        { aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Catch All */
        { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Rocket Catch All */
        { aac_nark_init, "aacraid", "ADAPTEC ", "RAID           ", 2 }, /* Adaptec NEMER/ARK Catch All */
-       { aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
-       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 7 (Denali) */
-       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 8 */
-       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec PMC Series 9 */
+       { aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
+       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
+       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
+       { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
 };
 
 /**
@@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        else
                shost->this_id = shost->max_id;
 
-       aac_intr_normal(aac, 0, 2, 0, NULL);
+       if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+               aac_intr_normal(aac, 0, 2, 0, NULL);
 
        /*
         * dmb - we may need to move the setting of these parms somewhere else once
index d6a691e27d33317c72d785340b5b371378932741..d6803a9e5ab8a49ea045bcbfafce7029ef439c5d 100644 (file)
@@ -10093,6 +10093,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
                ioa_cfg->intr_flag = IPR_USE_MSI;
        else {
                ioa_cfg->intr_flag = IPR_USE_LSI;
+               ioa_cfg->clear_isr = 1;
                ioa_cfg->nvectors = 1;
                dev_info(&pdev->dev, "Cannot enable MSI.\n");
        }
index 6a4df5a315e93b09a3f844ee5068375b41fcfa32..6bff13e7afc7b706a69ab8c99562e5f0e5ce9ddf 100644 (file)
@@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
                ActiveCableEventData =
                    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
                if (ActiveCableEventData->ReasonCode ==
-                               MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER)
+                               MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
                        pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
                            ioc->name, ActiveCableEventData->ReceptacleID);
                        pr_info("cannot be powered and devices connected to this active cable");
                        pr_info("will not be seen. This active cable");
                        pr_info("requires %d mW of power",
                            ActiveCableEventData->ActiveCablePowerRequirement);
+               }
                break;
 
        default: /* ignore the rest */
index 5649c200d37ce12f80fc49ad860a185c1a52af5f..a92a62dea7934429e48c252f77405a8dd70b6400 100644 (file)
@@ -2548,7 +2548,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
        if (!vha->flags.online)
                return;
 
-       if (rsp->msix->cpuid != smp_processor_id()) {
+       if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
                /* if kernel does not notify qla of IRQ's CPU change,
                 * then set it here.
                 */
index 3408578b08d6adb23fe38c6a8c84c066236bc4ad..eaccd651ccda0d239af91ebfb6dfdbd97ac340e3 100644 (file)
@@ -230,6 +230,7 @@ static struct {
        {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
        {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
        {"Promise", "", NULL, BLIST_SPARSELUN},
+       {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
        {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
        {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
        {"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
@@ -428,7 +429,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
         * here, and we don't know what device it is
         * trying to work with, leave it as-is.
         */
-       vmax = 8;       /* max length of vendor */
+       vmax = sizeof(devinfo->vendor);
        vskip = vendor;
        while (vmax > 0 && *vskip == ' ') {
                vmax--;
@@ -438,7 +439,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
        while (vmax > 0 && vskip[vmax - 1] == ' ')
                --vmax;
 
-       mmax = 16;      /* max length of model */
+       mmax = sizeof(devinfo->model);
        mskip = model;
        while (mmax > 0 && *mskip == ' ') {
                mmax--;
@@ -454,10 +455,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
                         * Behave like the older version of get_device_flags.
                         */
                        if (memcmp(devinfo->vendor, vskip, vmax) ||
-                                       devinfo->vendor[vmax])
+                                       (vmax < sizeof(devinfo->vendor) &&
+                                               devinfo->vendor[vmax]))
                                continue;
                        if (memcmp(devinfo->model, mskip, mmax) ||
-                                       devinfo->model[mmax])
+                                       (mmax < sizeof(devinfo->model) &&
+                                               devinfo->model[mmax]))
                                continue;
                        return devinfo;
                } else {
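
The sizeof()-based limits keep vmax/mmax tied to the actual field widths, and the added `vmax < sizeof(...)` guard avoids reading devinfo->vendor[vmax] one element past the array when the trimmed key exactly fills the field. Condensed, the match condition now has this shape (sketch; the model test follows the same pattern):

    /* same prefix, and either the key fills the field exactly or the
     * stored string terminates right where the key ends
     */
    if (memcmp(devinfo->vendor, vskip, vmax) == 0 &&
        (vmax == sizeof(devinfo->vendor) || devinfo->vendor[vmax] == '\0'))
            ;       /* vendor matches */
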
index a8b610eaa0caa48bb83a36ccb0c514f6dc217d2d..106a6adbd6f12d0a86906239345ec3423eab88cf 100644 (file)
@@ -1128,7 +1128,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
  */
 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
 {
-       scmd->device->host->host_failed--;
        scmd->eh_eflags = 0;
        list_move_tail(&scmd->eh_entry, done_q);
 }
@@ -2227,6 +2226,9 @@ int scsi_error_handler(void *data)
                else
                        scsi_unjam_host(shost);
 
+               /* All scmds have been handled */
+               shost->host_failed = 0;
+
                /*
                 * Note - if the above fails completely, the action is to take
                 * individual devices offline and flush the queue of any
index b2e332af0f51f6bc0f06ec08888b003c81be9d4f..c71344aebdbb1d5cf1f07e082b48000fb8ad0c64 100644 (file)
@@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        }
 
        /*
-        * If we finished all bytes in the request we are done now.
+        * special case: failed zero length commands always need to
+        * drop down into the retry code. Otherwise, if we finished
+        * all bytes in the request we are done now.
         */
-       if (!scsi_end_request(req, error, good_bytes, 0))
+       if (!(blk_rq_bytes(req) == 0 && error) &&
+           !scsi_end_request(req, error, good_bytes, 0))
                return;
 
        /*
index 428c03ef02b21ec538f1d55204f2ff27351bb445..60bff78e9ead8703e1e912307c504330d1a76d1f 100644 (file)
@@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp,
  **/
 static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
 {
-       struct scsi_disk *sdkp = scsi_disk(disk);
-       struct scsi_device *sdp = sdkp->device;
+       struct scsi_disk *sdkp = scsi_disk_get(disk);
+       struct scsi_device *sdp;
        struct scsi_sense_hdr *sshdr = NULL;
        int retval;
 
+       if (!sdkp)
+               return 0;
+
+       sdp = sdkp->device;
        SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
 
        /*
@@ -1459,6 +1463,7 @@ out:
        kfree(sshdr);
        retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
        sdp->changed = 0;
+       scsi_disk_put(sdkp);
        return retval;
 }
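
sd_check_events() can now run concurrently with device teardown, so it pins the scsi_disk with scsi_disk_get() and releases it on every exit path; a failed get means the disk is already on its way out. The shape of the pattern inside sd.c (sketch):

    struct scsi_disk *sdkp = scsi_disk_get(disk);

    if (!sdkp)              /* disk is already being torn down */
            return 0;
    /* ... sdkp and sdkp->device stay valid in here ... */
    scsi_disk_put(sdkp);    /* drop the reference on every return path */
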
 
@@ -2862,10 +2867,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
        if (sdkp->opt_xfer_blocks &&
            sdkp->opt_xfer_blocks <= dev_max &&
            sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
-           sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
-               rw_max = q->limits.io_opt =
-                       sdkp->opt_xfer_blocks * sdp->sector_size;
-       else
+           logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
+               q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+               rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+       } else
                rw_max = BLK_DEF_MAX_SECTORS;
 
        /* Combine with controller limits */
index 654630bb7d0edeb48438654e47d8a60151d2c87b..765a6f1ac1b7320edc860b1eb6baa30e7e1beac4 100644 (file)
@@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
        return blocks << (ilog2(sdev->sector_size) - 9);
 }
 
+static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
+{
+       return blocks * sdev->sector_size;
+}
+
 /*
  * A DIF-capable target device can be formatted with different
  * protection schemes.  Currently 0 through 3 are defined:
index cd89682065b98d7a06a7d7064ec30aff2a5e7fef..1026e180eed79019f0ec55d9ed5ba427ce71cefa 100644 (file)
@@ -578,7 +578,7 @@ static int rockchip_spi_transfer_one(
                struct spi_device *spi,
                struct spi_transfer *xfer)
 {
-       int ret = 1;
+       int ret = 0;
        struct rockchip_spi *rs = spi_master_get_devdata(master);
 
        WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@@ -627,6 +627,8 @@ static int rockchip_spi_transfer_one(
                        spi_enable_chip(rs, 1);
                        ret = rockchip_spi_prepare_dma(rs);
                }
+               /* successful DMA prepare means the transfer is in progress */
+               ret = ret ? ret : 1;
        } else {
                spi_enable_chip(rs, 1);
                ret = rockchip_spi_pio_transfer(rs);
index 1ddd9e2309b685576a8c63abc1770f731a7acd0c..cf007f3b83ec92f1d8f976ea2b68978dbdd36f20 100644 (file)
@@ -173,13 +173,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
 {
        struct sun4i_spi *sspi = spi_master_get_devdata(master);
        unsigned int mclk_rate, div, timeout;
+       unsigned int start, end, tx_time;
        unsigned int tx_len = 0;
        int ret = 0;
        u32 reg;
 
        /* We don't support transfer larger than the FIFO */
        if (tfr->len > SUN4I_FIFO_DEPTH)
-               return -EINVAL;
+               return -EMSGSIZE;
+
+       if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+               return -EMSGSIZE;
 
        reinit_completion(&sspi->done);
        sspi->tx_buf = tfr->tx_buf;
@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
        sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
        sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
 
-       /* Fill the TX FIFO */
-       sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+       /*
+        * Fill the TX FIFO.
+        * Filling the FIFO fully causes a timeout for some reason,
+        * at least on spi2 on the A10s, so leave one entry free.
+        */
+       sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
 
        /* Enable the interrupts */
        sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
        reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
        sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
 
+       tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+       start = jiffies;
        timeout = wait_for_completion_timeout(&sspi->done,
-                                             msecs_to_jiffies(1000));
+                                             msecs_to_jiffies(tx_time));
+       end = jiffies;
        if (!timeout) {
+               dev_warn(&master->dev,
+                        "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+                        dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+                        jiffies_to_msecs(end - start), tx_time);
                ret = -ETIMEDOUT;
                goto out;
        }
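
The fixed one-second wait becomes proportional to the transfer: len * 8 bits, doubled for slack, divided by the clock in kHz, floored at 100 ms. For 64 bytes at 100 kHz that is 1024/100 = 10 ms, clamped up to 100 ms; at 3 kHz it is 1024/3 = 341 ms, so slow buses no longer trip a premature -ETIMEDOUT. A standalone check of the arithmetic (tx_time_ms() mirrors the expression above):

    #include <stdio.h>

    static unsigned int tx_time_ms(unsigned int len, unsigned int speed_hz)
    {
            unsigned int t = len * 8 * 2 / (speed_hz / 1000);

            return t > 100 ? t : 100;       /* max(t, 100U) */
    }

    int main(void)
    {
            printf("%u %u\n", tx_time_ms(64, 100000), tx_time_ms(64, 3000));
            /* prints: 100 341 */
            return 0;
    }
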
index 42e2c4bd690a253b423f42b0dfc6d1fc7c379ae9..7fce79a60608873d42af3b4b5203b4a94942ade1 100644 (file)
@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
 {
        struct sun6i_spi *sspi = spi_master_get_devdata(master);
        unsigned int mclk_rate, div, timeout;
+       unsigned int start, end, tx_time;
        unsigned int tx_len = 0;
        int ret = 0;
        u32 reg;
@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
        reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
        sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
 
+       tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+       start = jiffies;
        timeout = wait_for_completion_timeout(&sspi->done,
-                                             msecs_to_jiffies(1000));
+                                             msecs_to_jiffies(tx_time));
+       end = jiffies;
        if (!timeout) {
+               dev_warn(&master->dev,
+                        "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+                        dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+                        jiffies_to_msecs(end - start), tx_time);
                ret = -ETIMEDOUT;
                goto out;
        }
index 443f664534e144fd388e2baba7f4d9bb49134772..29ea8d2f982498c4290e4afc15093165656e7897 100644 (file)
@@ -646,6 +646,13 @@ free_master:
 
 static int ti_qspi_remove(struct platform_device *pdev)
 {
+       struct ti_qspi *qspi = platform_get_drvdata(pdev);
+       int rc;
+
+       rc = spi_master_suspend(qspi->master);
+       if (rc)
+               return rc;
+
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
index b56885c148396a845e62464d38aed2d6832f6625..ebb34dca60dfef38e1924531245596881e4f2760 100644 (file)
@@ -68,7 +68,8 @@ struct sync_timeline {
 
        /* protected by child_list_lock */
        bool                    destroyed;
-       int                     context, value;
+       u64                     context;
+       int                     value;
 
        struct list_head        child_list_head;
        spinlock_t              child_list_lock;
index a8f533af9ecaeffcd39143ca1379dd8186714179..ec12181822e659a508a977caf5e8acf43b7472ed 100644 (file)
@@ -594,7 +594,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
                goto error_ret_mut;
        ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
        mutex_unlock(&st->lock);
-       if (ret)
+       if (ret < 0)
                goto error_ret;
        val = ret;
        if (base_freq > 0)
index 825da0769936ace09907977be92392dda5447407..9587fa86dc69589fc34aa0105584ef3b24add210 100644 (file)
@@ -21,7 +21,7 @@ static int ad7606_spi_read_block(struct device *dev,
 {
        struct spi_device *spi = to_spi_device(dev);
        int i, ret;
-       unsigned short *data;
+       unsigned short *data = buf;
        __be16 *bdata = buf;
 
        ret = spi_read(spi, buf, count * 2);
index 9f43976f4ef217f822916c5cead94abfeb764385..170ac980abcb84f1c4079b99187d6a18262ca1a4 100644 (file)
@@ -444,10 +444,10 @@ static ssize_t ad5933_store(struct device *dev,
                st->settling_cycles = val;
 
                /* 2x, 4x handling, see datasheet */
-               if (val > 511)
-                       val = (val >> 1) | (1 << 9);
-               else if (val > 1022)
+               if (val > 1022)
                        val = (val >> 2) | (3 << 9);
+               else if (val > 511)
+                       val = (val >> 1) | (1 << 9);
 
                dat = cpu_to_be16(val);
                ret = ad5933_i2c_write(st->client,
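
The swap matters because these are top-down range checks: with `val > 511` tested first, any value above 1022 still took the 2x branch and the 4x encoding was dead code. Testing the wider bound first restores the datasheet mapping. A standalone check (encode_cycles() is just the arithmetic):

    #include <stdio.h>

    static unsigned int encode_cycles(unsigned int val)
    {
            if (val > 1022)                         /* widest range first */
                    return (val >> 2) | (3 << 9);   /* 4x multiplier */
            if (val > 511)
                    return (val >> 1) | (1 << 9);   /* 2x multiplier */
            return val;                             /* 1x */
    }

    int main(void)
    {
            /* 600 -> 0x32c (2x), 1500 -> 0x777 (4x, unreachable before) */
            printf("0x%x 0x%x\n", encode_cycles(600), encode_cycles(1500));
            return 0;
    }
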
index bbfee53cfcf50296e52a7849b4f0964145db1b13..845e49a52430db2f091ed4ff7bbeeed8f52efad7 100644 (file)
@@ -2521,12 +2521,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
        return 0;
 
  failed:
-       if (ni)
+       if (ni) {
                lnet_ni_decref(ni);
+               rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
+               rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
+       }
 
        rej.ibr_version             = version;
-       rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
-       rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
        kiblnd_reject(cmid, &rej);
 
        return -ECONNREFUSED;
index c17870cddb5b10e9077623b0887ffe7c41105902..fbce1f7e68caf5c3517179aa01bfbb16265f8c42 100644 (file)
@@ -102,7 +102,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8  *pbuf)
        if (!efuseTbl)
                return;
 
-       eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord));
+       eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
        if (!eFuseWord) {
                DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
                goto eFuseWord_failed;
index 87ea3b8449518e6a1e508a8434a139588876da51..363f3a34ddce48abb4ccac7c7a09edbe45107c48 100644 (file)
@@ -2072,7 +2072,8 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
 {
        struct hal_ops  *halfunc = &adapt->HalFunc;
 
-       adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL);
+
+       adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
        if (!adapt->HalData)
                DBG_88E("cant not alloc memory for HAL DATA\n");
 
index 6ceac4f2d4b227d52045632992568d08ea158c5b..5b4b47ed948ba2735870e78e50fe860c14e7d4a7 100644 (file)
@@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np,
                goto free_power_table;
        }
 
-       snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
-                cpufreq_dev->id);
-
-       cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
-                                                     &cpufreq_cooling_ops);
-       if (IS_ERR(cool_dev))
-               goto remove_idr;
-
        /* Fill freq-table in descending order of frequencies */
        for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
                freq = find_next_max(table, freq);
@@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np,
                        pr_debug("%s: freq:%u KHz\n", __func__, freq);
        }
 
+       snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+                cpufreq_dev->id);
+
+       cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
+                                                     &cpufreq_cooling_ops);
+       if (IS_ERR(cool_dev))
+               goto remove_idr;
+
        cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
        cpufreq_dev->cool_dev = cool_dev;
 
index 13d431cbd29ecf6b5ab558a7b15e40181cab1f95..a578cd257db4b57ef64751a2d85a5078355d5d1b 100644 (file)
@@ -177,7 +177,7 @@ static int int3406_thermal_probe(struct platform_device *pdev)
                return -ENODEV;
        d->raw_bd = bd;
 
-       ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br);
+       ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL);
        if (ret)
                return ret;
 
index 82c4d2e45319b2ad27092469cd437d587d327560..95103054c0e469faf05472a6f3aa6e84e132eee1 100644 (file)
@@ -120,17 +120,6 @@ config UNIX98_PTYS
          All modern Linux systems use the Unix98 ptys.  Say Y unless
          you're on an embedded system and want to conserve memory.
 
-config DEVPTS_MULTIPLE_INSTANCES
-       bool "Support multiple instances of devpts"
-       depends on UNIX98_PTYS
-       default n
-       ---help---
-         Enable support for multiple instances of devpts filesystem.
-         If you want to have isolated PTY namespaces (eg: in containers),
-         say Y here.  Otherwise, say N. If enabled, each mount of devpts
-         filesystem with the '-o newinstance' option will create an
-         independent PTY namespace.
-
 config LEGACY_PTYS
        bool "Legacy (BSD) PTY support"
        default y
index dd4b8417e7f4688b4b35488c2a8ad71a077310ca..51e0d32883ba7d3bbb5b626dcb6314a49d210dff 100644 (file)
@@ -667,8 +667,11 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
                fsi = tty->driver_data;
        else
                fsi = tty->link->driver_data;
-       devpts_kill_index(fsi, tty->index);
-       devpts_put_ref(fsi);
+
+       if (fsi) {
+               devpts_kill_index(fsi, tty->index);
+               devpts_release(fsi);
+       }
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -733,10 +736,11 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        if (retval)
                return retval;
 
-       fsi = devpts_get_ref(inode, filp);
-       retval = -ENODEV;
-       if (!fsi)
+       fsi = devpts_acquire(filp);
+       if (IS_ERR(fsi)) {
+               retval = PTR_ERR(fsi);
                goto out_free_file;
+       }
 
        /* find a device that is not in use. */
        mutex_lock(&devpts_mutex);
@@ -745,7 +749,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
 
        retval = index;
        if (index < 0)
-               goto out_put_ref;
+               goto out_put_fsi;
 
 
        mutex_lock(&tty_mutex);
@@ -789,8 +793,8 @@ err_release:
        return retval;
 out:
        devpts_kill_index(fsi, index);
-out_put_ref:
-       devpts_put_ref(fsi);
+out_put_fsi:
+       devpts_release(fsi);
 out_free_file:
        tty_free_file(filp);
        return retval;
index f973bfce5d089256086b945fb37148fffebf902d..1e93a37e27f05091c6ade07f2798a514c55ae2b8 100644 (file)
@@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c)
 
 static void do_compute_shiftstate(void)
 {
-       unsigned int i, j, k, sym, val;
+       unsigned int k, sym, val;
 
        shift_state = 0;
        memset(shift_down, 0, sizeof(shift_down));
 
-       for (i = 0; i < ARRAY_SIZE(key_down); i++) {
-
-               if (!key_down[i])
+       for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
+               sym = U(key_maps[0][k]);
+               if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
                        continue;
 
-               k = i * BITS_PER_LONG;
-
-               for (j = 0; j < BITS_PER_LONG; j++, k++) {
-
-                       if (!test_bit(k, key_down))
-                               continue;
+               val = KVAL(sym);
+               if (val == KVAL(K_CAPSSHIFT))
+                       val = KVAL(K_SHIFT);
 
-                       sym = U(key_maps[0][k]);
-                       if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
-                               continue;
-
-                       val = KVAL(sym);
-                       if (val == KVAL(K_CAPSSHIFT))
-                               val = KVAL(K_SHIFT);
-
-                       shift_down[val]++;
-                       shift_state |= (1 << val);
-               }
+               shift_down[val]++;
+               shift_state |= BIT(val);
        }
 }
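
for_each_set_bit() replaces the hand-rolled word-and-bit scan: it visits only the set bits of a bitmap, in increasing order, up to a given limit. General usage from <linux/bitops.h> (sketch; handle_key() is a placeholder):

    DECLARE_BITMAP(down, KEY_CNT);
    unsigned int k;

    /* k takes each bit index with test_bit(k, down) true, bounded by
     * the smaller of the two array sizes
     */
    for_each_set_bit(k, down, min(NR_KEYS, KEY_CNT))
            handle_key(k);
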
 
index dc125322f48f9d8c958ff09ce6e0406e77d48882..5b0fe97c46ca9fb0bff6b66e0638226bf1b5781d 100644 (file)
@@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init)
        vc->vc_complement_mask = 0;
        vc->vc_can_do_color = 0;
        vc->vc_panic_force_write = false;
+       vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
        vc->vc_sw->con_init(vc, init);
        if (!vc->vc_complement_mask)
                vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
index 9059b7dc185ee7dcb5e7d33d123337ff862fa6bd..2f537bbdda093d90285789cd92f77da608dbbe14 100644 (file)
@@ -21,6 +21,7 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/mutex.h>
@@ -450,3 +451,4 @@ int otg_statemachine(struct otg_fsm *fsm)
        return fsm->state_changed;
 }
 EXPORT_SYMBOL_GPL(otg_statemachine);
+MODULE_LICENSE("GPL");
index 34b837ae1ed79ebb909c47e3fa8c75345714b88b..d2e3f655c26f6ce16e4b63a82d7e83afbd15a524 100644 (file)
@@ -2598,26 +2598,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
  * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
  * deallocated.
  *
- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
- * freed.  When hcd_release() is called for either hcd in a peer set
- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
- * block new peering attempts
+ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
+ * freed.  When hcd_release() is called for either hcd in a peer set,
+ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
  */
 static void hcd_release(struct kref *kref)
 {
        struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
 
        mutex_lock(&usb_port_peer_mutex);
-       if (usb_hcd_is_primary_hcd(hcd)) {
-               kfree(hcd->address0_mutex);
-               kfree(hcd->bandwidth_mutex);
-       }
        if (hcd->shared_hcd) {
                struct usb_hcd *peer = hcd->shared_hcd;
 
                peer->shared_hcd = NULL;
-               if (peer->primary_hcd == hcd)
-                       peer->primary_hcd = NULL;
+               peer->primary_hcd = NULL;
+       } else {
+               kfree(hcd->address0_mutex);
+               kfree(hcd->bandwidth_mutex);
        }
        mutex_unlock(&usb_port_peer_mutex);
        kfree(hcd);
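
After the change, whichever hcd of a peer set is released last frees the shared mutexes: while the peer is still linked, release only severs the pointers; once shared_hcd is NULL, this hcd is the survivor and owns the kfree(). A reduced userspace model of that last-one-frees rule (this struct hcd is illustrative, not the kernel's):

    #include <stdlib.h>

    struct hcd {
            struct hcd *peer;       /* NULL once the other side is gone */
            int *shared_lock;       /* stands in for the shared mutexes */
    };

    void release(struct hcd *hcd)
    {
            if (hcd->peer)                  /* first release: just unlink */
                    hcd->peer->peer = NULL;
            else                            /* last release: free shared state */
                    free(hcd->shared_lock);
            free(hcd);
    }
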
index 6dc810bce295ab22dbb10b47755dddb421f96957..944a6dca0fcb50298ce1b0c21f9f7cd36c1707ca 100644 (file)
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* USB3503 */
+       { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Microsoft Wireless Laser Mouse 6000 Receiver */
        { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -173,6 +176,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* MAYA44USB sound device */
        { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* ASUS Base Station(T100) */
+       { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
        /* Action Semiconductor flash disk */
        { USB_DEVICE(0x10d6, 0x2200), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
@@ -188,26 +195,22 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1908, 0x1315), .driver_info =
                        USB_QUIRK_HONOR_BNUMINTERFACES },
 
-       /* INTEL VALUE SSD */
-       { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
-
-       /* USB3503 */
-       { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
-
-       /* ASUS Base Station(T100) */
-       { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
-                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
-
        /* Protocol and OTG Electrical Test Device */
        { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Acer C120 LED Projector */
+       { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Blackmagic Design Intensity Shuttle */
        { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
 
        /* Blackmagic Design UltraStudio SDI */
        { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
 
+       /* INTEL VALUE SSD */
+       { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+
        { }  /* terminating entry must be last */
 };
 
index 3c58d633ce8057a7941ba521e568e21f295bbfca..dec0b21fc62687d700ffc3b040ec16a70a530710 100644 (file)
        DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt),                \
                                dev_name(hsotg->dev), ##__VA_ARGS__)
 
+#ifdef CONFIG_MIPS
+/*
+ * There are some MIPS machines that can run in either big-endian
+ * or little-endian mode and that access the dwc2 registers without
+ * a byteswap in either mode.
+ * Unlike other architectures, MIPS apparently does not require a
+ * barrier before the __raw_writel() to synchronize with DMA but does
+ * require the barrier after the __raw_writel() to serialize a set of
+ * writes. This set of operations was added specifically for MIPS and
+ * should only be used there.
+ */
 static inline u32 dwc2_readl(const void __iomem *addr)
 {
        u32 value = __raw_readl(addr);
@@ -90,6 +101,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr)
        pr_info("INFO:: wrote %08x to %p\n", value, addr);
 #endif
 }
+#else
+/* Normal architectures just use readl/writel */
+static inline u32 dwc2_readl(const void __iomem *addr)
+{
+       return readl(addr);
+}
+
+static inline void dwc2_writel(u32 value, void __iomem *addr)
+{
+       writel(value, addr);
+
+#ifdef DWC2_LOG_WRITES
+       pr_info("info:: wrote %08x to %p\n", value, addr);
+#endif
+}
+#endif
 
 /* Maximum number of Endpoints/HostChannels */
 #define MAX_EPS_CHANNELS       16
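
Callers are unaffected by the split: on MIPS they keep the __raw accessors with the trailing barrier, everywhere else they get plain readl()/writel(). Typical use against a write-1-to-clear status register (sketch; GINTSTS and hsotg->regs are the driver's existing names):

    u32 gintsts;

    gintsts = dwc2_readl(hsotg->regs + GINTSTS);    /* read raw status */
    dwc2_writel(gintsts, hsotg->regs + GINTSTS);    /* ack: write 1 to clear */
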
index 4c5e3005e1dc780c1db4632c7f3356398e1c544b..26cf09d0fe3ce330ed65ac8dfa3dc4c38d809769 100644 (file)
@@ -1018,7 +1018,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
        return 1;
 }
 
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value);
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
 
 /**
  * get_ep_head - return the first request on the endpoint
@@ -1094,7 +1094,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
                case USB_ENDPOINT_HALT:
                        halted = ep->halted;
 
-                       dwc2_hsotg_ep_sethalt(&ep->ep, set);
+                       dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
 
                        ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
                        if (ret) {
@@ -2948,8 +2948,13 @@ static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
  * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
  * @ep: The endpoint to set halt.
  * @value: Set or unset the halt.
+ * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
+ *       the endpoint is busy processing requests.
+ *
+ * We need to stall the endpoint immediately if the request comes from the
+ * set_feature protocol command handler.
  */
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
 {
        struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
        struct dwc2_hsotg *hs = hs_ep->parent;
@@ -2969,6 +2974,17 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
                return 0;
        }
 
+       if (hs_ep->isochronous) {
+               dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
+               return -EINVAL;
+       }
+
+       if (!now && value && !list_empty(&hs_ep->queue)) {
+               dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
+                       ep->name);
+               return -EAGAIN;
+       }
+
        if (hs_ep->dir_in) {
                epreg = DIEPCTL(index);
                epctl = dwc2_readl(hs->regs + epreg);
@@ -3020,7 +3036,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
        int ret = 0;
 
        spin_lock_irqsave(&hs->lock, flags);
-       ret = dwc2_hsotg_ep_sethalt(ep, value);
+       ret = dwc2_hsotg_ep_sethalt(ep, value, false);
        spin_unlock_irqrestore(&hs->lock, flags);
 
        return ret;
index 7ddf9449a06302ed7c72bb692ed4e5cc52bd40a5..654050684f4fadb4004587edcb653ca03c0d4f3f 100644 (file)
 #define DWC3_DEPCMD_GET_RSC_IDX(x)     (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
 #define DWC3_DEPCMD_STATUS(x)          (((x) >> 12) & 0x0F)
 #define DWC3_DEPCMD_HIPRI_FORCERM      (1 << 11)
+#define DWC3_DEPCMD_CLEARPENDIN                (1 << 11)
 #define DWC3_DEPCMD_CMDACT             (1 << 10)
 #define DWC3_DEPCMD_CMDIOC             (1 << 8)
 
index dd5cb5577dcaf62d411fb9776e47a23d4a4dacf7..2f1fb7e7aa548f831fb9cfa63d24513c292d15b4 100644 (file)
@@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, exynos);
 
-       ret = dwc3_exynos_register_phys(exynos);
-       if (ret) {
-               dev_err(dev, "couldn't register PHYs\n");
-               return ret;
-       }
-
        exynos->dev     = dev;
 
        exynos->clk = devm_clk_get(dev, "usbdrd30");
@@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
                goto err3;
        }
 
+       ret = dwc3_exynos_register_phys(exynos);
+       if (ret) {
+               dev_err(dev, "couldn't register PHYs\n");
+               goto err4;
+       }
+
        if (node) {
                ret = of_platform_populate(node, NULL, NULL, dev);
                if (ret) {
                        dev_err(dev, "failed to add dwc3 core\n");
-                       goto err4;
+                       goto err5;
                }
        } else {
                dev_err(dev, "no device node, failed to add dwc3 core\n");
                ret = -ENODEV;
-               goto err4;
+               goto err5;
        }
 
        return 0;
 
+err5:
+       platform_device_unregister(exynos->usb2_phy);
+       platform_device_unregister(exynos->usb3_phy);
 err4:
        regulator_disable(exynos->vdd10);
 err3:
index 5c0adb9c6fb27baa778615f17705ac6ab7896079..89a2f712fdfe32f5fc0a6fc0681b0d8db005e2a2 100644 (file)
@@ -129,12 +129,18 @@ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data)
        switch (dwc3_data->dr_mode) {
        case USB_DR_MODE_PERIPHERAL:
 
-               val &= ~(USB3_FORCE_VBUSVALID | USB3_DELAY_VBUSVALID
+               val &= ~(USB3_DELAY_VBUSVALID
                        | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
                        | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
                        | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
 
-               val |= USB3_DEVICE_NOT_HOST;
+               /*
+                * When USB3_PORT2_FORCE_VBUSVALID is '1' and
+                * USB3_PORT2_DEVICE_NOT_HOST is '1', the VBUSVLDEXT2 input
+                * of the pico PHY is forced to 1.
+                */
+
+               val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID;
                break;
 
        case USB_DR_MODE_HOST:
@@ -227,7 +233,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
        dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
                 dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
 
-       dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown");
+       dwc3_data->rstc_pwrdn =
+               devm_reset_control_get_exclusive(dev, "powerdown");
        if (IS_ERR(dwc3_data->rstc_pwrdn)) {
                dev_err(&pdev->dev, "could not get power controller\n");
                ret = PTR_ERR(dwc3_data->rstc_pwrdn);
@@ -237,7 +244,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
        /* Manage PowerDown */
        reset_control_deassert(dwc3_data->rstc_pwrdn);
 
-       dwc3_data->rstc_rst = devm_reset_control_get(dev, "softreset");
+       dwc3_data->rstc_rst =
+               devm_reset_control_get_shared(dev, "softreset");
        if (IS_ERR(dwc3_data->rstc_rst)) {
                dev_err(&pdev->dev, "could not get reset controller\n");
                ret = PTR_ERR(dwc3_data->rstc_rst);
index 9a7d0bd15dc311c2a697678f05a7cc67ca3ef2ad..07248ff1be5c150b04c2d0f6297290489c94c205 100644 (file)
@@ -347,6 +347,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
        return ret;
 }
 
+static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
+{
+       struct dwc3 *dwc = dep->dwc;
+       struct dwc3_gadget_ep_cmd_params params;
+       u32 cmd = DWC3_DEPCMD_CLEARSTALL;
+
+       /*
+        * As of core revision 2.60a the recommended programming model
+        * is to set the ClearPendIN bit when issuing a Clear Stall EP
+        * command for IN endpoints. This is to prevent an issue where
+        * some (non-compliant) hosts may not send ACK TPs for pending
+        * IN transfers due to a mishandled error condition. Synopsys
+        * STAR 9000614252.
+        */
+       if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
+               cmd |= DWC3_DEPCMD_CLEARPENDIN;
+
+       memset(&params, 0, sizeof(params));
+
+       return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+}
+
 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
                struct dwc3_trb *trb)
 {
@@ -1314,8 +1336,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
                else
                        dep->flags |= DWC3_EP_STALL;
        } else {
-               ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
-                       DWC3_DEPCMD_CLEARSTALL, &params);
+               ret = dwc3_send_clear_stall_ep_cmd(dep);
                if (ret)
                        dev_err(dwc->dev, "failed to clear STALL on %s\n",
                                        dep->name);
@@ -2247,7 +2268,6 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
 
        for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
                struct dwc3_ep *dep;
-               struct dwc3_gadget_ep_cmd_params params;
                int ret;
 
                dep = dwc->eps[epnum];
@@ -2259,9 +2279,7 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
 
                dep->flags &= ~DWC3_EP_STALL;
 
-               memset(&params, 0, sizeof(params));
-               ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
-                               DWC3_DEPCMD_CLEARSTALL, &params);
+               ret = dwc3_send_clear_stall_ep_cmd(dep);
                WARN_ON_ONCE(ret);
        }
 }
index d67de0d22a2be52b853f734e16bed29f27041bad..eb648485a58c571001c4e31b7c79aadaaee04445 100644 (file)
@@ -1868,14 +1868,19 @@ unknown:
                                }
                                break;
                        }
-                       req->length = value;
-                       req->context = cdev;
-                       req->zero = value < w_length;
-                       value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
-                       if (value < 0) {
-                               DBG(cdev, "ep_queue --> %d\n", value);
-                               req->status = 0;
-                               composite_setup_complete(gadget->ep0, req);
+
+                       if (value >= 0) {
+                               req->length = value;
+                               req->context = cdev;
+                               req->zero = value < w_length;
+                               value = composite_ep0_queue(cdev, req,
+                                                           GFP_ATOMIC);
+                               if (value < 0) {
+                                       DBG(cdev, "ep_queue --> %d\n", value);
+                                       req->status = 0;
+                                       composite_setup_complete(gadget->ep0,
+                                                                req);
+                               }
                        }
                        return value;
                }
index b6f60ca8a0357be74845d78dcb04b7ca1d7dda98..70cf3477f951a11931032c2c73d59ace5b7a1163 100644 (file)
@@ -1401,6 +1401,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
                .owner          = THIS_MODULE,
                .name           = "configfs-gadget",
        },
+       .match_existing_only = 1,
 };
 
 static struct config_group *gadgets_make(
index 73515d54e1cc1138cdb2ab5c74d480cc780dfd29..cc33d26674080a1359ed7be97410bb640453b469 100644 (file)
@@ -2051,7 +2051,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
 
                if (len < sizeof(*d) ||
                    d->bFirstInterfaceNumber >= ffs->interfaces_count ||
-                   d->Reserved1)
+                   !d->Reserved1)
                        return -EINVAL;
                for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
                        if (d->Reserved2[i])
@@ -2729,6 +2729,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
                func->ffs->ss_descs_count;
 
        int fs_len, hs_len, ss_len, ret, i;
+       struct ffs_ep *eps_ptr;
 
        /* Make it a single chunk, less management later on */
        vla_group(d);
@@ -2777,12 +2778,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
               ffs->raw_descs_length);
 
        memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
-       for (ret = ffs->eps_count; ret; --ret) {
-               struct ffs_ep *ptr;
-
-               ptr = vla_ptr(vlabuf, d, eps);
-               ptr[ret].num = -1;
-       }
+       eps_ptr = vla_ptr(vlabuf, d, eps);
+       for (i = 0; i < ffs->eps_count; i++)
+               eps_ptr[i].num = -1;
 
        /* Save pointers
         * d_eps == vlabuf, func->eps used to kfree vlabuf later
@@ -2851,7 +2849,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
                goto error;
 
        func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
-       if (c->cdev->use_os_string)
+       if (c->cdev->use_os_string) {
                for (i = 0; i < ffs->interfaces_count; ++i) {
                        struct usb_os_desc *desc;
 
@@ -2862,13 +2860,15 @@ static int _ffs_func_bind(struct usb_configuration *c,
                                vla_ptr(vlabuf, d, ext_compat) + i * 16;
                        INIT_LIST_HEAD(&desc->ext_prop);
                }
-       ret = ffs_do_os_descs(ffs->ms_os_descs_count,
-                             vla_ptr(vlabuf, d, raw_descs) +
-                             fs_len + hs_len + ss_len,
-                             d_raw_descs__sz - fs_len - hs_len - ss_len,
-                             __ffs_func_bind_do_os_desc, func);
-       if (unlikely(ret < 0))
-               goto error;
+               ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+                                     vla_ptr(vlabuf, d, raw_descs) +
+                                     fs_len + hs_len + ss_len,
+                                     d_raw_descs__sz - fs_len - hs_len -
+                                     ss_len,
+                                     __ffs_func_bind_do_os_desc, func);
+               if (unlikely(ret < 0))
+                       goto error;
+       }
        func->function.os_desc_n =
                c->cdev->use_os_string ? ffs->interfaces_count : 0;
 
index c45104e3a64bfe147e93b4055ef7ad7d21b1cd2a..64706a789580dd3bb1641b151d15f0b0831c43ed 100644 (file)
@@ -161,14 +161,6 @@ static struct usb_endpoint_descriptor hs_ep_out_desc = {
        .wMaxPacketSize =       cpu_to_le16(512)
 };
 
-static struct usb_qualifier_descriptor dev_qualifier = {
-       .bLength =              sizeof(dev_qualifier),
-       .bDescriptorType =      USB_DT_DEVICE_QUALIFIER,
-       .bcdUSB =               cpu_to_le16(0x0200),
-       .bDeviceClass =         USB_CLASS_PRINTER,
-       .bNumConfigurations =   1
-};
-
 static struct usb_descriptor_header *hs_printer_function[] = {
        (struct usb_descriptor_header *) &intf_desc,
        (struct usb_descriptor_header *) &hs_ep_in_desc,
index 35fe3c80cfc09a432cd4793a9ea125781bf4525f..197f73386fac9ab45473de09db462ab5e4640d90 100644 (file)
@@ -1445,16 +1445,18 @@ static void usbg_drop_tpg(struct se_portal_group *se_tpg)
        for (i = 0; i < TPG_INSTANCES; ++i)
                if (tpg_instances[i].tpg == tpg)
                        break;
-       if (i < TPG_INSTANCES)
+       if (i < TPG_INSTANCES) {
                tpg_instances[i].tpg = NULL;
-       opts = container_of(tpg_instances[i].func_inst,
-               struct f_tcm_opts, func_inst);
-       mutex_lock(&opts->dep_lock);
-       if (opts->has_dep)
-               module_put(opts->dependent);
-       else
-               configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
-       mutex_unlock(&opts->dep_lock);
+               opts = container_of(tpg_instances[i].func_inst,
+                       struct f_tcm_opts, func_inst);
+               mutex_lock(&opts->dep_lock);
+               if (opts->has_dep)
+                       module_put(opts->dependent);
+               else
+                       configfs_undepend_item_unlocked(
+                               &opts->func_inst.group.cg_item);
+               mutex_unlock(&opts->dep_lock);
+       }
        mutex_unlock(&tpg_instances_lock);
 
        kfree(tpg);
index 186d4b1625248462ec51e099ef9677d2dacc788d..cd214ec8a601b4913b67f7d67e52fb8c57e003c4 100644 (file)
@@ -598,18 +598,6 @@ static struct usb_gadget_strings *fn_strings[] = {
        NULL,
 };
 
-static struct usb_qualifier_descriptor devqual_desc = {
-       .bLength = sizeof devqual_desc,
-       .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
-
-       .bcdUSB = cpu_to_le16(0x200),
-       .bDeviceClass = USB_CLASS_MISC,
-       .bDeviceSubClass = 0x02,
-       .bDeviceProtocol = 0x01,
-       .bNumConfigurations = 1,
-       .bRESERVED = 0,
-};
-
 static struct usb_interface_assoc_descriptor iad_desc = {
        .bLength = sizeof iad_desc,
        .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
@@ -1292,6 +1280,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 
        if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
                struct cntrl_cur_lay3 c;
+               memset(&c, 0, sizeof(struct cntrl_cur_lay3));
 
                if (entity_id == USB_IN_CLK_ID)
                        c.dCUR = p_srate;
index d62683017cf3c1c052f78eb243799e1ca46ca8d5..990df221c6299a2cd9d2ecb9aab54fda5447560e 100644 (file)
@@ -83,9 +83,7 @@ EXPORT_SYMBOL_GPL(fsg_fs_function);
  * USB 2.0 devices need to expose both high speed and full speed
  * descriptors, unless they only run at full speed.
  *
- * That means alternate endpoint descriptors (bigger packets)
- * and a "device qualifier" ... plus more construction options
- * for the configuration descriptor.
+ * That means alternate endpoint descriptors (bigger packets).
  */
 struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
index e64479f882a52e787822b865eb447e8c1e484f9a..aa3707bdebb4ad7fc2d56f50bbe80ab7093cf878 100644 (file)
@@ -938,8 +938,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
                        struct usb_ep           *ep = dev->gadget->ep0;
                        struct usb_request      *req = dev->req;
 
-                       if ((retval = setup_req (ep, req, 0)) == 0)
-                               retval = usb_ep_queue (ep, req, GFP_ATOMIC);
+                       if ((retval = setup_req (ep, req, 0)) == 0) {
+                               spin_unlock_irq (&dev->lock);
+                               retval = usb_ep_queue (ep, req, GFP_KERNEL);
+                               spin_lock_irq (&dev->lock);
+                       }
                        dev->state = STATE_DEV_CONNECTED;
 
                        /* assume that was SET_CONFIGURATION */
@@ -1457,8 +1460,11 @@ delegate:
                                                        w_length);
                                if (value < 0)
                                        break;
+
+                               spin_unlock (&dev->lock);
                                value = usb_ep_queue (gadget->ep0, dev->req,
-                                                       GFP_ATOMIC);
+                                                       GFP_KERNEL);
+                               spin_lock (&dev->lock);
                                if (value < 0) {
                                        clean_req (gadget->ep0, dev->req);
                                        break;
@@ -1481,11 +1487,14 @@ delegate:
        if (value >= 0 && dev->state != STATE_DEV_SETUP) {
                req->length = value;
                req->zero = value < w_length;
-               value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
+
+               spin_unlock (&dev->lock);
+               value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
                if (value < 0) {
                        DBG (dev, "ep_queue --> %d\n", value);
                        req->status = 0;
                }
+               return value;
        }
 
        /* device stalls when value < 0 */
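
All three gadgetfs hunks above share one shape: dev->lock is dropped across usb_ep_queue() so the submission may sleep (GFP_KERNEL) instead of being forced to GFP_ATOMIC under a held spinlock. A schematic userspace sketch of that unlock-call-relock pattern, with a pthread mutex standing in for the spinlock and queue_request() standing in for usb_ep_queue():

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static int queue_request(void)	/* stands in for usb_ep_queue() */
	{
		return 0;		/* may sleep in the real driver */
	}

	static int submit(void)
	{
		int ret;

		pthread_mutex_lock(&lock);
		/* ... prepare the request under the lock ... */
		pthread_mutex_unlock(&lock);	/* drop it across the sleeping call */
		ret = queue_request();
		pthread_mutex_lock(&lock);
		/* ... revalidate any state the call may have raced with ... */
		pthread_mutex_unlock(&lock);
		return ret;
	}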
index 6e8300d6a7372043ded9916c540fbd2d4c6b05fd..e1b2dcebdc2ecb741e850355a546325f9b3121ff 100644 (file)
@@ -603,11 +603,15 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
                }
        }
 
-       list_add_tail(&driver->pending, &gadget_driver_pending_list);
-       pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
-               driver->function);
+       if (!driver->match_existing_only) {
+               list_add_tail(&driver->pending, &gadget_driver_pending_list);
+               pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
+                       driver->function);
+               ret = 0;
+       }
+
        mutex_unlock(&udc_lock);
-       return 0;
+       return ret;
 found:
        ret = udc_bind_to_driver(udc, driver);
        mutex_unlock(&udc_lock);
index ae1b6e69eb965b88dd13a3934d026af25d27c808..a962b89b65a665df737e8ebd4f48646064805334 100644 (file)
@@ -368,6 +368,15 @@ static void ehci_shutdown(struct usb_hcd *hcd)
 {
        struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 
+       /*
+        * Protect the system from crashing at shutdown in cases where the
+        * USB host has not yet been added by the OTG controller driver.
+        * ehci_setup() has not been done yet, so do not access registers
+        * or variables initialized in ehci_setup().
+        */
+       if (!ehci->sbrn)
+               return;
+
        spin_lock_irq(&ehci->lock);
        ehci->shutdown = true;
        ehci->rh_state = EHCI_RH_STOPPING;
index ffc90295a95f284e7e234f121429b5f957dedc07..74f62d68f01362d144e8de21bd188c06e31a7945 100644 (file)
@@ -872,14 +872,22 @@ int ehci_hub_control(
 ) {
        struct ehci_hcd *ehci = hcd_to_ehci (hcd);
        int             ports = HCS_N_PORTS (ehci->hcs_params);
-       u32 __iomem     *status_reg = &ehci->regs->port_status[
-                               (wIndex & 0xff) - 1];
-       u32 __iomem     *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
+       u32 __iomem     *status_reg, *hostpc_reg;
        u32             temp, temp1, status;
        unsigned long   flags;
        int             retval = 0;
        unsigned        selector;
 
+       /*
+        * Avoid underflow while calculating (wIndex & 0xff) - 1.
+        * The compiler might deduce that wIndex can never be 0 and then
+        * optimize away the tests for !wIndex below.
+        */
+       temp = wIndex & 0xff;
+       temp -= (temp > 0);
+       status_reg = &ehci->regs->port_status[temp];
+       hostpc_reg = &ehci->regs->hostpc[temp];
+
        /*
         * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
         * HCS_INDICATOR may say we can change LEDs to off/amber/green.
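
The branchless decrement above, temp -= (temp > 0), converts the 1-based wIndex into a 0-based array index while mapping 0 to 0, and avoids an explicit branch the compiler could use to conclude that wIndex is never zero. A standalone sketch of the idiom:

	#include <assert.h>

	/* 1-based port number to 0-based index; 0 stays 0 instead of
	 * wrapping around to a huge unsigned value. */
	static unsigned int port_to_index(unsigned int wIndex)
	{
		unsigned int temp = wIndex & 0xff;

		temp -= (temp > 0);
		return temp;
	}

	int main(void)
	{
		assert(port_to_index(0) == 0);
		assert(port_to_index(1) == 0);
		assert(port_to_index(255) == 254);
		return 0;
	}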
index d3afc89d00f533089a207662bff530e9672d6c2e..2f8d3af811ce0689b15eb1f19b98edd092d61198 100644 (file)
@@ -179,22 +179,32 @@ static int ehci_msm_remove(struct platform_device *pdev)
 static int ehci_msm_pm_suspend(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
        bool do_wakeup = device_may_wakeup(dev);
 
        dev_dbg(dev, "ehci-msm PM suspend\n");
 
-       return ehci_suspend(hcd, do_wakeup);
+       /* Only call ehci_suspend if ehci_setup has been done */
+       if (ehci->sbrn)
+               return ehci_suspend(hcd, do_wakeup);
+
+       return 0;
 }
 
 static int ehci_msm_pm_resume(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 
        dev_dbg(dev, "ehci-msm PM resume\n");
-       ehci_resume(hcd, false);
+
+       /* Only call ehci_resume if ehci_setup has been done */
+       if (ehci->sbrn)
+               ehci_resume(hcd, false);
 
        return 0;
 }
+
 #else
 #define ehci_msm_pm_suspend    NULL
 #define ehci_msm_pm_resume     NULL
index a94ed677d93747c143d01879a175bad23f8f795f..be4a2788fc5828ea172c5c72f5e5f5b5822f052d 100644 (file)
@@ -206,7 +206,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
                priv->clk48 = NULL;
        }
 
-       priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+       priv->pwr =
+               devm_reset_control_get_optional_shared(&dev->dev, "power");
        if (IS_ERR(priv->pwr)) {
                err = PTR_ERR(priv->pwr);
                if (err == -EPROBE_DEFER)
@@ -214,7 +215,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
                priv->pwr = NULL;
        }
 
-       priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+       priv->rst =
+               devm_reset_control_get_optional_shared(&dev->dev, "softreset");
        if (IS_ERR(priv->rst)) {
                err = PTR_ERR(priv->rst);
                if (err == -EPROBE_DEFER)
index 4031b372008ef06c4f6f29c34b3f3399f736d00b..9a3d7db5be57cb001ac982516e0e5d847c14409a 100644 (file)
@@ -81,15 +81,23 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
        struct usb_hcd *hcd = platform_get_drvdata(pdev);
        struct tegra_ehci_hcd *tegra =
                (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+       bool has_utmi_pad_registers = false;
 
        phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
        if (!phy_np)
                return -ENOENT;
 
+       if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers"))
+               has_utmi_pad_registers = true;
+
        if (!usb1_reset_attempted) {
                struct reset_control *usb1_reset;
 
-               usb1_reset = of_reset_control_get(phy_np, "usb");
+               if (!has_utmi_pad_registers)
+                       usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
+               else
+                       usb1_reset = tegra->rst;
+
                if (IS_ERR(usb1_reset)) {
                        dev_warn(&pdev->dev,
                                 "can't get utmi-pads reset from the PHY\n");
@@ -99,13 +107,15 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
                        reset_control_assert(usb1_reset);
                        udelay(1);
                        reset_control_deassert(usb1_reset);
+
+                       if (!has_utmi_pad_registers)
+                               reset_control_put(usb1_reset);
                }
 
-               reset_control_put(usb1_reset);
                usb1_reset_attempted = true;
        }
 
-       if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) {
+       if (!has_utmi_pad_registers) {
                reset_control_assert(tegra->rst);
                udelay(1);
                reset_control_deassert(tegra->rst);
index d029bbe9eb36a884fed45fd6f648bfca51199d65..641fed609911578ca8481ebb7a830f7ae418a924 100644 (file)
@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
 {
        int     branch;
 
-       ed->state = ED_OPER;
        ed->ed_prev = NULL;
        ed->ed_next = NULL;
        ed->hwNextED = 0;
@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
        /* the HC may not see the schedule updates yet, but if it does
         * then they'll be properly ordered.
         */
+
+       ed->state = ED_OPER;
        return 0;
 }
 
index acf2eb2a56766358c2bbdef931b070c2c81ad43d..02816a1515a1dc4dc0ec06506855bf0fc57a799c 100644 (file)
@@ -188,13 +188,15 @@ static int st_ohci_platform_probe(struct platform_device *dev)
                priv->clk48 = NULL;
        }
 
-       priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+       priv->pwr =
+               devm_reset_control_get_optional_shared(&dev->dev, "power");
        if (IS_ERR(priv->pwr)) {
                err = PTR_ERR(priv->pwr);
                goto err_put_clks;
        }
 
-       priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+       priv->rst =
+               devm_reset_control_get_optional_shared(&dev->dev, "softreset");
        if (IS_ERR(priv->rst)) {
                err = PTR_ERR(priv->rst);
                goto err_put_clks;
index 48672fac7ff3eb22389acfb4889a513e8ac79948..c10972fcc8e489fe07598c9cc036574c926a4a4b 100644 (file)
@@ -37,6 +37,7 @@
 /* Device for a quirk */
 #define PCI_VENDOR_ID_FRESCO_LOGIC     0x1b73
 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009      0x1009
 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400      0x1400
 
 #define PCI_VENDOR_ID_ETRON            0x1b6f
@@ -114,6 +115,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        }
 
+       if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+                       pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
+               xhci->quirks |= XHCI_BROKEN_STREAMS;
+
        if (pdev->vendor == PCI_VENDOR_ID_NEC)
                xhci->quirks |= XHCI_NEC_HOST;
 
index 676ea458148b8e87631c317b0356a381ccc04e31..1f3f981fe7f8182b4b4b1707aa4d5f439e556fe6 100644 (file)
@@ -196,6 +196,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
                ret = clk_prepare_enable(clk);
                if (ret)
                        goto put_hcd;
+       } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto put_hcd;
        }
 
        xhci = hcd_to_xhci(hcd);
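
The new else-if distinguishes a genuinely absent (optional) clock from a clock whose provider has not probed yet: only -EPROBE_DEFER is propagated, so the probe is retried later instead of running unclocked. A small sketch of that distinction (the error constant is an illustrative stand-in):

	#define MY_EPROBE_DEFER 517	/* illustrative stand-in for -EPROBE_DEFER */

	/* clk >= 0 is a valid handle; clk < 0 encodes a negative errno
	 * from a hypothetical clock lookup. */
	static int handle_optional_clk(int clk)
	{
		if (clk >= 0)
			return 0;			/* clock present: use it */
		if (clk == -MY_EPROBE_DEFER)
			return -MY_EPROBE_DEFER;	/* provider not ready: retry */
		return 0;				/* truly absent: optional */
	}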
index 52deae4b7eac2567201f0277d27cfd89c1b01499..d7d502578d799a59c735d8a8ce777f1f6157498d 100644 (file)
@@ -290,6 +290,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+
+       /*
+        * Writing the CMD_RING_ABORT bit should cause a cmd completion event;
+        * however, on some host hw the CMD_RING_RUNNING bit is correctly
+        * cleared but the completion event is never sent. Use the cmd timeout
+        * timer to handle those cases. Use twice the time to cover the bit
+        * polling retry.
+        */
+       mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                        &xhci->op_regs->cmd_ring);
 
@@ -314,6 +322,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
                xhci_err(xhci, "Stopped the command ring failed, "
                                "maybe the host is dead\n");
+               del_timer(&xhci->cmd_timer);
                xhci->xhc_state |= XHCI_STATE_DYING;
                xhci_quiesce(xhci);
                xhci_halt(xhci);
@@ -1246,22 +1255,21 @@ void xhci_handle_command_timeout(unsigned long data)
        int ret;
        unsigned long flags;
        u64 hw_ring_state;
-       struct xhci_command *cur_cmd = NULL;
+       bool second_timeout = false;
        xhci = (struct xhci_hcd *) data;
 
        /* mark this command to be cancelled */
        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->current_cmd) {
-               cur_cmd = xhci->current_cmd;
-               cur_cmd->status = COMP_CMD_ABORT;
+               if (xhci->current_cmd->status == COMP_CMD_ABORT)
+                       second_timeout = true;
+               xhci->current_cmd->status = COMP_CMD_ABORT;
        }
 
-
        /* Make sure command ring is running before aborting it */
        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
            (hw_ring_state & CMD_RING_RUNNING))  {
-
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "Command timeout\n");
                ret = xhci_abort_cmd_ring(xhci);
@@ -1273,6 +1281,15 @@ void xhci_handle_command_timeout(unsigned long data)
                }
                return;
        }
+
+       /* command ring failed to restart, or host removed. Bail out */
+       if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
+               xhci_cleanup_command_queue(xhci);
+               return;
+       }
+
        /* command timeout on stopped ring, ring can't be aborted */
        xhci_dbg(xhci, "Command timeout on stopped ring\n");
        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
@@ -2721,7 +2738,8 @@ hw_died:
                writel(irq_pending, &xhci->ir_set->irq_pending);
        }
 
-       if (xhci->xhc_state & XHCI_STATE_DYING) {
+       if (xhci->xhc_state & XHCI_STATE_DYING ||
+           xhci->xhc_state & XHCI_STATE_HALTED) {
                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
                                "Shouldn't IRQs be disabled?\n");
                /* Clear the event handler busy flag (RW1C);
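
In xhci_handle_command_timeout() above, the command's status field doubles as a one-bit memory: if the timer fires while the current command is already marked COMP_CMD_ABORT, the earlier abort evidently never completed, so the handler cleans up the queue instead of re-arming. A compact sketch of that second-expiry detection (names invented for illustration):

	#include <stdbool.h>

	enum cmd_status { CMD_PENDING, CMD_ABORT_REQUESTED };

	struct cmd { enum cmd_status status; };

	/* Called on each expiry of the command timer; returns true when
	 * the caller should stop retrying and tear the queue down. */
	static bool on_timeout(struct cmd *cur)
	{
		bool second_timeout = (cur->status == CMD_ABORT_REQUESTED);

		cur->status = CMD_ABORT_REQUESTED;	/* remember this attempt */
		return second_timeout;
	}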
index fa7e1ef36cd943648b606fa18da18943b3ad7cdc..f2f9518c53ab437548133349101983389c8de78b 100644 (file)
@@ -685,20 +685,23 @@ void xhci_stop(struct usb_hcd *hcd)
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-       if (xhci->xhc_state & XHCI_STATE_HALTED)
-               return;
-
        mutex_lock(&xhci->mutex);
-       spin_lock_irq(&xhci->lock);
-       xhci->xhc_state |= XHCI_STATE_HALTED;
-       xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 
-       /* Make sure the xHC is halted for a USB3 roothub
-        * (xhci_stop() could be called as part of failed init).
-        */
-       xhci_halt(xhci);
-       xhci_reset(xhci);
-       spin_unlock_irq(&xhci->lock);
+       if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
+               spin_lock_irq(&xhci->lock);
+
+               xhci->xhc_state |= XHCI_STATE_HALTED;
+               xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+               xhci_halt(xhci);
+               xhci_reset(xhci);
+
+               spin_unlock_irq(&xhci->lock);
+       }
+
+       if (!usb_hcd_is_primary_hcd(hcd)) {
+               mutex_unlock(&xhci->mutex);
+               return;
+       }
 
        xhci_cleanup_msix(xhci);
 
@@ -4886,7 +4889,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
        xhci_print_registers(xhci);
 
-       xhci->quirks = quirks;
+       xhci->quirks |= quirks;
 
        get_quirks(dev, xhci);
 
index 39fd95833eb8317b63f4f1e1a781f611bfcda5cd..f824336def5c558de1362f9dead14880ba2fb88f 100644 (file)
@@ -1090,29 +1090,6 @@ void musb_stop(struct musb *musb)
        musb_platform_try_idle(musb, 0);
 }
 
-static void musb_shutdown(struct platform_device *pdev)
-{
-       struct musb     *musb = dev_to_musb(&pdev->dev);
-       unsigned long   flags;
-
-       pm_runtime_get_sync(musb->controller);
-
-       musb_host_cleanup(musb);
-       musb_gadget_cleanup(musb);
-
-       spin_lock_irqsave(&musb->lock, flags);
-       musb_platform_disable(musb);
-       musb_generic_disable(musb);
-       spin_unlock_irqrestore(&musb->lock, flags);
-
-       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
-       musb_platform_exit(musb);
-
-       pm_runtime_put(musb->controller);
-       /* FIXME power down */
-}
-
-
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -1702,7 +1679,7 @@ EXPORT_SYMBOL_GPL(musb_dma_completion);
 #define use_dma                        0
 #endif
 
-static void (*musb_phy_callback)(enum musb_vbus_id_status status);
+static int (*musb_phy_callback)(enum musb_vbus_id_status status);
 
 /*
  * musb_mailbox - optional phy notifier function
@@ -1711,11 +1688,12 @@ static void (*musb_phy_callback)(enum musb_vbus_id_status status);
  * Optionally gets called from the USB PHY. Note that the USB PHY must be
  * disabled at the point the phy_callback is registered or unregistered.
  */
-void musb_mailbox(enum musb_vbus_id_status status)
+int musb_mailbox(enum musb_vbus_id_status status)
 {
        if (musb_phy_callback)
-               musb_phy_callback(status);
+               return musb_phy_callback(status);
 
+       return -ENODEV;
 };
 EXPORT_SYMBOL_GPL(musb_mailbox);
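
With musb_mailbox() now returning an int, a PHY driver can tell whether the glue layer actually consumed the event (-ENODEV when no callback is registered); the twl6030 hunks later in this diff use that return value to roll linkstat back to MUSB_UNKNOWN. A minimal sketch of the dispatch (callback signature simplified for illustration):

	#include <errno.h>

	/* NULL until a glue layer registers; simplified signature */
	static int (*phy_callback)(int status);

	static int mailbox(int status)
	{
		if (phy_callback)
			return phy_callback(status);

		return -ENODEV;	/* nobody consumed the event */
	}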
 
@@ -2028,11 +2006,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        musb_readl = musb_default_readl;
        musb_writel = musb_default_writel;
 
-       /* We need musb_read/write functions initialized for PM */
-       pm_runtime_use_autosuspend(musb->controller);
-       pm_runtime_set_autosuspend_delay(musb->controller, 200);
-       pm_runtime_enable(musb->controller);
-
        /* The musb_platform_init() call:
         *   - adjusts musb->mregs
         *   - sets the musb->isr
@@ -2134,6 +2107,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (musb->ops->phy_callback)
                musb_phy_callback = musb->ops->phy_callback;
 
+       /*
+        * We need the musb_read/write functions initialized for PM.
+        * Note that at least the 2430 glue needs an autosuspend delay
+        * somewhere above 300 ms for the hardware to idle properly
+        * after disconnecting the cable in host mode. Let's use
+        * 500 ms for some margin.
+        */
+       pm_runtime_use_autosuspend(musb->controller);
+       pm_runtime_set_autosuspend_delay(musb->controller, 500);
+       pm_runtime_enable(musb->controller);
        pm_runtime_get_sync(musb->controller);
 
        status = usb_phy_init(musb->xceiv);
@@ -2237,13 +2220,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (status)
                goto fail5;
 
-       pm_runtime_put(musb->controller);
-
-       /*
-        * For why this is currently needed, see commit 3e43a0725637
-        * ("usb: musb: core: add pm_runtime_irq_safe()")
-        */
-       pm_runtime_irq_safe(musb->controller);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 
        return 0;
 
@@ -2265,7 +2243,9 @@ fail2_5:
        usb_phy_shutdown(musb->xceiv);
 
 err_usb_phy_init:
+       pm_runtime_dont_use_autosuspend(musb->controller);
        pm_runtime_put_sync(musb->controller);
+       pm_runtime_disable(musb->controller);
 
 fail2:
        if (musb->irq_wake)
@@ -2273,7 +2253,6 @@ fail2:
        musb_platform_exit(musb);
 
 fail1:
-       pm_runtime_disable(musb->controller);
        dev_err(musb->controller,
                "musb_init_controller failed with status %d\n", status);
 
@@ -2312,6 +2291,7 @@ static int musb_remove(struct platform_device *pdev)
 {
        struct device   *dev = &pdev->dev;
        struct musb     *musb = dev_to_musb(dev);
+       unsigned long   flags;
 
        /* this gets called on rmmod.
         *  - Host mode: host may still be active
@@ -2319,17 +2299,26 @@ static int musb_remove(struct platform_device *pdev)
         *  - OTG mode: both roles are deactivated (or never-activated)
         */
        musb_exit_debugfs(musb);
-       musb_shutdown(pdev);
-       musb_phy_callback = NULL;
-
-       if (musb->dma_controller)
-               musb_dma_controller_destroy(musb->dma_controller);
-
-       usb_phy_shutdown(musb->xceiv);
 
        cancel_work_sync(&musb->irq_work);
        cancel_delayed_work_sync(&musb->finish_resume_work);
        cancel_delayed_work_sync(&musb->deassert_reset_work);
+       pm_runtime_get_sync(musb->controller);
+       musb_host_cleanup(musb);
+       musb_gadget_cleanup(musb);
+       spin_lock_irqsave(&musb->lock, flags);
+       musb_platform_disable(musb);
+       musb_generic_disable(musb);
+       spin_unlock_irqrestore(&musb->lock, flags);
+       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+       pm_runtime_dont_use_autosuspend(musb->controller);
+       pm_runtime_put_sync(musb->controller);
+       pm_runtime_disable(musb->controller);
+       musb_platform_exit(musb);
+       musb_phy_callback = NULL;
+       if (musb->dma_controller)
+               musb_dma_controller_destroy(musb->dma_controller);
+       usb_phy_shutdown(musb->xceiv);
        musb_free(musb);
        device_init_wakeup(dev, 0);
        return 0;
@@ -2429,7 +2418,8 @@ static void musb_restore_context(struct musb *musb)
        musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
        musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
        musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
-       musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
+       if (musb->context.devctl & MUSB_DEVCTL_SESSION)
+               musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
 
        for (i = 0; i < musb->config->num_eps; ++i) {
                struct musb_hw_ep       *hw_ep;
@@ -2612,7 +2602,6 @@ static struct platform_driver musb_driver = {
        },
        .probe          = musb_probe,
        .remove         = musb_remove,
-       .shutdown       = musb_shutdown,
 };
 
 module_platform_driver(musb_driver);
index b6afe9e4330545d6b3bd1d432374d61f7cabbf3f..b55a776b03eb5d37f4bbb3ec337656f3d0fc3c55 100644 (file)
@@ -215,7 +215,7 @@ struct musb_platform_ops {
                                dma_addr_t *dma_addr, u32 *len);
        void    (*pre_root_reset_end)(struct musb *musb);
        void    (*post_root_reset_end)(struct musb *musb);
-       void    (*phy_callback)(enum musb_vbus_id_status status);
+       int     (*phy_callback)(enum musb_vbus_id_status status);
 };
 
 /*
@@ -312,6 +312,7 @@ struct musb {
        struct work_struct      irq_work;
        struct delayed_work     deassert_reset_work;
        struct delayed_work     finish_resume_work;
+       struct delayed_work     gadget_work;
        u16                     hwvers;
 
        u16                     intrrxe;
index 152865b3652295caf52340ca86397d5494f34c84..af2a3a7addf904a736e13d81d95f5caac5630f50 100644 (file)
@@ -1656,6 +1656,20 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
        return usb_phy_set_power(musb->xceiv, mA);
 }
 
+static void musb_gadget_work(struct work_struct *work)
+{
+       struct musb *musb;
+       unsigned long flags;
+
+       musb = container_of(work, struct musb, gadget_work.work);
+       pm_runtime_get_sync(musb->controller);
+       spin_lock_irqsave(&musb->lock, flags);
+       musb_pullup(musb, musb->softconnect);
+       spin_unlock_irqrestore(&musb->lock, flags);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
+}
+
 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
 {
        struct musb     *musb = gadget_to_musb(gadget);
@@ -1663,20 +1677,16 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
 
        is_on = !!is_on;
 
-       pm_runtime_get_sync(musb->controller);
-
        /* NOTE: this assumes we are sensing vbus; we'd rather
         * not pullup unless the B-session is active.
         */
        spin_lock_irqsave(&musb->lock, flags);
        if (is_on != musb->softconnect) {
                musb->softconnect = is_on;
-               musb_pullup(musb, is_on);
+               schedule_delayed_work(&musb->gadget_work, 0);
        }
        spin_unlock_irqrestore(&musb->lock, flags);
 
-       pm_runtime_put(musb->controller);
-
        return 0;
 }
 
@@ -1845,7 +1855,7 @@ int musb_gadget_setup(struct musb *musb)
 #elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
        musb->g.is_otg = 0;
 #endif
-
+       INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
        musb_g_init_endpoints(musb);
 
        musb->is_active = 0;
@@ -1866,6 +1876,8 @@ void musb_gadget_cleanup(struct musb *musb)
 {
        if (musb->port_mode == MUSB_PORT_MODE_HOST)
                return;
+
+       cancel_delayed_work_sync(&musb->gadget_work);
        usb_del_gadget_udc(&musb->g);
 }
 
@@ -1914,8 +1926,8 @@ static int musb_gadget_start(struct usb_gadget *g,
        if (musb->xceiv->last_event == USB_EVENT_ID)
                musb_platform_set_vbus(musb, 1);
 
-       if (musb->xceiv->last_event == USB_EVENT_NONE)
-               pm_runtime_put(musb->controller);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 
        return 0;
 
@@ -1934,8 +1946,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
        struct musb     *musb = gadget_to_musb(g);
        unsigned long   flags;
 
-       if (musb->xceiv->last_event == USB_EVENT_NONE)
-               pm_runtime_get_sync(musb->controller);
+       pm_runtime_get_sync(musb->controller);
 
        /*
         * REVISIT always use otg_set_peripheral() here too;
@@ -1963,7 +1974,8 @@ static int musb_gadget_stop(struct usb_gadget *g)
         * that currently misbehaves.
         */
 
-       pm_runtime_put(musb->controller);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 
        return 0;
 }
index 2f8ad7f1f482cd55116b8cb3a7fd575fc79c360a..d227a71d85e19cffef3a11662bb9a23affa18fb3 100644 (file)
@@ -434,7 +434,13 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
                }
        }
 
-       if (qh != NULL && qh->is_ready) {
+       /*
+        * The pipe must be broken if the current urb->status is set, so
+        * don't start the next urb.
+        * TODO: to minimize the risk of regression, only check urb->status
+        * for RX until we have a test case that clarifies the behavior of TX.
+        */
+       if ((!status || !is_in) && qh && qh->is_ready) {
                dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
                    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
                musb_start_urb(musb, is_in, qh);
@@ -594,14 +600,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
                musb_writew(ep->regs, MUSB_TXCSR, 0);
 
        /* scrub all previous state, clearing toggle */
-       } else {
-               csr = musb_readw(ep->regs, MUSB_RXCSR);
-               if (csr & MUSB_RXCSR_RXPKTRDY)
-                       WARNING("rx%d, packet/%d ready?\n", ep->epnum,
-                               musb_readw(ep->regs, MUSB_RXCOUNT));
-
-               musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
        }
+       csr = musb_readw(ep->regs, MUSB_RXCSR);
+       if (csr & MUSB_RXCSR_RXPKTRDY)
+               WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+                       musb_readw(ep->regs, MUSB_RXCOUNT));
+
+       musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
 
        /* target addr and (for multipoint) hub addr/port */
        if (musb->is_multipoint) {
@@ -627,7 +632,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
        ep->rx_reinit = 0;
 }
 
-static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
+static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
                struct musb_hw_ep *hw_ep, struct musb_qh *qh,
                struct urb *urb, u32 offset,
                u32 *length, u8 *mode)
@@ -664,23 +669,18 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
        }
        channel->desired_mode = *mode;
        musb_writew(epio, MUSB_TXCSR, csr);
-
-       return 0;
 }
 
-static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
-                                         struct musb_hw_ep *hw_ep,
-                                         struct musb_qh *qh,
-                                         struct urb *urb,
-                                         u32 offset,
-                                         u32 *length,
-                                         u8 *mode)
+static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
+                                          struct musb_hw_ep *hw_ep,
+                                          struct musb_qh *qh,
+                                          struct urb *urb,
+                                          u32 offset,
+                                          u32 *length,
+                                          u8 *mode)
 {
        struct dma_channel *channel = hw_ep->tx_channel;
 
-       if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb))
-               return -ENODEV;
-
        channel->actual_len = 0;
 
        /*
@@ -688,8 +688,6 @@ static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
         * to identify the zero-length-final-packet case.
         */
        *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
-
-       return 0;
 }
 
 static bool musb_tx_dma_program(struct dma_controller *dma,
@@ -699,15 +697,14 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
        struct dma_channel      *channel = hw_ep->tx_channel;
        u16                     pkt_size = qh->maxpacket;
        u8                      mode;
-       int                     res;
 
        if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
-               res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb,
-                                                offset, &length, &mode);
+               musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
+                                           &length, &mode);
+       else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
+               musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
+                                              &length, &mode);
        else
-               res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
-                                                    offset, &length, &mode);
-       if (res)
                return false;
 
        qh->segsize = length;
@@ -995,9 +992,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
        if (is_in) {
                dma = is_dma_capable() ? ep->rx_channel : NULL;
 
-               /* clear nak timeout bit */
+               /*
+                * Need to stop the transaction by clearing REQPKT first,
+                * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0
+                * HIGH-SPEED DUAL-ROLE CONTROLLER Programmer's Guide,
+                * section 9.2.2.
+                */
                rx_csr = musb_readw(epio, MUSB_RXCSR);
                rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+               rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+               musb_writew(epio, MUSB_RXCSR, rx_csr);
                rx_csr &= ~MUSB_RXCSR_DATAERROR;
                musb_writew(epio, MUSB_RXCSR, rx_csr);
 
@@ -1551,7 +1554,7 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
                                  struct urb *urb,
                                  size_t len)
 {
-       struct dma_channel *channel = hw_ep->tx_channel;
+       struct dma_channel *channel = hw_ep->rx_channel;
        void __iomem *epio = hw_ep->regs;
        dma_addr_t *buf;
        u32 length, res;
@@ -1870,6 +1873,9 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                status = -EPROTO;
                musb_writeb(epio, MUSB_RXINTERVAL, 0);
 
+               rx_csr &= ~MUSB_RXCSR_H_ERROR;
+               musb_writew(epio, MUSB_RXCSR, rx_csr);
+
        } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
 
                if (USB_ENDPOINT_XFER_ISOC != qh->type) {
index c84e0322c108710607abca65245ca4f368ba2da9..0b4cec940386e12da3b5263be910c3c218663204 100644 (file)
@@ -49,97 +49,14 @@ struct omap2430_glue {
        enum musb_vbus_id_status status;
        struct work_struct      omap_musb_mailbox_work;
        struct device           *control_otghs;
+       bool                    cable_connected;
+       bool                    enabled;
+       bool                    powered;
 };
 #define glue_to_musb(g)                platform_get_drvdata(g->musb)
 
 static struct omap2430_glue    *_glue;
 
-static struct timer_list musb_idle_timer;
-
-static void musb_do_idle(unsigned long _musb)
-{
-       struct musb     *musb = (void *)_musb;
-       unsigned long   flags;
-       u8      power;
-       u8      devctl;
-
-       spin_lock_irqsave(&musb->lock, flags);
-
-       switch (musb->xceiv->otg->state) {
-       case OTG_STATE_A_WAIT_BCON:
-
-               devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-               if (devctl & MUSB_DEVCTL_BDEVICE) {
-                       musb->xceiv->otg->state = OTG_STATE_B_IDLE;
-                       MUSB_DEV_MODE(musb);
-               } else {
-                       musb->xceiv->otg->state = OTG_STATE_A_IDLE;
-                       MUSB_HST_MODE(musb);
-               }
-               break;
-       case OTG_STATE_A_SUSPEND:
-               /* finish RESUME signaling? */
-               if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
-                       power = musb_readb(musb->mregs, MUSB_POWER);
-                       power &= ~MUSB_POWER_RESUME;
-                       dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power);
-                       musb_writeb(musb->mregs, MUSB_POWER, power);
-                       musb->is_active = 1;
-                       musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
-                                               | MUSB_PORT_STAT_RESUME);
-                       musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
-                       usb_hcd_poll_rh_status(musb->hcd);
-                       /* NOTE: it might really be A_WAIT_BCON ... */
-                       musb->xceiv->otg->state = OTG_STATE_A_HOST;
-               }
-               break;
-       case OTG_STATE_A_HOST:
-               devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-               if (devctl &  MUSB_DEVCTL_BDEVICE)
-                       musb->xceiv->otg->state = OTG_STATE_B_IDLE;
-               else
-                       musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
-       default:
-               break;
-       }
-       spin_unlock_irqrestore(&musb->lock, flags);
-}
-
-
-static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
-{
-       unsigned long           default_timeout = jiffies + msecs_to_jiffies(3);
-       static unsigned long    last_timer;
-
-       if (timeout == 0)
-               timeout = default_timeout;
-
-       /* Never idle if active, or when VBUS timeout is not set as host */
-       if (musb->is_active || ((musb->a_wait_bcon == 0)
-                       && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
-               dev_dbg(musb->controller, "%s active, deleting timer\n",
-                       usb_otg_state_string(musb->xceiv->otg->state));
-               del_timer(&musb_idle_timer);
-               last_timer = jiffies;
-               return;
-       }
-
-       if (time_after(last_timer, timeout)) {
-               if (!timer_pending(&musb_idle_timer))
-                       last_timer = timeout;
-               else {
-                       dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
-                       return;
-               }
-       }
-       last_timer = timeout;
-
-       dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
-               usb_otg_state_string(musb->xceiv->otg->state),
-               (unsigned long)jiffies_to_msecs(timeout - jiffies));
-       mod_timer(&musb_idle_timer, timeout);
-}
-
 static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
 {
        struct usb_otg  *otg = musb->xceiv->otg;
@@ -205,16 +122,6 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
                musb_readb(musb->mregs, MUSB_DEVCTL));
 }
 
-static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
-{
-       u8      devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
-       devctl |= MUSB_DEVCTL_SESSION;
-       musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-
-       return 0;
-}
-
 static inline void omap2430_low_level_exit(struct musb *musb)
 {
        u32 l;
@@ -234,22 +141,63 @@ static inline void omap2430_low_level_init(struct musb *musb)
        musb_writel(musb->mregs, OTG_FORCESTDBY, l);
 }
 
-static void omap2430_musb_mailbox(enum musb_vbus_id_status status)
+/*
+ * We can get multiple cable events, so we need to keep track
+ * of the power state. Only keep power enabled if the USB cable
+ * is connected and a gadget has been started.
+ */
+static void omap2430_set_power(struct musb *musb, bool enabled, bool cable)
+{
+       struct device *dev = musb->controller;
+       struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+       bool power_up;
+       int res;
+
+       if (glue->enabled != enabled)
+               glue->enabled = enabled;
+
+       if (glue->cable_connected != cable)
+               glue->cable_connected = cable;
+
+       power_up = glue->enabled && glue->cable_connected;
+       if (power_up == glue->powered) {
+               dev_warn(musb->controller, "power state already %i\n",
+                        power_up);
+               return;
+       }
+
+       glue->powered = power_up;
+
+       if (power_up) {
+               res = pm_runtime_get_sync(musb->controller);
+               if (res < 0) {
+                       dev_err(musb->controller, "could not enable: %i\n", res);
+                       glue->powered = false;
+               }
+       } else {
+               pm_runtime_mark_last_busy(musb->controller);
+               pm_runtime_put_autosuspend(musb->controller);
+       }
+}
+
+static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
 {
        struct omap2430_glue    *glue = _glue;
 
        if (!glue) {
                pr_err("%s: musb core is not yet initialized\n", __func__);
-               return;
+               return -EPROBE_DEFER;
        }
        glue->status = status;
 
        if (!glue_to_musb(glue)) {
                pr_err("%s: musb core is not yet ready\n", __func__);
-               return;
+               return -EPROBE_DEFER;
        }
 
        schedule_work(&glue->omap_musb_mailbox_work);
+
+       return 0;
 }
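
omap2430_set_power() above folds "gadget enabled" and "cable connected" into one desired power state and only issues a runtime-PM get or put when that state actually flips, so duplicate cable events cannot unbalance the reference count. A minimal userspace sketch of the latch (names illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	struct glue {
		bool enabled;
		bool cable_connected;
		bool powered;	/* last state requested from the PM core */
	};

	static void set_power(struct glue *g, bool enabled, bool cable)
	{
		bool want = enabled && cable;

		g->enabled = enabled;
		g->cable_connected = cable;
		if (want == g->powered)
			return;	/* duplicate event: keep get/put balanced */

		g->powered = want;
		puts(want ? "pm_runtime_get_sync"
			  : "pm_runtime_put_autosuspend");
	}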
 
 static void omap_musb_set_mailbox(struct omap2430_glue *glue)
@@ -259,6 +207,13 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
        struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
        struct omap_musb_board_data *data = pdata->board_data;
        struct usb_otg *otg = musb->xceiv->otg;
+       bool cable_connected;
+
+       cable_connected = ((glue->status == MUSB_ID_GROUND) ||
+                          (glue->status == MUSB_VBUS_VALID));
+
+       if (cable_connected)
+               omap2430_set_power(musb, glue->enabled, cable_connected);
 
        switch (glue->status) {
        case MUSB_ID_GROUND:
@@ -268,7 +223,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                musb->xceiv->otg->state = OTG_STATE_A_IDLE;
                musb->xceiv->last_event = USB_EVENT_ID;
                if (musb->gadget_driver) {
-                       pm_runtime_get_sync(dev);
                        omap_control_usb_set_mode(glue->control_otghs,
                                USB_MODE_HOST);
                        omap2430_musb_set_vbus(musb, 1);
@@ -281,8 +235,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                otg->default_a = false;
                musb->xceiv->otg->state = OTG_STATE_B_IDLE;
                musb->xceiv->last_event = USB_EVENT_VBUS;
-               if (musb->gadget_driver)
-                       pm_runtime_get_sync(dev);
                omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
                break;
 
@@ -291,11 +243,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                dev_dbg(dev, "VBUS Disconnect\n");
 
                musb->xceiv->last_event = USB_EVENT_NONE;
-               if (musb->gadget_driver) {
+               if (musb->gadget_driver)
                        omap2430_musb_set_vbus(musb, 0);
-                       pm_runtime_mark_last_busy(dev);
-                       pm_runtime_put_autosuspend(dev);
-               }
 
                if (data->interface_type == MUSB_INTERFACE_UTMI)
                        otg_set_vbus(musb->xceiv->otg, 0);
@@ -307,6 +256,9 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                dev_dbg(dev, "ID float\n");
        }
 
+       if (!cable_connected)
+               omap2430_set_power(musb, glue->enabled, cable_connected);
+
        atomic_notifier_call_chain(&musb->xceiv->notifier,
                        musb->xceiv->last_event, NULL);
 }
@@ -316,13 +268,8 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
 {
        struct omap2430_glue *glue = container_of(mailbox_work,
                                struct omap2430_glue, omap_musb_mailbox_work);
-       struct musb *musb = glue_to_musb(glue);
-       struct device *dev = musb->controller;
 
-       pm_runtime_get_sync(dev);
        omap_musb_set_mailbox(glue);
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
 }
 
 static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
@@ -389,23 +336,7 @@ static int omap2430_musb_init(struct musb *musb)
                return PTR_ERR(musb->phy);
        }
        musb->isr = omap2430_musb_interrupt;
-
-       /*
-        * Enable runtime PM for musb parent (this driver). We can't
-        * do it earlier as struct musb is not yet allocated and we
-        * need to touch the musb registers for runtime PM.
-        */
-       pm_runtime_enable(glue->dev);
-       status = pm_runtime_get_sync(glue->dev);
-       if (status < 0)
-               goto err1;
-
-       status = pm_runtime_get_sync(dev);
-       if (status < 0) {
-               dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
-               pm_runtime_put_sync(glue->dev);
-               goto err1;
-       }
+       phy_init(musb->phy);
 
        l = musb_readl(musb->mregs, OTG_INTERFSEL);
 
@@ -427,20 +358,10 @@ static int omap2430_musb_init(struct musb *musb)
                        musb_readl(musb->mregs, OTG_INTERFSEL),
                        musb_readl(musb->mregs, OTG_SIMENABLE));
 
-       setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
-
        if (glue->status != MUSB_UNKNOWN)
                omap_musb_set_mailbox(glue);
 
-       phy_init(musb->phy);
-       phy_power_on(musb->phy);
-
-       pm_runtime_put_noidle(musb->controller);
-       pm_runtime_put_noidle(glue->dev);
        return 0;
-
-err1:
-       return status;
 }
 
 static void omap2430_musb_enable(struct musb *musb)
@@ -452,6 +373,11 @@ static void omap2430_musb_enable(struct musb *musb)
        struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
        struct omap_musb_board_data *data = pdata->board_data;
 
+       if (!WARN_ON(!musb->phy))
+               phy_power_on(musb->phy);
+
+       omap2430_set_power(musb, true, glue->cable_connected);
+
        switch (glue->status) {
 
        case MUSB_ID_GROUND:
@@ -487,18 +413,25 @@ static void omap2430_musb_disable(struct musb *musb)
        struct device *dev = musb->controller;
        struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
 
+       if (!WARN_ON(!musb->phy))
+               phy_power_off(musb->phy);
+
        if (glue->status != MUSB_UNKNOWN)
                omap_control_usb_set_mode(glue->control_otghs,
                        USB_MODE_DISCONNECT);
+
+       omap2430_set_power(musb, false, glue->cable_connected);
 }
 
 static int omap2430_musb_exit(struct musb *musb)
 {
-       del_timer_sync(&musb_idle_timer);
+       struct device *dev = musb->controller;
+       struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
 
        omap2430_low_level_exit(musb);
-       phy_power_off(musb->phy);
        phy_exit(musb->phy);
+       musb->phy = NULL;
+       cancel_work_sync(&glue->omap_musb_mailbox_work);
 
        return 0;
 }
@@ -512,9 +445,6 @@ static const struct musb_platform_ops omap2430_ops = {
        .init           = omap2430_musb_init,
        .exit           = omap2430_musb_exit,
 
-       .set_mode       = omap2430_musb_set_mode,
-       .try_idle       = omap2430_musb_try_idle,
-
        .set_vbus       = omap2430_musb_set_vbus,
 
        .enable         = omap2430_musb_enable,
@@ -639,11 +569,9 @@ static int omap2430_probe(struct platform_device *pdev)
                goto err2;
        }
 
-       /*
-        * Note that we cannot enable PM runtime yet for this
-        * driver as we need struct musb initialized first.
-        * See omap2430_musb_init above.
-        */
+       pm_runtime_enable(glue->dev);
+       pm_runtime_use_autosuspend(glue->dev);
+       pm_runtime_set_autosuspend_delay(glue->dev, 500);
 
        ret = platform_device_add(musb);
        if (ret) {
@@ -662,12 +590,14 @@ err0:
 
 static int omap2430_remove(struct platform_device *pdev)
 {
-       struct omap2430_glue            *glue = platform_get_drvdata(pdev);
+       struct omap2430_glue *glue = platform_get_drvdata(pdev);
+       struct musb *musb = glue_to_musb(glue);
 
        pm_runtime_get_sync(glue->dev);
-       cancel_work_sync(&glue->omap_musb_mailbox_work);
        platform_device_unregister(glue->musb);
+       omap2430_set_power(musb, false, false);
        pm_runtime_put_sync(glue->dev);
+       pm_runtime_dont_use_autosuspend(glue->dev);
        pm_runtime_disable(glue->dev);
 
        return 0;
@@ -680,12 +610,13 @@ static int omap2430_runtime_suspend(struct device *dev)
        struct omap2430_glue            *glue = dev_get_drvdata(dev);
        struct musb                     *musb = glue_to_musb(glue);
 
-       if (musb) {
-               musb->context.otg_interfsel = musb_readl(musb->mregs,
-                               OTG_INTERFSEL);
+       if (!musb)
+               return 0;
 
-               omap2430_low_level_exit(musb);
-       }
+       musb->context.otg_interfsel = musb_readl(musb->mregs,
+                                                OTG_INTERFSEL);
+
+       omap2430_low_level_exit(musb);
 
        return 0;
 }
@@ -696,7 +627,7 @@ static int omap2430_runtime_resume(struct device *dev)
        struct musb                     *musb = glue_to_musb(glue);
 
        if (!musb)
-               return -EPROBE_DEFER;
+               return 0;
 
        omap2430_low_level_init(musb);
        musb_writel(musb->mregs, OTG_INTERFSEL,
@@ -738,18 +669,8 @@ static struct platform_driver omap2430_driver = {
        },
 };
 
+module_platform_driver(omap2430_driver);
+
 MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
 MODULE_LICENSE("GPL v2");
-
-static int __init omap2430_init(void)
-{
-       return platform_driver_register(&omap2430_driver);
-}
-subsys_initcall(omap2430_init);
-
-static void __exit omap2430_exit(void)
-{
-       platform_driver_unregister(&omap2430_driver);
-}
-module_exit(omap2430_exit);
index fdab4232cfbf606e268e301b4f06701765d0eaa2..76500515dd8ba0884eae778253527f52601f2219 100644 (file)
@@ -80,7 +80,8 @@ static struct musb *sunxi_musb;
 
 struct sunxi_glue {
        struct device           *dev;
-       struct platform_device  *musb;
+       struct musb             *musb;
+       struct platform_device  *musb_pdev;
        struct clk              *clk;
        struct reset_control    *rst;
        struct phy              *phy;
@@ -102,7 +103,7 @@ static void sunxi_musb_work(struct work_struct *work)
                return;
 
        if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
-               struct musb *musb = platform_get_drvdata(glue->musb);
+               struct musb *musb = glue->musb;
                unsigned long flags;
                u8 devctl;
 
@@ -112,7 +113,7 @@ static void sunxi_musb_work(struct work_struct *work)
                if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
                        set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
                        musb->xceiv->otg->default_a = 1;
-                       musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+                       musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
                        MUSB_HST_MODE(musb);
                        devctl |= MUSB_DEVCTL_SESSION;
                } else {
@@ -145,10 +146,12 @@ static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
 {
        struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
 
-       if (is_on)
+       if (is_on) {
                set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
-       else
+               musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+       } else {
                clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+       }
 
        schedule_work(&glue->work);
 }
@@ -264,15 +267,6 @@ static int sunxi_musb_init(struct musb *musb)
        if (ret)
                goto error_unregister_notifier;
 
-       if (musb->port_mode == MUSB_PORT_MODE_HOST) {
-               ret = phy_power_on(glue->phy);
-               if (ret)
-                       goto error_phy_exit;
-               set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
-               /* Stop musb work from turning vbus off again */
-               set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
-       }
-
        musb->isr = sunxi_musb_interrupt;
 
        /* Stop the musb-core from doing runtime pm (not supported on sunxi) */
@@ -280,8 +274,6 @@ static int sunxi_musb_init(struct musb *musb)
 
        return 0;
 
-error_phy_exit:
-       phy_exit(glue->phy);
 error_unregister_notifier:
        if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
                extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
@@ -323,10 +315,31 @@ static int sunxi_musb_exit(struct musb *musb)
        return 0;
 }
 
+static int sunxi_set_mode(struct musb *musb, u8 mode)
+{
+       struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+       int ret;
+
+       if (mode == MUSB_HOST) {
+               ret = phy_power_on(glue->phy);
+               if (ret)
+                       return ret;
+
+               set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+               /* Stop musb work from turning vbus off again */
+               set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+               musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+       }
+
+       return 0;
+}
+
 static void sunxi_musb_enable(struct musb *musb)
 {
        struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
 
+       glue->musb = musb;
+
        /* musb_core does not call us in a balanced manner */
        if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
                return;
@@ -569,6 +582,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
        .exit           = sunxi_musb_exit,
        .enable         = sunxi_musb_enable,
        .disable        = sunxi_musb_disable,
+       .set_mode       = sunxi_set_mode,
        .fifo_offset    = sunxi_musb_fifo_offset,
        .ep_offset      = sunxi_musb_ep_offset,
        .busctl_offset  = sunxi_musb_busctl_offset,
@@ -721,9 +735,9 @@ static int sunxi_musb_probe(struct platform_device *pdev)
        pinfo.data      = &pdata;
        pinfo.size_data = sizeof(pdata);
 
-       glue->musb = platform_device_register_full(&pinfo);
-       if (IS_ERR(glue->musb)) {
-               ret = PTR_ERR(glue->musb);
+       glue->musb_pdev = platform_device_register_full(&pinfo);
+       if (IS_ERR(glue->musb_pdev)) {
+               ret = PTR_ERR(glue->musb_pdev);
                dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
                goto err_unregister_usb_phy;
        }
@@ -740,7 +754,7 @@ static int sunxi_musb_remove(struct platform_device *pdev)
        struct sunxi_glue *glue = platform_get_drvdata(pdev);
        struct platform_device *usb_phy = glue->usb_phy;
 
-       platform_device_unregister(glue->musb); /* Frees glue ! */
+       platform_device_unregister(glue->musb_pdev);
        usb_phy_generic_unregister(usb_phy);
 
        return 0;
index 24e2b3cf186759f81240232323d06d8dd03b4975..a72e8d670adc846b225d5cfd6fc27b43f5bc9bea 100644 (file)
@@ -97,6 +97,9 @@ struct twl6030_usb {
 
        struct regulator                *usb3v3;
 
+       /* used to check initial cable status after probe */
+       struct delayed_work     get_status_work;
+
        /* used to set vbus, in atomic path */
        struct work_struct      set_vbus_work;
 
@@ -227,12 +230,16 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
                        twl->asleep = 1;
                        status = MUSB_VBUS_VALID;
                        twl->linkstat = status;
-                       musb_mailbox(status);
+                       ret = musb_mailbox(status);
+                       if (ret)
+                               twl->linkstat = MUSB_UNKNOWN;
                } else {
                        if (twl->linkstat != MUSB_UNKNOWN) {
                                status = MUSB_VBUS_OFF;
                                twl->linkstat = status;
-                               musb_mailbox(status);
+                               ret = musb_mailbox(status);
+                               if (ret)
+                                       twl->linkstat = MUSB_UNKNOWN;
                                if (twl->asleep) {
                                        regulator_disable(twl->usb3v3);
                                        twl->asleep = 0;
@@ -264,7 +271,9 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
                twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
                status = MUSB_ID_GROUND;
                twl->linkstat = status;
-               musb_mailbox(status);
+               ret = musb_mailbox(status);
+               if (ret)
+                       twl->linkstat = MUSB_UNKNOWN;
        } else  {
                twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
                twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -274,6 +283,15 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
        return IRQ_HANDLED;
 }
 
+static void twl6030_status_work(struct work_struct *work)
+{
+       struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
+                                              get_status_work.work);
+
+       twl6030_usb_irq(twl->irq2, twl);
+       twl6030_usbotg_irq(twl->irq1, twl);
+}
+
 static int twl6030_enable_irq(struct twl6030_usb *twl)
 {
        twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -284,8 +302,6 @@ static int twl6030_enable_irq(struct twl6030_usb *twl)
                                REG_INT_MSK_LINE_C);
        twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
                                REG_INT_MSK_STS_C);
-       twl6030_usb_irq(twl->irq2, twl);
-       twl6030_usbotg_irq(twl->irq1, twl);
 
        return 0;
 }
@@ -371,6 +387,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
                dev_warn(&pdev->dev, "could not create sysfs file\n");
 
        INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
+       INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
 
        status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
                        IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -395,6 +412,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
 
        twl->asleep = 0;
        twl6030_enable_irq(twl);
+       schedule_delayed_work(&twl->get_status_work, HZ);
        dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
 
        return 0;
@@ -404,6 +422,7 @@ static int twl6030_usb_remove(struct platform_device *pdev)
 {
        struct twl6030_usb *twl = platform_get_drvdata(pdev);
 
+       cancel_delayed_work(&twl->get_status_work);
        twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
                REG_INT_MSK_LINE_C);
        twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
index 2eddbe538cda14d073a15bdaa5f5ad4a13eeb560..5608af4a369d47f1f63402a22173e2971a51fdaf 100644 (file)
@@ -2007,6 +2007,7 @@ static void mos7720_release(struct usb_serial *serial)
                                    urblist_entry)
                        usb_unlink_urb(urbtrack->urb);
                spin_unlock_irqrestore(&mos_parport->listlock, flags);
+               parport_del_port(mos_parport->pp);
 
                kref_put(&mos_parport->ref_count, destroy_mos_parport);
        }
index 4d49fce406e19aef87ec12004dcf3d2e31b5551c..5ef014ba6ae86917f34e6ebbd855845e3bf71732 100644 (file)
@@ -836,6 +836,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
        if (devinfo->flags & US_FL_BROKEN_FUA)
                sdev->broken_fua = 1;
 
+       scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
        return 0;
 }
 
@@ -848,7 +849,6 @@ static struct scsi_host_template uas_host_template = {
        .slave_configure = uas_slave_configure,
        .eh_abort_handler = uas_eh_abort_handler,
        .eh_bus_reset_handler = uas_eh_bus_reset_handler,
-       .can_queue = MAX_CMNDS,
        .this_id = -1,
        .sg_tablesize = SG_NONE,
        .skip_settle_delay = 1,
index fca51105974eabdd09094b7cd1e80bb945275129..2e0450bec1b13014cd257066a138a66e1ddd306d 100644 (file)
@@ -941,7 +941,7 @@ static void vhci_stop(struct usb_hcd *hcd)
 
 static int vhci_get_frame_number(struct usb_hcd *hcd)
 {
-       pr_err("Not yet implemented\n");
+       dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n");
        return 0;
 }
 
index 93601407dab8a04b44c8a379b9ff0d480c12303d..688691d9058dd98e134459fbaed507721ae507f8 100644 (file)
@@ -749,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos,
                if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
                        return count;
        } else {
-               if (pci_read_vpd(pdev, addr, 4, &data) != 4)
+               data = 0;
+               if (pci_read_vpd(pdev, addr, 4, &data) < 0)
                        return count;
                *pdata = cpu_to_le32(data);
        }
index e9ea3fef144a3164ab002bc013d1c36e30a7be45..15ecfc9c5f6c59e14cdf4477e401700df47e19ff 100644 (file)
@@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
 
 static void vfio_intx_disable(struct vfio_pci_device *vdev)
 {
-       vfio_intx_set_signal(vdev, -1);
        vfio_virqfd_disable(&vdev->ctx[0].unmask);
        vfio_virqfd_disable(&vdev->ctx[0].mask);
+       vfio_intx_set_signal(vdev, -1);
        vdev->irq_type = VFIO_PCI_NUM_IRQS;
        vdev->num_ctx = 0;
        kfree(vdev->ctx);
@@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
        struct pci_dev *pdev = vdev->pdev;
        int i;
 
-       vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
-
        for (i = 0; i < vdev->num_ctx; i++) {
                vfio_virqfd_disable(&vdev->ctx[i].unmask);
                vfio_virqfd_disable(&vdev->ctx[i].mask);
        }
 
+       vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+
        if (msix) {
                pci_disable_msix(vdev->pdev);
                kfree(vdev->msix);
index 15a65823aad9cb84f1f8e2696836a6156ffb7700..2ba19424e4a18f33555f370247ffc28204212089 100644 (file)
@@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
                          unsigned long pfn, long npage, int prot)
 {
        long i;
-       int ret;
+       int ret = 0;
 
        for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
                ret = iommu_map(domain->domain, iova,
index 8511c648a15c21cd5d7239e804ca1088c3e189bf..9d78411a3bf75123783df932cacc4dfb142b85bd 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omap-panel-data.h>
 
 struct panel_drv_data {
@@ -25,7 +25,6 @@ struct panel_drv_data {
 
        struct omap_video_timings timings;
 
-       enum omap_dss_venc_type connector_type;
        bool invert_polarity;
 };
 
@@ -45,10 +44,6 @@ static const struct omap_video_timings tvc_pal_timings = {
 
 static const struct of_device_id tvc_of_match[];
 
-struct tvc_of_data {
-       enum omap_dss_venc_type connector_type;
-};
-
 #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
 
 static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +94,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
        in->ops.atv->set_timings(in, &ddata->timings);
 
        if (!ddata->dev->of_node) {
-               in->ops.atv->set_type(in, ddata->connector_type);
+               in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
 
                in->ops.atv->invert_vid_out_polarity(in,
                        ddata->invert_polarity);
@@ -207,7 +202,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
 
        ddata->in = in;
 
-       ddata->connector_type = pdata->connector_type;
        ddata->invert_polarity = pdata->invert_polarity;
 
        dssdev = &ddata->dssdev;
index d811e6dcaef727588cdc65695673a4f7144f0f30..06e1db34541e2cc8ba6bf51987428965532509c9 100644 (file)
@@ -16,8 +16,7 @@
 
 #include <drm/drm_edid.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 static const struct omap_video_timings dvic_default_timings = {
        .x_res          = 640,
@@ -236,46 +235,6 @@ static struct omap_dss_driver dvic_driver = {
        .detect         = dvic_detect,
 };
 
-static int dvic_probe_pdata(struct platform_device *pdev)
-{
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-       struct connector_dvi_platform_data *pdata;
-       struct omap_dss_device *in, *dssdev;
-       int i2c_bus_num;
-
-       pdata = dev_get_platdata(&pdev->dev);
-       i2c_bus_num = pdata->i2c_bus_num;
-
-       if (i2c_bus_num != -1) {
-               struct i2c_adapter *adapter;
-
-               adapter = i2c_get_adapter(i2c_bus_num);
-               if (!adapter) {
-                       dev_err(&pdev->dev,
-                                       "Failed to get I2C adapter, bus %d\n",
-                                       i2c_bus_num);
-                       return -EPROBE_DEFER;
-               }
-
-               ddata->i2c_adapter = adapter;
-       }
-
-       in = omap_dss_find_output(pdata->source);
-       if (in == NULL) {
-               i2c_put_adapter(ddata->i2c_adapter);
-
-               dev_err(&pdev->dev, "Failed to find video source\n");
-               return -EPROBE_DEFER;
-       }
-
-       ddata->in = in;
-
-       dssdev = &ddata->dssdev;
-       dssdev->name = pdata->name;
-
-       return 0;
-}
-
 static int dvic_probe_of(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -313,23 +272,18 @@ static int dvic_probe(struct platform_device *pdev)
        struct omap_dss_device *dssdev;
        int r;
 
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
        ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
        if (!ddata)
                return -ENOMEM;
 
        platform_set_drvdata(pdev, ddata);
 
-       if (dev_get_platdata(&pdev->dev)) {
-               r = dvic_probe_pdata(pdev);
-               if (r)
-                       return r;
-       } else if (pdev->dev.of_node) {
-               r = dvic_probe_of(pdev);
-               if (r)
-                       return r;
-       } else {
-               return -ENODEV;
-       }
+       r = dvic_probe_of(pdev);
+       if (r)
+               return r;
 
        ddata->timings = dvic_default_timings;
 
index 6ee4129bc0c064b851626c14913f1c16db0a4eec..58d5803ede67f11fbc12b96591227b87d719f391 100644 (file)
@@ -17,8 +17,7 @@
 
 #include <drm/drm_edid.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 static const struct omap_video_timings hdmic_default_timings = {
        .x_res          = 640,
@@ -206,30 +205,6 @@ static struct omap_dss_driver hdmic_driver = {
        .set_hdmi_infoframe     = hdmic_set_infoframe,
 };
 
-static int hdmic_probe_pdata(struct platform_device *pdev)
-{
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-       struct connector_hdmi_platform_data *pdata;
-       struct omap_dss_device *in, *dssdev;
-
-       pdata = dev_get_platdata(&pdev->dev);
-
-       ddata->hpd_gpio = -ENODEV;
-
-       in = omap_dss_find_output(pdata->source);
-       if (in == NULL) {
-               dev_err(&pdev->dev, "Failed to find video source\n");
-               return -EPROBE_DEFER;
-       }
-
-       ddata->in = in;
-
-       dssdev = &ddata->dssdev;
-       dssdev->name = pdata->name;
-
-       return 0;
-}
-
 static int hdmic_probe_of(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -261,6 +236,9 @@ static int hdmic_probe(struct platform_device *pdev)
        struct omap_dss_device *dssdev;
        int r;
 
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
        ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
        if (!ddata)
                return -ENOMEM;
@@ -268,17 +246,9 @@ static int hdmic_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, ddata);
        ddata->dev = &pdev->dev;
 
-       if (dev_get_platdata(&pdev->dev)) {
-               r = hdmic_probe_pdata(pdev);
-               if (r)
-                       return r;
-       } else if (pdev->dev.of_node) {
-               r = hdmic_probe_of(pdev);
-               if (r)
-                       return r;
-       } else {
-               return -ENODEV;
-       }
+       r = hdmic_probe_of(pdev);
+       if (r)
+               return r;
 
        if (gpio_is_valid(ddata->hpd_gpio)) {
                r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
index 8c246c213e06ea1bfaff43f49c34e4a903a6193a..a9a67167cc3d40f5fbc037a2c38ebd5a1a2af904 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 struct panel_drv_data {
        struct omap_dss_device dssdev;
index d9048b3df495c0b2ec11d2ddef1a702ef0df05c5..8c0953d069b7ec4df741f7daf3ea3cc99d510826 100644 (file)
@@ -15,8 +15,7 @@
 #include <linux/slab.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 struct panel_drv_data {
        struct omap_dss_device dssdev;
@@ -166,32 +165,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = {
        .get_timings    = tfp410_get_timings,
 };
 
-static int tfp410_probe_pdata(struct platform_device *pdev)
-{
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-       struct encoder_tfp410_platform_data *pdata;
-       struct omap_dss_device *dssdev, *in;
-
-       pdata = dev_get_platdata(&pdev->dev);
-
-       ddata->pd_gpio = pdata->power_down_gpio;
-
-       ddata->data_lines = pdata->data_lines;
-
-       in = omap_dss_find_output(pdata->source);
-       if (in == NULL) {
-               dev_err(&pdev->dev, "Failed to find video source\n");
-               return -ENODEV;
-       }
-
-       ddata->in = in;
-
-       dssdev = &ddata->dssdev;
-       dssdev->name = pdata->name;
-
-       return 0;
-}
-
 static int tfp410_probe_of(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -225,23 +198,18 @@ static int tfp410_probe(struct platform_device *pdev)
        struct omap_dss_device *dssdev;
        int r;
 
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
        ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
        if (!ddata)
                return -ENOMEM;
 
        platform_set_drvdata(pdev, ddata);
 
-       if (dev_get_platdata(&pdev->dev)) {
-               r = tfp410_probe_pdata(pdev);
-               if (r)
-                       return r;
-       } else if (pdev->dev.of_node) {
-               r = tfp410_probe_of(pdev);
-               if (r)
-                       return r;
-       } else {
-               return -ENODEV;
-       }
+       r = tfp410_probe_of(pdev);
+       if (r)
+               return r;
 
        if (gpio_is_valid(ddata->pd_gpio)) {
                r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
index 677e2545fcbe680c7460aab3c1b25e1d6e7add2e..80dc47347e211a4ab8c16e4f3670025cb092f150 100644 (file)
@@ -16,8 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/gpio/consumer.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 struct panel_drv_data {
        struct omap_dss_device dssdev;
index e780fd4f8b46b3cc24637985eeb663960b522429..ace3d818afe564049f5d048bd4acbbbb43b52fcc 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omap-panel-data.h>
 #include <video/of_display_timing.h>
 
index 3414c2609320bcc54b9ae83efda9da6a13efd2c7..b58012b82b6fe93fb81b2429fb2d060c43c9bf9e 100644 (file)
@@ -25,8 +25,7 @@
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 #include <video/mipi_display.h>
 
 /* DSI Virtual channel. Hardcoded for now. */
@@ -1127,40 +1126,6 @@ static struct omap_dss_driver dsicm_ops = {
        .memory_read    = dsicm_memory_read,
 };
 
-static int dsicm_probe_pdata(struct platform_device *pdev)
-{
-       const struct panel_dsicm_platform_data *pdata;
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-       struct omap_dss_device *dssdev, *in;
-
-       pdata = dev_get_platdata(&pdev->dev);
-
-       in = omap_dss_find_output(pdata->source);
-       if (in == NULL) {
-               dev_err(&pdev->dev, "failed to find video source\n");
-               return -EPROBE_DEFER;
-       }
-       ddata->in = in;
-
-       ddata->reset_gpio = pdata->reset_gpio;
-
-       if (pdata->use_ext_te)
-               ddata->ext_te_gpio = pdata->ext_te_gpio;
-       else
-               ddata->ext_te_gpio = -1;
-
-       ddata->ulps_timeout = pdata->ulps_timeout;
-
-       ddata->use_dsi_backlight = pdata->use_dsi_backlight;
-
-       ddata->pin_config = pdata->pin_config;
-
-       dssdev = &ddata->dssdev;
-       dssdev->name = pdata->name;
-
-       return 0;
-}
-
 static int dsicm_probe_of(struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
@@ -1207,6 +1172,9 @@ static int dsicm_probe(struct platform_device *pdev)
 
        dev_dbg(dev, "probe\n");
 
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
        ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
        if (!ddata)
                return -ENOMEM;
@@ -1214,17 +1182,9 @@ static int dsicm_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, ddata);
        ddata->pdev = pdev;
 
-       if (dev_get_platdata(dev)) {
-               r = dsicm_probe_pdata(pdev);
-               if (r)
-                       return r;
-       } else if (pdev->dev.of_node) {
-               r = dsicm_probe_of(pdev);
-               if (r)
-                       return r;
-       } else {
-               return -ENODEV;
-       }
+       r = dsicm_probe_of(pdev);
+       if (r)
+               return r;
 
        ddata->timings.x_res = 864;
        ddata->timings.y_res = 480;
index 18eb60e9c9ecf5f09d9e17865a7d4f5f3c04c823..f14691ce8d020f850a8df3b86b9ccc9d538afb4b 100644 (file)
@@ -16,8 +16,7 @@
 #include <linux/mutex.h>
 #include <linux/gpio.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 static struct omap_video_timings lb035q02_timings = {
        .x_res = 320,
@@ -240,44 +239,6 @@ static struct omap_dss_driver lb035q02_ops = {
        .get_resolution = omapdss_default_get_resolution,
 };
 
-static int lb035q02_probe_pdata(struct spi_device *spi)
-{
-       const struct panel_lb035q02_platform_data *pdata;
-       struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
-       struct omap_dss_device *dssdev, *in;
-       int r;
-
-       pdata = dev_get_platdata(&spi->dev);
-
-       in = omap_dss_find_output(pdata->source);
-       if (in == NULL) {
-               dev_err(&spi->dev, "failed to find video source '%s'\n",
-                               pdata->source);
-               return -EPROBE_DEFER;
-       }
-
-       ddata->in = in;
-
-       ddata->data_lines = pdata->data_lines;
-
-       dssdev = &ddata->dssdev;
-       dssdev->name = pdata->name;
-
-       r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio,
-                                       GPIOF_OUT_INIT_LOW, "panel enable");
-       if (r)
-               goto err_gpio;
-
-       ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio);
-
-       ddata->backlight_gpio = pdata->backlight_gpio;
-
-       return 0;
-err_gpio:
-       omap_dss_put_device(ddata->in);
-       return r;
-}
-
 static int lb035q02_probe_of(struct spi_device *spi)
 {
        struct device_node *node = spi->dev.of_node;
@@ -312,6 +273,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
        struct omap_dss_device *dssdev;
        int r;
 
+       if (!spi->dev.of_node)
+               return -ENODEV;
+
        ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
        if (ddata == NULL)
                return -ENOMEM;
@@ -320,17 +284,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
 
        ddata->spi = spi;
 
-       if (dev_get_platdata(&spi->dev)) {
-               r = lb035q02_probe_pdata(spi);
-               if (r)
-                       return r;
-       } else if (spi->dev.of_node) {
-               r = lb035q02_probe_of(spi);
-               if (r)
-                       return r;
-       } else {
-               return -ENODEV;
-       }
+       r = lb035q02_probe_of(spi);
+       if (r)
+               return r;
 
        if (gpio_is_valid(ddata->backlight_gpio)) {
                r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
index 8a928c9a2fc9a72fb5bacb78ec8926b4d7160ed0..a2cbadd3eca301fbadf3a9e3e90b466e3c4bc741 100644 (file)
@@ -18,8 +18,7 @@
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 struct panel_drv_data {
        struct omap_dss_device  dssdev;
@@ -233,33 +232,6 @@ static struct omap_dss_driver nec_8048_ops = {
 };
 
 
-static int nec_8048_probe_pdata(struct spi_device *spi)
-{
-       const struct panel_nec_nl8048hl11_platform_data *pdata;
-       struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
-       struct omap_dss_device *dssdev, *in;
-
-       pdata = dev_get_platdata(&spi->dev);
-
-       ddata->qvga_gpio = pdata->qvga_gpio;
-       ddata->res_gpio = pdata->res_gpio;
-
-       in = omap_dss_find_output(pdata->source);
-       if (in == NULL) {
-               dev_err(&spi->dev, "failed to find video source '%s'\n",
-                               pdata->source);
-               return -EPROBE_DEFER;
-       }
-       ddata->in = in;
-
-       ddata->data_lines = pdata->data_lines;
-
-       dssdev = &ddata->dssdev;
-       dssdev->name = pdata->name;
-
-       return 0;
-}
-
 static int nec_8048_probe_of(struct spi_device *spi)
 {
        struct device_node *node = spi->dev.of_node;
@@ -296,6 +268,9 @@ static int nec_8048_probe(struct spi_device *spi)
 
        dev_dbg(&spi->dev, "%s\n", __func__);
 
+       if (!spi->dev.of_node)
+               return -ENODEV;
+
        spi->mode = SPI_MODE_0;
        spi->bits_per_word = 32;
 
@@ -315,17 +290,9 @@ static int nec_8048_probe(struct spi_device *spi)
 
        ddata->spi = spi;
 
-       if (dev_get_platdata(&spi->dev)) {
-               r = nec_8048_probe_pdata(spi);
-               if (r)
-                       return r;
-       } else if (spi->dev.of_node) {
-               r = nec_8048_probe_of(spi);
-               if (r)
-                       return r;
-       } else {
-               return -ENODEV;
-       }
+       r = nec_8048_probe_of(spi);
+       if (r)
+               return r;
 
        if (gpio_is_valid(ddata->qvga_gpio)) {
                r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
index 1954ec913ce5d188e47bc90f634c3a2909beb40a..a8be18a87fa0038e40d4f14a9fba625e2e320374 100644 (file)
@@ -17,8 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/regulator/consumer.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 struct panel_drv_data {
        struct omap_dss_device dssdev;
@@ -197,69 +196,6 @@ static struct omap_dss_driver sharp_ls_ops = {
        .get_resolution = omapdss_default_get_resolution,
 };
 
-static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
-                 char *desc, struct gpio_desc **gpiod)
-{
-       int r;
-
-       r = devm_gpio_request_one(dev, gpio, flags, desc);
-       if (r) {
-               *gpiod = NULL;
-               return r == -ENOENT ? 0 : r;
-       }
-
-       *gpiod = gpio_to_desc(gpio);
-
-       return 0;
-}
-
-static int sharp_ls_probe_pdata(struct platform_device *pdev)
-{
-       const struct panel_sharp_ls037v7dw01_platform_data *pdata;
-       struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-       struct omap_dss_device *dssdev, *in;
-       int r;
-
-       pdata = dev_get_platdata(&pdev->dev);
-
-       in = omap_dss_find_output(pdata->source);
-       if (in == NULL) {
-               dev_err(&pdev->dev, "failed to find video source '%s'\n",
-                               pdata->source);
-               return -EPROBE_DEFER;
-       }
-
-       ddata->in = in;
-
-       ddata->data_lines = pdata->data_lines;
-
-       dssdev = &ddata->dssdev;
-       dssdev->name = pdata->name;
-
-       r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW,
-               "lcd MO", &ddata->mo_gpio);
-       if (r)
-               return r;
-       r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH,
-               "lcd LR", &ddata->lr_gpio);
-       if (r)
-               return r;
-       r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH,
-               "lcd UD", &ddata->ud_gpio);
-       if (r)
-               return r;
-       r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW,
-               "lcd RESB", &ddata->resb_gpio);
-       if (r)
-               return r;
-       r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW,
-               "lcd INI", &ddata->ini_gpio);
-       if (r)
-               return r;
-
-       return 0;
-}
-
 static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
        const char *desc, struct gpio_desc **gpiod)
 {
@@ -330,23 +266,18 @@ static int sharp_ls_probe(struct platform_device *pdev)
        struct omap_dss_device *dssdev;
        int r;
 
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
        ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
        if (ddata == NULL)
                return -ENOMEM;
 
        platform_set_drvdata(pdev, ddata);
 
-       if (dev_get_platdata(&pdev->dev)) {
-               r = sharp_ls_probe_pdata(pdev);
-               if (r)
-                       return r;
-       } else if (pdev->dev.of_node) {
-               r = sharp_ls_probe_of(pdev);
-               if (r)
-                       return r;
-       } else {
-               return -ENODEV;
-       }
+       r = sharp_ls_probe_of(pdev);
+       if (r)
+               return r;
 
        ddata->videomode = sharp_ls_timings;
 
index 31efcca801bdce78f68b6e963a4c80694f85b7d1..468560a6daaea16c891193f98369f4be9aabf63a 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omap-panel-data.h>
 
 #define MIPID_CMD_READ_DISP_ID         0x04
index 4d657f3ab67914f0e5e5d77c450aaf4c2f6be048..b529a8c2b652cfa327ad0435024870e4b60c7ddc 100644 (file)
@@ -28,8 +28,7 @@
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 struct panel_drv_data {
        struct omap_dss_device dssdev;
@@ -365,31 +364,6 @@ static struct omap_dss_driver td028ttec1_ops = {
        .check_timings  = td028ttec1_panel_check_timings,
 };
 
-static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
-{
-       const struct panel_tpo_td028ttec1_platform_data *pdata;
-       struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
-       struct omap_dss_device *dssdev, *in;
-
-       pdata = dev_get_platdata(&spi->dev);
-
-       in = omap_dss_find_output(pdata->source);
-       if (in == NULL) {
-               dev_err(&spi->dev, "failed to find video source '%s'\n",
-                               pdata->source);
-               return -EPROBE_DEFER;
-       }
-
-       ddata->in = in;
-
-       ddata->data_lines = pdata->data_lines;
-
-       dssdev = &ddata->dssdev;
-       dssdev->name = pdata->name;
-
-       return 0;
-}
-
 static int td028ttec1_probe_of(struct spi_device *spi)
 {
        struct device_node *node = spi->dev.of_node;
@@ -415,6 +389,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
 
        dev_dbg(&spi->dev, "%s\n", __func__);
 
+       if (!spi->dev.of_node)
+               return -ENODEV;
+
        spi->bits_per_word = 9;
        spi->mode = SPI_MODE_3;
 
@@ -432,17 +409,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
 
        ddata->spi_dev = spi;
 
-       if (dev_get_platdata(&spi->dev)) {
-               r = td028ttec1_panel_probe_pdata(spi);
-               if (r)
-                       return r;
-       } else if (spi->dev.of_node) {
-               r = td028ttec1_probe_of(spi);
-               if (r)
-                       return r;
-       } else {
-               return -ENODEV;
-       }
+       r = td028ttec1_probe_of(spi);
+       if (r)
+               return r;
 
        ddata->videomode = td028ttec1_panel_timings;
 
index 68e3b68a29200675aed79653af0c6ccf4a27e852..51e628b85f4a097693cf9c922962eaf7659c32c6 100644 (file)
@@ -19,8 +19,7 @@
 #include <linux/slab.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 #define TPO_R02_MODE(x)                ((x) & 7)
 #define TPO_R02_MODE_800x480   7
@@ -465,32 +464,6 @@ static struct omap_dss_driver tpo_td043_ops = {
 };
 
 
-static int tpo_td043_probe_pdata(struct spi_device *spi)
-{
-       const struct panel_tpo_td043mtea1_platform_data *pdata;
-       struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
-       struct omap_dss_device *dssdev, *in;
-
-       pdata = dev_get_platdata(&spi->dev);
-
-       ddata->nreset_gpio = pdata->nreset_gpio;
-
-       in = omap_dss_find_output(pdata->source);
-       if (in == NULL) {
-               dev_err(&spi->dev, "failed to find video source '%s'\n",
-                               pdata->source);
-               return -EPROBE_DEFER;
-       }
-       ddata->in = in;
-
-       ddata->data_lines = pdata->data_lines;
-
-       dssdev = &ddata->dssdev;
-       dssdev->name = pdata->name;
-
-       return 0;
-}
-
 static int tpo_td043_probe_of(struct spi_device *spi)
 {
        struct device_node *node = spi->dev.of_node;
@@ -524,6 +497,9 @@ static int tpo_td043_probe(struct spi_device *spi)
 
        dev_dbg(&spi->dev, "%s\n", __func__);
 
+       if (!spi->dev.of_node)
+               return -ENODEV;
+
        spi->bits_per_word = 16;
        spi->mode = SPI_MODE_0;
 
@@ -541,17 +517,9 @@ static int tpo_td043_probe(struct spi_device *spi)
 
        ddata->spi = spi;
 
-       if (dev_get_platdata(&spi->dev)) {
-               r = tpo_td043_probe_pdata(spi);
-               if (r)
-                       return r;
-       } else if (spi->dev.of_node) {
-               r = tpo_td043_probe_of(spi);
-               if (r)
-                       return r;
-       } else {
-               return -ENODEV;
-       }
+       r = tpo_td043_probe_of(spi);
+       if (r)
+               return r;
 
        ddata->mode = TPO_R02_MODE_800x480;
        memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
index 663ccc3bf4e5cebb63ef05250a1e5d9d8edeaa07..2481f4871f6640c8fc58571cede92149ba4cd733 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index 5a87179b73123517ca6f079dc1f74b3cee8b428d..29de4827589dc5cee33128e8b01c93eaacda805d 100644 (file)
@@ -35,7 +35,7 @@
 #include <linux/suspend.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
@@ -208,8 +208,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
                core.default_display_name = def_disp_name;
        else if (pdata->default_display_name)
                core.default_display_name = pdata->default_display_name;
-       else if (pdata->default_device)
-               core.default_display_name = pdata->default_device->name;
 
        register_pm_notifier(&omap_dss_pm_notif_block);
 
index 6607db37a5e4be877729926e5f0628e532fd733d..3691bde4ce0a01a0d7ae76aaec3e3779db4d54c7 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index 5491e304f4fe9ead7dc2d136b8f65a4b96daa76e..7a75dfda98457c37965412233539bfeab534b265 100644 (file)
@@ -41,7 +41,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index 038c15b04215c8d11adfbde220be1f25b54d75e3..59c9a5c47ca95fb11d83fa3fb88f1cb18c22e9c3 100644 (file)
@@ -18,7 +18,7 @@
  */
 
 #include <linux/kernel.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dispc.h"
 
index 75b5286029ee44906243b2c23e1aade1a77d74ef..b3fdbfd0b82db6a2d5c7581e0e506511aa5eb5f6 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/sysfs.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
index ef5b9027985d63e50aecd7b91177e2b2030a61bb..dd5468695c434373f5ec9fe479177368cb5620ee 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 #include "dss_features.h"
 
index 7953e6a523463080fc22569bd89dc3f7918c6dc4..da09806b940c74f1fc32ad2abf3870fcc095cbef 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index d63e59807707966bc5ce43346f4c76d42fd140df..9e4800a4e3d11cff2ec29f78a260d4777c19784e 100644 (file)
@@ -42,7 +42,7 @@
 #include <linux/of_platform.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/mipi_display.h>
 
 #include "dss.h"
index bf407b6ba15ca0002166a0704124eeda0fb2052e..d356a252ab4a7f14d8042df74159b0732e0bdd6c 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/of.h>
 #include <linux/seq_file.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 
index 0078c4d1fc315a1ffd351d096b021db43cb74852..47d7f69ad9ad858a75595922f032341d319f442d 100644 (file)
@@ -41,7 +41,7 @@
 #include <linux/suspend.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index 0184a8461df1b0e7355cbb323149a66f752530b8..a3cc0ca8f9d240d3b5646e3eda4fc4193033fe81 100644 (file)
 #define FLD_MOD(orig, val, start, end) \
        (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
 
+enum omap_dss_clk_source {
+       OMAP_DSS_CLK_SRC_FCK = 0,               /* OMAP2/3: DSS1_ALWON_FCLK
+                                                * OMAP4: DSS_FCLK */
+       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC,   /* OMAP3: DSI1_PLL_FCLK
+                                                * OMAP4: PLL1_CLK1 */
+       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI,     /* OMAP3: DSI2_PLL_FCLK
+                                                * OMAP4: PLL1_CLK2 */
+       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC,  /* OMAP4: PLL2_CLK1 */
+       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI,    /* OMAP4: PLL2_CLK2 */
+};
+
 enum dss_io_pad_mode {
        DSS_IO_PAD_MODE_RESET,
        DSS_IO_PAD_MODE_RFBI,
index c886a2927f73f77d5e6054eec26f5d9d61427ee3..8fc843b56b268d1d30e87ddb1b990d7d04e10e6b 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index 53616b02b61371b1eb0d6c0c6819cb931cac39a9..f6de87e078b06df69e66683534ede423927be311 100644 (file)
@@ -23,7 +23,8 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/hdmi.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
+#include <sound/omap-hdmi-audio.h>
 
 #include "dss.h"
 
index 2e71aec838b1b5276d31a47cf1b5da77767b41a3..926a6f20dbb233ea541dc2dbbd1e90c0484e3a1a 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <sound/omap-hdmi-audio.h>
 
 #include "hdmi4_core.h"
index aade6d99662ab800fa03b5ab25e2a844bce822b1..0ee829a165c3a974f6fb6560543303a97cd761da 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <sound/omap-hdmi-audio.h>
 
 #include "hdmi5_core.h"
index 8ea531d2652c4cd96fcefab1f6fec938494ebc71..bbfe7e2d43321ff538a31d5297d9781dd8e31c69 100644 (file)
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
 {
        void __iomem *base = core->base;
        const unsigned long long iclk = 266000000;      /* DSS L3 ICLK */
-       const unsigned ss_scl_high = 4000;              /* ns */
-       const unsigned ss_scl_low = 4700;               /* ns */
+       const unsigned ss_scl_high = 4600;              /* ns */
+       const unsigned ss_scl_low = 5400;               /* ns */
        const unsigned fs_scl_high = 600;               /* ns */
        const unsigned fs_scl_low = 1300;               /* ns */
        const unsigned sda_hold = 1000;                 /* ns */
@@ -442,7 +442,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
 
        c = (ptr[1] >> 6) & 0x3;
        m = (ptr[1] >> 4) & 0x3;
-       r = (ptr[1] >> 0) & 0x3;
+       r = (ptr[1] >> 0) & 0xf;
 
        itc = (ptr[2] >> 7) & 0x1;
        ec = (ptr[2] >> 4) & 0x7;
index 1b8fcc6c4ba1ba9381709d4970cb25a950640fdb..189a5ad125a37bc279c664cd61e37e5dcff780ac 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "hdmi.h"
 
index 1f5d19c119cea01dbf17801f9f149cf0e38afc06..9a13c35fd6d8069cf7c7fb86f5b287e595ffb311 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
index 06e23a7c432ccea22b02cfb32e47d2516ee757e2..eac3665aba6cbe25a257eb752d4f1d5c8a9ca141 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
index 7c544bc56fb5d4400b4d7c32a96809ce6a2dd2a1..705373e4cf3864edc19911509e0342966f1f00fd 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
index a7414fb12830260df190985a438161ef3c0e5b20..9e2a67fdf4d20129a581b40da118a72628d30372 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index 08a67f4f6a20e2599d3039f63ccf4681ad8bf5c6..69f86d2cc274db90070abccbe248b3924adab7fd 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
@@ -69,7 +69,6 @@ int dss_init_overlay_managers(void)
                        break;
                }
 
-               mgr->caps = 0;
                mgr->supported_displays =
                        dss_feat_get_supported_displays(mgr->id);
                mgr->supported_outputs =
index 16072159bd24344731f03a413459a4c290793d7e..bed9a978269df9d0b3bf80d5691fc4b204e4a292 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 
index 4cc5ddebfb340204fb05710d35dea5e189cb1d39..f1f6c0aea752f54a0f925c1338452e1ab69b9732 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/kobject.h>
 #include <linux/platform_device.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index 2f7cee985cdddea5e75178da51ce875525f255ce..d6c5d75d2ef8262d6b13f8286ad912dd33dfce4e 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index f974ddcd3b6e97563f455c3b4196e6641d7ed041..0564c5606cd06853a2a09a88a42c2e801565d9fd 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/sched.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 
index aea6a1d0fb201e29ab7d18551a6dc595e3991dbd..562b0c4ae0c6c57e3374801a31854fc28bd1dc1a 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 struct rfbi_reg { u16 idx; };
index d747cc6b59e14d87c167adeb9a526a9500ed460c..c4be732a4714c73d7e73225d8c965d8e0287d28d 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 static struct {
index 26e0ee30adf85b1c7a0f81fe3045272d8fa5b47c..392464da12e419ce518d790cd800d46089fd454e 100644 (file)
@@ -37,7 +37,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index b1ec59e42940c22ef2b63042dd4a523917bea64f..a890540f2037a5a8914de0e60c5fee4fa60ba2a5 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
index 9ddfdd63b84ced6a1f8e32c01ac7c38cdb83258f..ef69273074ba706752b52076b83e2cd070210fb2 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/export.h>
 #include <linux/sizes.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
index d3af01c94a58d07f45b6e55c95c91af44755b987..2fb90cb6803ffdceae66bf0ccea148848c5503f5 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/platform_device.h>
 #include <linux/omapfb.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
index 18fa9e1d003319e62388aa12bac398f12e5927ec..8087a009c54f89365022754a9e67d6820f06c75f 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/mm.h>
 #include <linux/omapfb.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
index 623cd872a36788f3dc8cccd25fa53b475995cb3a..bcb9ff4a607d27189e865998e07a7b669ab4ddb7 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #ifdef DEBUG
 extern bool omapfb_debug;
index b54f26c55dfd1df46fcd5114660406edfd7e2820..b4b3e256491bfbe8c05fa0f7830c003541db8720 100644 (file)
@@ -746,7 +746,7 @@ config ALIM7101_WDT
 
 config EBC_C384_WDT
        tristate "WinSystems EBC-C384 Watchdog Timer"
-       depends on X86 && ISA
+       depends on X86 && ISA_BUS_API
        select WATCHDOG_CORE
        help
          Enables watchdog timer support for the watchdog timer on the
index d46839f51e730ff50e2c756a9f4b1326af82e2ec..e4db19e88ab15992b44c1c1f82d98938bca24d4b 100644 (file)
@@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 
-static void release_memory_resource(struct resource *resource);
-
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state)
 }
 
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+static void release_memory_resource(struct resource *resource)
+{
+       if (!resource)
+               return;
+
+       /*
+        * No need to reset region to identity mapped since we now
+        * know that no I/O can be in this region
+        */
+       release_resource(resource);
+       kfree(resource);
+}
+
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
        struct resource *res;
@@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
        return res;
 }
 
-static void release_memory_resource(struct resource *resource)
-{
-       if (!resource)
-               return;
-
-       /*
-        * No need to reset region to identity mapped since we now
-        * know that no I/O can be in this region
-        */
-       release_resource(resource);
-       kfree(resource);
-}
-
 static enum bp_state reserve_additional_memory(void)
 {
        long credit;
index 076970a54f894b80366da951f220a97203466bb7..4ce10bcca18b1f600c351675142240dbe94a4022 100644 (file)
@@ -423,36 +423,7 @@ upload:
 
        return 0;
 }
-static int __init check_prereq(void)
-{
-       struct cpuinfo_x86 *c = &cpu_data(0);
-
-       if (!xen_initial_domain())
-               return -ENODEV;
-
-       if (!acpi_gbl_FADT.smi_command)
-               return -ENODEV;
-
-       if (c->x86_vendor == X86_VENDOR_INTEL) {
-               if (!cpu_has(c, X86_FEATURE_EST))
-                       return -ENODEV;
 
-               return 0;
-       }
-       if (c->x86_vendor == X86_VENDOR_AMD) {
-               /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
-                * as we get compile warnings for the static functions.
-                */
-#define CPUID_FREQ_VOLT_CAPABILITIES    0x80000007
-#define USE_HW_PSTATE                   0x00000080
-               u32 eax, ebx, ecx, edx;
-               cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
-               if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
-                       return -ENODEV;
-               return 0;
-       }
-       return -ENODEV;
-}
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance __percpu *acpi_perf_data;
 
@@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
 static int __init xen_acpi_processor_init(void)
 {
        unsigned int i;
-       int rc = check_prereq();
+       int rc;
 
-       if (rc)
-               return rc;
+       if (!xen_initial_domain())
+               return -ENODEV;
 
        nr_acpi_bits = get_max_acpi_id() + 1;
        acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
index 8e67336f8ddd238d7763a118bb460327ba3278f7..6a25533da237db612f2bb4a99096a96fd49c0d8c 100644 (file)
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
                field_start = OFFSET(cfg_entry);
                field_end = OFFSET(cfg_entry) + field->size;
 
-               if ((req_start >= field_start && req_start < field_end)
-                   || (req_end > field_start && req_end <= field_end)) {
+               if (req_end > field_start && field_end > req_start) {
                        err = conf_space_read(dev, cfg_entry, field_start,
                                              &tmp_val);
                        if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
                field_start = OFFSET(cfg_entry);
                field_end = OFFSET(cfg_entry) + field->size;
 
-               if ((req_start >= field_start && req_start < field_end)
-                   || (req_end > field_start && req_end <= field_end)) {
+               if (req_end > field_start && field_end > req_start) {
                        tmp_val = 0;
 
                        err = xen_pcibk_config_read(dev, field_start,
index ad3d17d29c81171838d01a2f1a081c769407f02d..9ead1c2ff1ddd2e1340e57c70db6b53a7b34665e 100644 (file)
@@ -145,7 +145,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
        /* A write to obtain the length must happen as a 32-bit write.
         * This does not (yet) support writing individual bytes
         */
-       if (value == ~PCI_ROM_ADDRESS_ENABLE)
+       if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
                bar->which = 1;
        else {
                u32 tmpval;
@@ -225,38 +225,42 @@ static inline void read_dev_bar(struct pci_dev *dev,
                           (PCI_BASE_ADDRESS_SPACE_MEMORY |
                                PCI_BASE_ADDRESS_MEM_TYPE_64))) {
                        bar_info->val = res[pos - 1].start >> 32;
-                       bar_info->len_val = res[pos - 1].end >> 32;
+                       bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
                        return;
                }
        }
 
+       if (!res[pos].flags ||
+           (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
+                              IORESOURCE_BUSY)))
+               return;
+
        bar_info->val = res[pos].start |
                        (res[pos].flags & PCI_REGION_FLAG_MASK);
-       bar_info->len_val = resource_size(&res[pos]);
+       bar_info->len_val = -resource_size(&res[pos]) |
+                           (res[pos].flags & PCI_REGION_FLAG_MASK);
 }
 
 static void *bar_init(struct pci_dev *dev, int offset)
 {
-       struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+       struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 
        if (!bar)
                return ERR_PTR(-ENOMEM);
 
        read_dev_bar(dev, bar, offset, ~0);
-       bar->which = 0;
 
        return bar;
 }
 
 static void *rom_init(struct pci_dev *dev, int offset)
 {
-       struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+       struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 
        if (!bar)
                return ERR_PTR(-ENOMEM);
 
        read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
-       bar->which = 0;
 
        return bar;
 }
index cacf30d14747baa20d5f2d8a6a999ebdcba14018..7487971f9f788b12a637216c29cc853bb933c1ca 100644 (file)
@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
                        rc = -ENOMEM;
                        goto out;
                }
+       } else {
+               list_for_each_entry(trans, &u->transactions, list)
+                       if (trans->handle.id == u->u.msg.tx_id)
+                               break;
+               if (&trans->list == &u->transactions)
+                       return -ESRCH;
        }
 
        reply = xenbus_dev_request_and_reply(&u->u.msg);
        if (IS_ERR(reply)) {
-               kfree(trans);
+               if (msg_type == XS_TRANSACTION_START)
+                       kfree(trans);
                rc = PTR_ERR(reply);
                goto out;
        }
@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
                        list_add(&trans->list, &u->transactions);
                }
        } else if (u->u.msg.type == XS_TRANSACTION_END) {
-               list_for_each_entry(trans, &u->transactions, list)
-                       if (trans->handle.id == u->u.msg.tx_id)
-                               break;
-               BUG_ON(&trans->list == &u->transactions);
                list_del(&trans->list);
-
                kfree(trans);
        }
 
index 374b12af88127c2aef2f359ef3f13013ec548fd1..22f7cd711c5792e25eac035f0aa138c9992d9bd9 100644 (file)
@@ -232,10 +232,10 @@ static void transaction_resume(void)
 void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
 {
        void *ret;
-       struct xsd_sockmsg req_msg = *msg;
+       enum xsd_sockmsg_type type = msg->type;
        int err;
 
-       if (req_msg.type == XS_TRANSACTION_START)
+       if (type == XS_TRANSACTION_START)
                transaction_start();
 
        mutex_lock(&xs_state.request_mutex);
@@ -249,12 +249,8 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
 
        mutex_unlock(&xs_state.request_mutex);
 
-       if (IS_ERR(ret))
-               return ret;
-
        if ((msg->type == XS_TRANSACTION_END) ||
-           ((req_msg.type == XS_TRANSACTION_START) &&
-            (msg->type == XS_ERROR)))
+           ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
                transaction_end();
 
        return ret;
index b84c291ba1ebf38d0295b0dd919c794c28caa099..d7b78d531e63f99f6e2a350e081c9836918b64b5 100644 (file)
@@ -74,7 +74,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
                                        v9fs_proto_dotu(v9ses));
        fid = file->private_data;
        if (!fid) {
-               fid = v9fs_fid_clone(file->f_path.dentry);
+               fid = v9fs_fid_clone(file_dentry(file));
                if (IS_ERR(fid))
                        return PTR_ERR(fid);
 
@@ -100,7 +100,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
                 * because we want write after unlink usecase
                 * to work.
                 */
-               fid = v9fs_writeback_fid(file->f_path.dentry);
+               fid = v9fs_writeback_fid(file_dentry(file));
                if (IS_ERR(fid)) {
                        err = PTR_ERR(fid);
                        mutex_unlock(&v9inode->v_mutex);
@@ -516,7 +516,7 @@ v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
                 * because we want write after unlink usecase
                 * to work.
                 */
-               fid = v9fs_writeback_fid(filp->f_path.dentry);
+               fid = v9fs_writeback_fid(file_dentry(filp));
                if (IS_ERR(fid)) {
                        retval = PTR_ERR(fid);
                        mutex_unlock(&v9inode->v_mutex);
index f4645c51526290e0d22a1e2589dbe21bf8e7148b..e2e7c749925ad237c57758ed18186f5bed7d497b 100644 (file)
@@ -853,7 +853,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
        struct p9_fid *fid, *inode_fid;
        struct dentry *res = NULL;
 
-       if (d_unhashed(dentry)) {
+       if (d_in_lookup(dentry)) {
                res = v9fs_vfs_lookup(dir, dentry, 0);
                if (IS_ERR(res))
                        return PTR_ERR(res);
index a34702c998f593f60515d72fcf093cd556f5d951..1b51eaa5e2dd05445d0ba3e9c33038bcfbe4d277 100644 (file)
@@ -254,7 +254,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
        struct posix_acl *pacl = NULL, *dacl = NULL;
        struct dentry *res = NULL;
 
-       if (d_unhashed(dentry)) {
+       if (d_in_lookup(dentry)) {
                res = v9fs_vfs_lookup(dir, dentry, 0);
                if (IS_ERR(res))
                        return PTR_ERR(res);
index f0d268b97d19657929ac2f50cd3bc0e1a80436c7..a439548de785dde9497ed13b2063b696ed773d7f 100644 (file)
@@ -70,9 +70,13 @@ struct autofs_info {
 };
 
 #define AUTOFS_INF_EXPIRING    (1<<0) /* dentry in the process of expiring */
-#define AUTOFS_INF_NO_RCU      (1<<1) /* the dentry is being considered
+#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
                                        * for expiry, so RCU_walk is
-                                       * not permitted
+                                       * not permitted.  If it progresses to
+                                       * actual expiry attempt, the flag is
+                                       * not cleared when EXPIRING is set -
+                                       * in that case it gets cleared only
+                                       * when it comes to clearing EXPIRING.
                                        */
 #define AUTOFS_INF_PENDING     (1<<2) /* dentry pending mount */
 
index 9510d8d2e9cd2fa4ac7919c7bd8bee48c30d6165..b493909e74920be68574b849b426805fd37541d3 100644 (file)
@@ -316,19 +316,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
        if (ino->flags & AUTOFS_INF_PENDING)
                goto out;
        if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
-               ino->flags |= AUTOFS_INF_NO_RCU;
+               ino->flags |= AUTOFS_INF_WANT_EXPIRE;
                spin_unlock(&sbi->fs_lock);
                synchronize_rcu();
                spin_lock(&sbi->fs_lock);
                if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
                        ino->flags |= AUTOFS_INF_EXPIRING;
-                       smp_mb();
-                       ino->flags &= ~AUTOFS_INF_NO_RCU;
                        init_completion(&ino->expire_complete);
                        spin_unlock(&sbi->fs_lock);
                        return root;
                }
-               ino->flags &= ~AUTOFS_INF_NO_RCU;
+               ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
        }
 out:
        spin_unlock(&sbi->fs_lock);
@@ -446,7 +444,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
        while ((dentry = get_next_positive_subdir(dentry, root))) {
                spin_lock(&sbi->fs_lock);
                ino = autofs4_dentry_ino(dentry);
-               if (ino->flags & AUTOFS_INF_NO_RCU)
+               if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
                        expired = NULL;
                else
                        expired = should_expire(dentry, mnt, timeout, how);
@@ -455,7 +453,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
                        continue;
                }
                ino = autofs4_dentry_ino(expired);
-               ino->flags |= AUTOFS_INF_NO_RCU;
+               ino->flags |= AUTOFS_INF_WANT_EXPIRE;
                spin_unlock(&sbi->fs_lock);
                synchronize_rcu();
                spin_lock(&sbi->fs_lock);
@@ -465,7 +463,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
                        goto found;
                }
 
-               ino->flags &= ~AUTOFS_INF_NO_RCU;
+               ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
                if (expired != dentry)
                        dput(expired);
                spin_unlock(&sbi->fs_lock);
@@ -475,17 +473,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
 found:
        pr_debug("returning %p %pd\n", expired, expired);
        ino->flags |= AUTOFS_INF_EXPIRING;
-       smp_mb();
-       ino->flags &= ~AUTOFS_INF_NO_RCU;
        init_completion(&ino->expire_complete);
        spin_unlock(&sbi->fs_lock);
-       spin_lock(&sbi->lookup_lock);
-       spin_lock(&expired->d_parent->d_lock);
-       spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
-       list_move(&expired->d_parent->d_subdirs, &expired->d_child);
-       spin_unlock(&expired->d_lock);
-       spin_unlock(&expired->d_parent->d_lock);
-       spin_unlock(&sbi->lookup_lock);
        return expired;
 }
 
@@ -496,7 +485,7 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
        int status;
 
        /* Block on any pending expire */
-       if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
+       if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
                return 0;
        if (rcu_walk)
                return -ECHILD;
@@ -554,7 +543,7 @@ int autofs4_expire_run(struct super_block *sb,
        ino = autofs4_dentry_ino(dentry);
        /* avoid rapid-fire expire attempts if expiry fails */
        ino->last_used = now;
-       ino->flags &= ~AUTOFS_INF_EXPIRING;
+       ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
        complete_all(&ino->expire_complete);
        spin_unlock(&sbi->fs_lock);
 
@@ -583,7 +572,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
                spin_lock(&sbi->fs_lock);
                /* avoid rapid-fire expire attempts if expiry fails */
                ino->last_used = now;
-               ino->flags &= ~AUTOFS_INF_EXPIRING;
+               ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
                complete_all(&ino->expire_complete);
                spin_unlock(&sbi->fs_lock);
                dput(dentry);
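
Taken together, the expire hunks above collapse the old AUTOFS_INF_NO_RCU handshake into the single AUTOFS_INF_WANT_EXPIRE flag, which now covers the whole selection window: set it, let in-flight RCU-walk lookups drain, re-check that the dentry is still idle, and only then raise AUTOFS_INF_EXPIRING; both bits are dropped together when an expire attempt finishes. A minimal userspace sketch of the two-phase protocol (flag values and the busy() callback are illustrative; the kernel runs this under sbi->fs_lock with a real RCU grace period between the two checks):

    #include <stdbool.h>

    #define INF_WANT_EXPIRE 0x10    /* expire candidate selected  */
    #define INF_EXPIRING    0x01    /* expire actually in flight  */

    static bool try_start_expire(unsigned int *flags, bool (*busy)(void))
    {
            if (busy())
                    return false;
            *flags |= INF_WANT_EXPIRE;   /* phase 1: announce intent */
            /* kernel: spin_unlock(); synchronize_rcu(); spin_lock() */
            if (!busy()) {               /* phase 2: confirm, commit */
                    *flags |= INF_EXPIRING;
                    return true;
            }
            *flags &= ~INF_WANT_EXPIRE;  /* raced with a lookup: back out */
            return false;
    }
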
index 78bd8029852856a486533699d1d25c1a692104a1..3767f6641af1cf885d5254c7a59f249eb14db17e 100644 (file)
@@ -458,7 +458,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
                 */
                struct inode *inode;
 
-               if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
+               if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
                        return 0;
                if (d_mountpoint(dentry))
                        return 0;
index 0146d911f468ca46d5fe7c946851c500b624ccb9..631f1554c87b7d73b9afc7834392ce6e279c7d6d 100644 (file)
@@ -66,11 +66,12 @@ static int autofs4_write(struct autofs_sb_info *sbi,
        set_fs(KERNEL_DS);
 
        mutex_lock(&sbi->pipe_mutex);
-       wr = __vfs_write(file, data, bytes, &file->f_pos);
-       while (bytes && wr) {
+       while (bytes) {
+               wr = __vfs_write(file, data, bytes, &file->f_pos);
+               if (wr <= 0)
+                       break;
                data += wr;
                bytes -= wr;
-               wr = __vfs_write(file, data, bytes, &file->f_pos);
        }
        mutex_unlock(&sbi->pipe_mutex);
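
The autofs4_write() rewrite above is the canonical write-until-done loop: one call site inside the loop, treat an error or zero-byte return as terminal, and advance by however much actually went through, so short writes are retried rather than silently truncated. A self-contained userspace equivalent (write_all() is an illustrative name, not kernel API):

    #include <unistd.h>

    /* Write the whole buffer, tolerating short writes; 0 on success. */
    static int write_all(int fd, const char *data, size_t bytes)
    {
            while (bytes) {
                    ssize_t wr = write(fd, data, bytes);
                    if (wr <= 0)
                            return -1;   /* error, or no forward progress */
                    data += wr;
                    bytes -= (size_t)wr;
            }
            return 0;
    }
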
 
index e158b22ef32f16d2fcf61c29fbf6d15a65581c1e..a7a28110dc80aa9df1e700d9c24aa297161ab996 100644 (file)
@@ -2275,7 +2275,7 @@ static int elf_core_dump(struct coredump_params *cprm)
                goto end_coredump;
 
        /* Align to page */
-       if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+       if (!dump_skip(cprm, dataoff - cprm->pos))
                goto end_coredump;
 
        for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
index 71ade0e556b7e7cab5b83705228a54099b067749..203589311bf88733bdb53d329a3b05a490ce9164 100644 (file)
@@ -1787,7 +1787,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
                                goto end_coredump;
        }
 
-       if (!dump_skip(cprm, dataoff - cprm->file->f_pos))
+       if (!dump_skip(cprm, dataoff - cprm->pos))
                goto end_coredump;
 
        if (!elf_fdpic_dump_segments(cprm))
index b677a6ea6001a872fc27dd6f90db72459b69f712..7706c8dc5fa637632dce3c9e31f50aa2c9d5f400 100644 (file)
@@ -2645,7 +2645,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
         * This algorithm is recursive because the amount of used stack space
         * is very small and the max recursion depth is limited.
         */
-       indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
+       indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)",
                             btrfsic_get_block_type(state, block),
                             block->logical_bytenr, block->dev_state->name,
                             block->dev_bytenr, block->mirror_num);
index 427c36b430a6e52dfadbcd9d9b620b5368d0b2ff..a85cf7d2330981d606352cc8218167570db5507a 100644 (file)
@@ -1373,7 +1373,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 
        if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
                BUG_ON(tm->slot != 0);
-               eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
+               eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
+                                               eb->len);
                if (!eb_rewin) {
                        btrfs_tree_read_unlock_blocking(eb);
                        free_extent_buffer(eb);
@@ -1454,7 +1455,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        } else if (old_root) {
                btrfs_tree_read_unlock(eb_root);
                free_extent_buffer(eb_root);
-               eb = alloc_dummy_extent_buffer(root->fs_info, logical);
+               eb = alloc_dummy_extent_buffer(root->fs_info, logical,
+                                       root->nodesize);
        } else {
                btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
                eb = btrfs_clone_extent_buffer(eb_root);
@@ -1552,6 +1554,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
                       trans->transid, root->fs_info->generation);
 
        if (!should_cow_block(trans, root, buf)) {
+               trans->dirty = true;
                *cow_ret = buf;
                return 0;
        }
@@ -1783,10 +1786,12 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
                        if (!err) {
                                tmp = (struct btrfs_disk_key *)(kaddr + offset -
                                                        map_start);
-                       } else {
+                       } else if (err == 1) {
                                read_extent_buffer(eb, &unaligned,
                                                   offset, sizeof(unaligned));
                                tmp = &unaligned;
+                       } else {
+                               return err;
                        }
 
                } else {
@@ -2510,6 +2515,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
                if (!btrfs_buffer_uptodate(tmp, 0, 0))
                        ret = -EIO;
                free_extent_buffer(tmp);
+       } else {
+               ret = PTR_ERR(tmp);
        }
        return ret;
 }
@@ -2773,8 +2780,10 @@ again:
                         * then we don't want to set the path blocking,
                         * so we test it here
                         */
-                       if (!should_cow_block(trans, root, b))
+                       if (!should_cow_block(trans, root, b)) {
+                               trans->dirty = true;
                                goto cow_done;
+                       }
 
                        /*
                         * must have write locks on this node and the
@@ -2823,6 +2832,8 @@ cow_done:
                }
 
                ret = key_search(b, key, level, &prev_cmp, &slot);
+               if (ret < 0)
+                       goto done;
 
                if (level != 0) {
                        int dec = 0;
index 101c3cfd3f7cddd43e07462fd74ebfb06cb4039e..4274a7bfdaed8db3ef94d07d38e755db9125b4d0 100644 (file)
@@ -2518,7 +2518,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, unsigned long count);
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
-                                unsigned long count, int wait);
+                                unsigned long count, u64 transid, int wait);
 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
index 61561c2a3f96da0917a0819fb9db48972385496d..d3aaabbfada0adcbb09936a5d47063a4adec65dd 100644 (file)
@@ -1606,15 +1606,23 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
        return 0;
 }
 
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
-                            struct list_head *del_list)
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list)
 {
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *item;
 
        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
-               return;
+               return false;
+
+       /*
+        * We can only do one readdir with delayed items at a time because of
+        * item->readdir_list.
+        */
+       inode_unlock_shared(inode);
+       inode_lock(inode);
 
        mutex_lock(&delayed_node->mutex);
        item = __btrfs_first_delayed_insertion_item(delayed_node);
@@ -1641,10 +1649,13 @@ void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
         * requeue or dequeue this delayed node.
         */
        atomic_dec(&delayed_node->refs);
+
+       return true;
 }
 
-void btrfs_put_delayed_items(struct list_head *ins_list,
-                            struct list_head *del_list)
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list)
 {
        struct btrfs_delayed_item *curr, *next;
 
@@ -1659,6 +1670,12 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }
+
+       /*
+        * The VFS is going to do up_read(), so we need to downgrade back to a
+        * read lock.
+        */
+       downgrade_write(&inode->i_rwsem);
 }
 
 int btrfs_should_delete_dir_index(struct list_head *del_list,
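
These two helpers take over i_rwsem management from the VFS: btrfs readdir becomes iterate_shared (see the btrfs_dir_file_operations hunk further down), so it is entered with the inode locked for read, but item->readdir_list allows only one delayed-items reader at a time. The get side therefore trades the shared lock for an exclusive one, and the put side downgrades back so the VFS's closing up_read() stays balanced. A rough pthread model of the dance (pthreads lacks an atomic downgrade, so the model releases and re-acquires where the kernel's downgrade_write() cannot be beaten to the lock):

    #include <pthread.h>

    static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

    static void readdir_get(void)   /* entered with the read lock held */
    {
            pthread_rwlock_unlock(&rw);   /* inode_unlock_shared() */
            pthread_rwlock_wrlock(&rw);   /* inode_lock()          */
    }

    static void readdir_put(void)   /* caller still unlocks as a reader */
    {
            /* kernel: downgrade_write(&inode->i_rwsem), done atomically */
            pthread_rwlock_unlock(&rw);
            pthread_rwlock_rdlock(&rw);
    }
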
index 0167853c84aea2d93a8fdd0314342e0a1ef42d1b..2495b3d4075f81538fde3ad0ca020648c164db91 100644 (file)
@@ -137,10 +137,12 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
 void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
 
 /* Used for readdir() */
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
-                            struct list_head *del_list);
-void btrfs_put_delayed_items(struct list_head *ins_list,
-                            struct list_head *del_list);
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list);
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list);
 int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index);
 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
index 6628fca9f4ed87c047e54007f1f94f99ebbaba65..60ce1190307bb976a8fc9229e05ad21b8bedba55 100644 (file)
@@ -1098,7 +1098,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
        struct inode *btree_inode = root->fs_info->btree_inode;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
+       if (IS_ERR(buf))
                return;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, WAIT_NONE, btree_get_extent, 0);
@@ -1114,7 +1114,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
        int ret;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
+       if (IS_ERR(buf))
                return 0;
 
        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
@@ -1147,7 +1147,8 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr)
 {
        if (btrfs_test_is_dummy_root(root))
-               return alloc_test_extent_buffer(root->fs_info, bytenr);
+               return alloc_test_extent_buffer(root->fs_info, bytenr,
+                               root->nodesize);
        return alloc_extent_buffer(root->fs_info, bytenr);
 }
 
@@ -1171,8 +1172,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
        int ret;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(buf))
+               return buf;
 
        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret) {
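
These disk-io.c hunks switch btrfs_find_create_tree_block() failures from NULL to the ERR_PTR convention, so callers can distinguish -ENOMEM from the new -EINVAL that alloc_extent_buffer() raises for an unaligned block start (extent_io.c hunks below); each if (!buf) test accordingly becomes IS_ERR(buf). A userspace rendering of the convention, with all names here illustrative:

    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_ERRNO  4095
    #define ENOMEM_ERR 12

    static inline void *ERR_PTR(long error)      { return (void *)error; }
    static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int   IS_ERR(const void *ptr)
    {
            /* error values live in the top page of the address space */
            return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    static void *find_create_block(int simulate_oom)
    {
            void *b = simulate_oom ? NULL : malloc(64);
            if (!b)
                    return ERR_PTR(-ENOMEM_ERR);  /* never plain NULL */
            return b;
    }
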
@@ -1314,14 +1315,16 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 /* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(void)
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_root *root;
 
        root = btrfs_alloc_root(NULL, GFP_KERNEL);
        if (!root)
                return ERR_PTR(-ENOMEM);
-       __setup_root(4096, 4096, 4096, root, NULL, 1);
+       /* We don't use stripesize in the self-tests, so set it to sectorsize */
+       __setup_root(nodesize, sectorsize, sectorsize, root, NULL,
+                       BTRFS_ROOT_TREE_OBJECTID);
        set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
        root->alloc_bytenr = 0;
 
@@ -1803,6 +1806,13 @@ static int cleaner_kthread(void *arg)
                if (btrfs_need_cleaner_sleep(root))
                        goto sleep;
 
+               /*
+                * Do not do anything if we might cause open_ctree() to block
+                * before we have finished mounting the filesystem.
+                */
+               if (!root->fs_info->open)
+                       goto sleep;
+
                if (!mutex_trylock(&root->fs_info->cleaner_mutex))
                        goto sleep;
 
@@ -2517,7 +2527,6 @@ int open_ctree(struct super_block *sb,
        int num_backups_tried = 0;
        int backup_index = 0;
        int max_active;
-       bool cleaner_mutex_locked = false;
 
        tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
        chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
@@ -2797,7 +2806,7 @@ int open_ctree(struct super_block *sb,
 
        nodesize = btrfs_super_nodesize(disk_super);
        sectorsize = btrfs_super_sectorsize(disk_super);
-       stripesize = btrfs_super_stripesize(disk_super);
+       stripesize = sectorsize;
        fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
        fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
 
@@ -2996,13 +3005,6 @@ retry_root_backup:
                goto fail_sysfs;
        }
 
-       /*
-        * Hold the cleaner_mutex thread here so that we don't block
-        * for a long time on btrfs_recover_relocation.  cleaner_kthread
-        * will wait for us to finish mounting the filesystem.
-        */
-       mutex_lock(&fs_info->cleaner_mutex);
-       cleaner_mutex_locked = true;
        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
                                               "btrfs-cleaner");
        if (IS_ERR(fs_info->cleaner_kthread))
@@ -3062,8 +3064,10 @@ retry_root_backup:
                ret = btrfs_cleanup_fs_roots(fs_info);
                if (ret)
                        goto fail_qgroup;
-               /* We locked cleaner_mutex before creating cleaner_kthread. */
+
+               mutex_lock(&fs_info->cleaner_mutex);
                ret = btrfs_recover_relocation(tree_root);
+               mutex_unlock(&fs_info->cleaner_mutex);
                if (ret < 0) {
                        btrfs_warn(fs_info, "failed to recover relocation: %d",
                                        ret);
@@ -3071,8 +3075,6 @@ retry_root_backup:
                        goto fail_qgroup;
                }
        }
-       mutex_unlock(&fs_info->cleaner_mutex);
-       cleaner_mutex_locked = false;
 
        location.objectid = BTRFS_FS_TREE_OBJECTID;
        location.type = BTRFS_ROOT_ITEM_KEY;
@@ -3186,10 +3188,6 @@ fail_cleaner:
        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
 
 fail_sysfs:
-       if (cleaner_mutex_locked) {
-               mutex_unlock(&fs_info->cleaner_mutex);
-               cleaner_mutex_locked = false;
-       }
        btrfs_sysfs_remove_mounted(fs_info);
 
 fail_fsdev_sysfs:
@@ -4130,6 +4128,16 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
         * Hint to catch really bogus numbers, bitflips or so, more exact checks are
         * done later
         */
+       if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) {
+               btrfs_err(fs_info, "bytes_used is too small %llu",
+                      btrfs_super_bytes_used(sb));
+               ret = -EINVAL;
+       }
+       if (!is_power_of_2(btrfs_super_stripesize(sb))) {
+               btrfs_err(fs_info, "invalid stripesize %u",
+                      btrfs_super_stripesize(sb));
+               ret = -EINVAL;
+       }
        if (btrfs_super_num_devices(sb) > (1UL << 31))
                printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
                                btrfs_super_num_devices(sb));
index 8e79d0070bcf57d26f6792e7775fde0117834810..acba821499a909e3a8d42e62e34c8f8e3f27bfa2 100644 (file)
@@ -90,7 +90,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
 void btrfs_free_fs_root(struct btrfs_root *root);
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-struct btrfs_root *btrfs_alloc_dummy_root(void);
+struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize);
 #endif
 
 /*
index a400951e8678837a515f3fe2f42793bbbd85aa6e..82b912a293ab3f2f06cd54aacf14fcda0827e85d 100644 (file)
@@ -2042,6 +2042,11 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
        struct btrfs_bio *bbio = NULL;
 
 
+       /*
+        * Avoid races with device replace and make sure our bbio has devices
+        * associated to its stripes that don't go away while we are discarding.
+        */
+       btrfs_bio_counter_inc_blocked(root->fs_info);
        /* Tell the block device(s) that the sectors can be discarded */
        ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
                              bytenr, &num_bytes, &bbio, 0);
@@ -2074,6 +2079,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                }
                btrfs_put_bbio(bbio);
        }
+       btrfs_bio_counter_dec(root->fs_info);
 
        if (actual_bytes)
                *actual_bytes = discarded_bytes;
@@ -2829,6 +2835,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
 
 struct async_delayed_refs {
        struct btrfs_root *root;
+       u64 transid;
        int count;
        int error;
        int sync;
@@ -2844,6 +2851,10 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 
        async = container_of(work, struct async_delayed_refs, work);
 
+       /* if the commit is already started, we don't need to wait here */
+       if (btrfs_transaction_blocked(async->root->fs_info))
+               goto done;
+
        trans = btrfs_join_transaction(async->root);
        if (IS_ERR(trans)) {
                async->error = PTR_ERR(trans);
@@ -2855,10 +2866,15 @@ static void delayed_ref_async_start(struct btrfs_work *work)
         * wait on delayed refs
         */
        trans->sync = true;
+
+       /* Don't bother flushing if we got into a different transaction */
+       if (trans->transid > async->transid)
+               goto end;
+
        ret = btrfs_run_delayed_refs(trans, async->root, async->count);
        if (ret)
                async->error = ret;
-
+end:
        ret = btrfs_end_transaction(trans, async->root);
        if (ret && !async->error)
                async->error = ret;
@@ -2870,7 +2886,7 @@ done:
 }
 
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
-                                unsigned long count, int wait)
+                                unsigned long count, u64 transid, int wait)
 {
        struct async_delayed_refs *async;
        int ret;
@@ -2882,6 +2898,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
        async->root = root->fs_info->tree_root;
        async->count = count;
        async->error = 0;
+       async->transid = transid;
        if (wait)
                async->sync = 1;
        else
@@ -8010,8 +8027,9 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
        struct extent_buffer *buf;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(buf))
+               return buf;
+
        btrfs_set_header_generation(buf, trans->transid);
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
        btrfs_tree_lock(buf);
@@ -8038,7 +8056,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
                         buf->start + buf->len - 1, GFP_NOFS);
        }
-       trans->blocks_used++;
+       trans->dirty = true;
        /* this returns a buffer locked for blocking */
        return buf;
 }
@@ -8653,8 +8671,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
        next = btrfs_find_tree_block(root->fs_info, bytenr);
        if (!next) {
                next = btrfs_find_create_tree_block(root, bytenr);
-               if (!next)
-                       return -ENOMEM;
+               if (IS_ERR(next))
+                       return PTR_ERR(next);
+
                btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
                                               level - 1);
                reada = 1;
index 3cd57825c75f763aa91501c8b342b9f89b7a78cf..75533adef9988ae18b5a3ccab9a1b6128d7d02d8 100644 (file)
@@ -2025,9 +2025,16 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
        bio->bi_iter.bi_size = 0;
        map_length = length;
 
+       /*
+        * Avoid races with device replace and make sure our bbio has devices
+        * associated to its stripes that don't go away while we are doing the
+        * read repair operation.
+        */
+       btrfs_bio_counter_inc_blocked(fs_info);
        ret = btrfs_map_block(fs_info, WRITE, logical,
                              &map_length, &bbio, mirror_num);
        if (ret) {
+               btrfs_bio_counter_dec(fs_info);
                bio_put(bio);
                return -EIO;
        }
@@ -2037,6 +2044,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
        dev = bbio->stripes[mirror_num-1].dev;
        btrfs_put_bbio(bbio);
        if (!dev || !dev->bdev || !dev->writeable) {
+               btrfs_bio_counter_dec(fs_info);
                bio_put(bio);
                return -EIO;
        }
@@ -2045,6 +2053,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
 
        if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
                /* try to remap that extent elsewhere? */
+               btrfs_bio_counter_dec(fs_info);
                bio_put(bio);
                btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
                return -EIO;
@@ -2054,6 +2063,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
                "read error corrected: ino %llu off %llu (dev %s sector %llu)",
                                  btrfs_ino(inode), start,
                                  rcu_str_deref(dev->name), sector);
+       btrfs_bio_counter_dec(fs_info);
        bio_put(bio);
        return 0;
 }
@@ -4718,16 +4728,16 @@ err:
 }
 
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-                                               u64 start)
+                                               u64 start, u32 nodesize)
 {
        unsigned long len;
 
        if (!fs_info) {
                /*
                 * Called only from tests that don't always have a fs_info
-                * available, but we know that nodesize is 4096
+                * available
                 */
-               len = 4096;
+               len = nodesize;
        } else {
                len = fs_info->tree_root->nodesize;
        }
@@ -4823,7 +4833,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-                                              u64 start)
+                                       u64 start, u32 nodesize)
 {
        struct extent_buffer *eb, *exists = NULL;
        int ret;
@@ -4831,7 +4841,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
        eb = find_extent_buffer(fs_info, start);
        if (eb)
                return eb;
-       eb = alloc_dummy_extent_buffer(fs_info, start);
+       eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
        if (!eb)
                return NULL;
        eb->fs_info = fs_info;
@@ -4882,18 +4892,25 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        int uptodate = 1;
        int ret;
 
+       if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) {
+               btrfs_err(fs_info, "bad tree block start %llu", start);
+               return ERR_PTR(-EINVAL);
+       }
+
        eb = find_extent_buffer(fs_info, start);
        if (eb)
                return eb;
 
        eb = __alloc_extent_buffer(fs_info, start, len);
        if (!eb)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        for (i = 0; i < num_pages; i++, index++) {
                p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
-               if (!p)
+               if (!p) {
+                       exists = ERR_PTR(-ENOMEM);
                        goto free_eb;
+               }
 
                spin_lock(&mapping->private_lock);
                if (PagePrivate(p)) {
@@ -4938,8 +4955,10 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 again:
        ret = radix_tree_preload(GFP_NOFS);
-       if (ret)
+       if (ret) {
+               exists = ERR_PTR(ret);
                goto free_eb;
+       }
 
        spin_lock(&fs_info->buffer_lock);
        ret = radix_tree_insert(&fs_info->buffer_radix,
@@ -5323,6 +5342,11 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
        return ret;
 }
 
+/*
+ * return 0 if the item is found within a page.
+ * return 1 if the item spans two pages.
+ * return -EINVAL otherwise.
+ */
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                               unsigned long min_len, char **map,
                               unsigned long *map_start,
@@ -5337,7 +5361,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                PAGE_SHIFT;
 
        if (i != end_i)
-               return -EINVAL;
+               return 1;
 
        if (i == 0) {
                offset = start_offset;
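
The comment added above pins down a tri-state contract, and the generic_bin_search() hunk earlier in this diff is the matching caller: 0 means the key is usable through the mapped pointer, 1 means the item straddles a page boundary so the caller copies it out with read_extent_buffer(), and a negative value now propagates as a real error instead of being conflated with the straddling case. A toy model of dispatching on such a contract:

    #include <stdio.h>

    #define EINVAL_ERR 22

    /* 0: mapped within one page; 1: spans two pages; <0: error. */
    static int map_item(int spans, int bad)
    {
            if (bad)
                    return -EINVAL_ERR;
            return spans ? 1 : 0;
    }

    static int lookup(int spans, int bad)
    {
            int err = map_item(spans, bad);
            if (err == 0)
                    puts("use the mapped pointer");
            else if (err == 1)
                    puts("copy the item out instead");
            else
                    return err;   /* genuine failure, propagate */
            return 0;
    }

    int main(void)
    {
            return lookup(0, 0) || lookup(1, 0) ||
                   (lookup(0, 1) != -EINVAL_ERR);
    }
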
index 1baf19c9b79d2f48d407549b7dcdf03ce0faed3c..c0c1c4fef6cea0a6ab542a38596093b186b5d7d1 100644 (file)
@@ -348,7 +348,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
                                                  u64 start, unsigned long len);
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-                                               u64 start);
+                                               u64 start, u32 nodesize);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
                                         u64 start);
@@ -468,5 +468,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
                                      u64 *end, u64 max_bytes);
 #endif
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-                                              u64 start);
+                                              u64 start, u32 nodesize);
 #endif
index e0c9bd3fb02dffdb46ac221953c941604043bcf5..2234e88cf674b6298aae8a1077737444a12c4d9d 100644 (file)
@@ -1534,30 +1534,30 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                reserve_bytes = round_up(write_bytes + sector_offset,
                                root->sectorsize);
 
-               if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
-                                             BTRFS_INODE_PREALLOC)) &&
-                   check_can_nocow(inode, pos, &write_bytes) > 0) {
-                       /*
-                        * For nodata cow case, no need to reserve
-                        * data space.
-                        */
-                       only_release_metadata = true;
-                       /*
-                        * our prealloc extent may be smaller than
-                        * write_bytes, so scale down.
-                        */
-                       num_pages = DIV_ROUND_UP(write_bytes + offset,
-                                                PAGE_SIZE);
-                       reserve_bytes = round_up(write_bytes + sector_offset,
-                                       root->sectorsize);
-                       goto reserve_metadata;
-               }
-
                ret = btrfs_check_data_free_space(inode, pos, write_bytes);
-               if (ret < 0)
-                       break;
+               if (ret < 0) {
+                       if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+                                                     BTRFS_INODE_PREALLOC)) &&
+                           check_can_nocow(inode, pos, &write_bytes) > 0) {
+                               /*
+                                * For nodata cow case, no need to reserve
+                                * data space.
+                                */
+                               only_release_metadata = true;
+                               /*
+                                * our prealloc extent may be smaller than
+                                * write_bytes, so scale down.
+                                */
+                               num_pages = DIV_ROUND_UP(write_bytes + offset,
+                                                        PAGE_SIZE);
+                               reserve_bytes = round_up(write_bytes +
+                                                        sector_offset,
+                                                        root->sectorsize);
+                       } else {
+                               break;
+                       }
+               }
 
-reserve_metadata:
                ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
                if (ret) {
                        if (!only_release_metadata)
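
The __btrfs_buffered_write() reshuffle above inverts the old order: try the ordinary data-space reservation first and consult the nocow/prealloc path only if that reservation fails, so the common COW write never pays for check_can_nocow() and the fallback is confined to the near-ENOSPC corner. A stub model of the fallback shape (thresholds and helpers invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    static int  reserve_data(long n) { return n > 100 ? -28 : 0; } /* -ENOSPC */
    static bool can_nocow(long n)    { return n <= 150; }

    static int reserve(long n, bool *metadata_only)
    {
            int ret = reserve_data(n);
            if (ret < 0) {
                    if (!can_nocow(n))
                            return ret;      /* genuine ENOSPC */
                    *metadata_only = true;   /* nocow: no data space needed */
            }
            return 0;   /* metadata reservation would follow here */
    }

    int main(void)
    {
            bool m = false;
            printf("%d %d\n", reserve(50, &m),  m ? 1 : 0);   /* 0 0 */
            m = false;
            printf("%d %d\n", reserve(120, &m), m ? 1 : 0);   /* 0 1 */
            return 0;
    }
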
index c6dc1183f54249c3ee5681fcec818fc9194bc52b..69d270f6602c2de22a734af3f848ad03bf767906 100644 (file)
@@ -29,7 +29,7 @@
 #include "inode-map.h"
 #include "volumes.h"
 
-#define BITS_PER_BITMAP                (PAGE_SIZE * 8)
+#define BITS_PER_BITMAP                (PAGE_SIZE * 8UL)
 #define MAX_CACHE_BYTES_PER_GIG        SZ_32K
 
 struct btrfs_trim_range {
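
The UL suffix here, together with the u32-to-u64 widening in the hunks that follow, protects the BITS_PER_BITMAP * ctl->unit products: with 64K pages a bitmap spans 524288 bits, and multiplying that by a 64K sectorsize yields 2^35, which a 32-bit intermediate silently truncates. A small demonstration of the wraparound:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t bits = 64 * 1024 * 8;   /* bits per 64K-page bitmap */
            uint32_t unit = 64 * 1024;       /* sectorsize */

            uint32_t bad  = bits * unit;             /* 2^35 wraps to 0 */
            uint64_t good = (uint64_t)bits * unit;   /* 34359738368     */

            printf("u32: %u  u64: %llu\n", (unsigned)bad,
                   (unsigned long long)good);
            return 0;
    }
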
@@ -1415,11 +1415,11 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
                                   u64 offset)
 {
        u64 bitmap_start;
-       u32 bytes_per_bitmap;
+       u64 bytes_per_bitmap;
 
        bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
        bitmap_start = offset - ctl->start;
-       bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
+       bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
        bitmap_start *= bytes_per_bitmap;
        bitmap_start += ctl->start;
 
@@ -1638,10 +1638,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
        u64 bitmap_bytes;
        u64 extent_bytes;
        u64 size = block_group->key.offset;
-       u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
-       u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);
+       u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+       u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
-       max_bitmaps = max_t(u32, max_bitmaps, 1);
+       max_bitmaps = max_t(u64, max_bitmaps, 1);
 
        ASSERT(ctl->total_bitmaps <= max_bitmaps);
 
@@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
         * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
         * we add more bitmaps.
         */
-       bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE;
+       bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit;
 
        if (bitmap_bytes >= max_bytes) {
                ctl->extents_thresh = 0;
@@ -3662,7 +3662,7 @@ have_info:
                        if (tmp->offset + tmp->bytes < offset)
                                break;
                        if (offset + bytes < tmp->offset) {
-                               n = rb_prev(&info->offset_index);
+                               n = rb_prev(&tmp->offset_index);
                                continue;
                        }
                        info = tmp;
@@ -3676,7 +3676,7 @@ have_info:
                        if (offset + bytes < tmp->offset)
                                break;
                        if (tmp->offset + tmp->bytes < offset) {
-                               n = rb_next(&info->offset_index);
+                               n = rb_next(&tmp->offset_index);
                                continue;
                        }
                        info = tmp;
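
Both one-line fixes above are the same bug: the overlap search stepped the rbtree from info, the fixed node the loop started at, instead of from tmp, the node just examined, so the cursor never advanced and the walk could spin on one neighbour. The shape of the bug in a minimal list walk:

    #include <stdio.h>

    struct node { int val; struct node *next; };

    int main(void)
    {
            struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
            struct node *start = &a, *cur;

            /*
             * Buggy form: advancing from the fixed starting node,
             *     for (cur = start->next; cur; cur = start->next)
             * keeps yielding the same node forever.
             */
            for (cur = start; cur; cur = cur->next)   /* advance from cur */
                    printf("%d\n", cur->val);
            return 0;
    }
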
index aae520b2aee535f069946ba332b83a3ab0d44644..a97fdc156a03512bf36df40dd1b4278df845115f 100644 (file)
@@ -24,6 +24,11 @@ int __init btrfs_hash_init(void)
        return PTR_ERR_OR_ZERO(tfm);
 }
 
+const char* btrfs_crc32c_impl(void)
+{
+       return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
+}
+
 void btrfs_hash_exit(void)
 {
        crypto_free_shash(tfm);
index 118a2316e5d39f51cc6293e912ed9d2598d6afd8..c3a2ec554361f5d33b4b5a1a6eda29dca5b82aad 100644 (file)
@@ -22,6 +22,7 @@
 int __init btrfs_hash_init(void);
 
 void btrfs_hash_exit(void);
+const char* btrfs_crc32c_impl(void);
 
 u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length);
 
index 270499598ed490adb6b7275a7d59e9d6999c47ef..4421954720b8b719a5c91dcc747c3a494884143c 100644 (file)
@@ -3271,7 +3271,16 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
        /* grab metadata reservation from transaction handle */
        if (reserve) {
                ret = btrfs_orphan_reserve_metadata(trans, inode);
-               BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
+               ASSERT(!ret);
+               if (ret) {
+                       atomic_dec(&root->orphan_inodes);
+                       clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+                                 &BTRFS_I(inode)->runtime_flags);
+                       if (insert)
+                               clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                                         &BTRFS_I(inode)->runtime_flags);
+                       return ret;
+               }
        }
 
        /* insert an orphan item to track this unlinked/truncated file */
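
The BUG_ON() here becomes an ASSERT() plus real unwinding: on reservation failure, btrfs_orphan_add() now rolls back the orphan count and the runtime flag bits it set a few lines earlier and returns the error, instead of halting the machine. A stub model of the unwind pattern (constants and helpers invented for illustration):

    #include <stdio.h>

    #define F_RESERVED 0x1
    #define F_ITEM     0x2

    struct orphan_state { int count; unsigned int flags; };

    static int reserve(void) { return -28; }   /* simulate -ENOSPC */

    static int orphan_add(struct orphan_state *o, int insert)
    {
            o->count++;
            o->flags |= F_RESERVED | (insert ? F_ITEM : 0);

            int ret = reserve();
            if (ret) {                      /* undo everything set above */
                    o->count--;
                    o->flags &= ~F_RESERVED;
                    if (insert)
                            o->flags &= ~F_ITEM;
                    return ret;
            }
            return 0;
    }

    int main(void)
    {
            struct orphan_state o = { 0, 0 };
            printf("%d %d %u\n", orphan_add(&o, 1), o.count, o.flags);
            return 0;
    }
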
@@ -4549,6 +4558,7 @@ delete:
                        BUG_ON(ret);
                        if (btrfs_should_throttle_delayed_refs(trans, root))
                                btrfs_async_run_delayed_refs(root,
-					trans->delayed_ref_updates * 2, 0);
+					trans->delayed_ref_updates * 2,
+					trans->transid, 0);
                        if (be_nice) {
                                if (truncate_space_check(trans, root,
@@ -5748,6 +5758,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
        int name_len;
        int is_curr = 0;        /* ctx->pos points to the current index? */
        bool emitted;
+       bool put = false;
 
        /* FIXME, use a real flag for deciding about the key type */
        if (root->fs_info->tree_root == root)
@@ -5765,7 +5776,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
        if (key_type == BTRFS_DIR_INDEX_KEY) {
                INIT_LIST_HEAD(&ins_list);
                INIT_LIST_HEAD(&del_list);
-               btrfs_get_delayed_items(inode, &ins_list, &del_list);
+               put = btrfs_readdir_get_delayed_items(inode, &ins_list,
+                                                     &del_list);
        }
 
        key.type = key_type;
@@ -5912,8 +5924,8 @@ next:
 nopos:
        ret = 0;
 err:
-       if (key_type == BTRFS_DIR_INDEX_KEY)
-               btrfs_put_delayed_items(&ins_list, &del_list);
+       if (put)
+               btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
        btrfs_free_path(path);
        return ret;
 }
@@ -6979,7 +6991,18 @@ insert:
                 * existing will always be non-NULL, since there must be
                 * extent causing the -EEXIST.
                 */
-               if (start >= extent_map_end(existing) ||
+               if (existing->start == em->start &&
+                   extent_map_end(existing) == extent_map_end(em) &&
+                   em->block_start == existing->block_start) {
+                       /*
+                        * these two extents are the same, it happens
+                        * with inlines especially
+                        */
+                       free_extent_map(em);
+                       em = existing;
+                       err = 0;
+
+               } else if (start >= extent_map_end(existing) ||
                    start <= existing->start) {
                        /*
                         * The existing extent map is the one nearest to
@@ -10514,7 +10537,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
 static const struct file_operations btrfs_dir_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .iterate        = btrfs_real_readdir,
+       .iterate_shared = btrfs_real_readdir,
        .unlocked_ioctl = btrfs_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_compat_ioctl,
index 559170464d7c50f43d6804d59dfd9f73e57a707f..aca8264f4a49d029c52ca9363cf4b64dcef1aef2 100644 (file)
@@ -718,12 +718,13 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
        return count;
 }
 
-void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
                              const u64 range_start, const u64 range_len)
 {
        struct btrfs_root *root;
        struct list_head splice;
        int done;
+       int total_done = 0;
 
        INIT_LIST_HEAD(&splice);
 
@@ -742,6 +743,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
                done = btrfs_wait_ordered_extents(root, nr,
                                                  range_start, range_len);
                btrfs_put_fs_root(root);
+               total_done += done;
 
                spin_lock(&fs_info->ordered_root_lock);
                if (nr != -1) {
@@ -752,6 +754,8 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
        list_splice_tail(&splice, &fs_info->ordered_roots);
        spin_unlock(&fs_info->ordered_root_lock);
        mutex_unlock(&fs_info->ordered_operations_mutex);
+
+       return total_done;
 }
 
 /*
@@ -964,6 +968,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
        struct rb_node *prev = NULL;
        struct btrfs_ordered_extent *test;
        int ret = 1;
+       u64 orig_offset = offset;
 
        spin_lock_irq(&tree->lock);
        if (ordered) {
@@ -979,7 +984,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 
        /* truncate file */
        if (disk_i_size > i_size) {
-               BTRFS_I(inode)->disk_i_size = i_size;
+               BTRFS_I(inode)->disk_i_size = orig_offset;
                ret = 0;
                goto out;
        }
index 2049c9be85ee304f41b11753db609fd802de7fd6..451507776ff59f87fdc286db823f4d08ff71899a 100644 (file)
@@ -199,7 +199,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                           u32 *sum, int len);
 int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
                               const u64 range_start, const u64 range_len);
-void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
+int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr,
                              const u64 range_start, const u64 range_len);
 void btrfs_get_logged_extents(struct inode *inode,
                              struct list_head *logged_list,
index 298631eaee78c1ba26a05e4d97979edbc425f3a7..8428db7cd88fa4b2eada8e1933d34cdaee6bb220 100644 (file)
@@ -761,12 +761,14 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info)
 
        do {
                enqueued = 0;
+               mutex_lock(&fs_devices->device_list_mutex);
                list_for_each_entry(device, &fs_devices->devices, dev_list) {
                        if (atomic_read(&device->reada_in_flight) <
                            MAX_IN_FLIGHT)
                                enqueued += reada_start_machine_dev(fs_info,
                                                                    device);
                }
+               mutex_unlock(&fs_devices->device_list_mutex);
                total += enqueued;
        } while (enqueued && total < 10000);
 
index 46d847f66e4b870dcd54dc18d023f8ca321f40fb..70427ef66b044db3baadd071e9b58b3fa9ce4bfd 100644 (file)
@@ -3582,6 +3582,46 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                 */
                scrub_pause_on(fs_info);
                ret = btrfs_inc_block_group_ro(root, cache);
+               if (!ret && is_dev_replace) {
+                       /*
+                        * If we are doing a device replace wait for any tasks
+                        * that started delalloc right before we set the block
+                        * group to RO mode, as they might have just allocated
+                        * an extent from it or decided they could do a nocow
+                        * write. And if any such tasks did that, wait for their
+                        * ordered extents to complete and then commit the
+                        * current transaction, so that we can later see the new
+                        * extent items in the extent tree - the ordered extents
+                        * create delayed data references (for cow writes) when
+                        * they complete, which will be run and insert the
+                        * corresponding extent items into the extent tree when
+                        * we commit the transaction they used when running
+                        * inode.c:btrfs_finish_ordered_io(). We later use
+                        * the commit root of the extent tree to find extents
+                        * to copy from the srcdev into the tgtdev, and we don't
+                        * want to miss any new extents.
+                        */
+                       btrfs_wait_block_group_reservations(cache);
+                       btrfs_wait_nocow_writers(cache);
+                       ret = btrfs_wait_ordered_roots(fs_info, -1,
+                                                      cache->key.objectid,
+                                                      cache->key.offset);
+                       if (ret > 0) {
+                               struct btrfs_trans_handle *trans;
+
+                               trans = btrfs_join_transaction(root);
+                               if (IS_ERR(trans))
+                                       ret = PTR_ERR(trans);
+                               else
+                                       ret = btrfs_commit_transaction(trans,
+                                                                      root);
+                               if (ret) {
+                                       scrub_pause_off(fs_info);
+                                       btrfs_put_block_group(cache);
+                                       break;
+                               }
+                       }
+               }
                scrub_pause_off(fs_info);
 
                if (ret == 0) {
@@ -3602,9 +3642,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                        break;
                }
 
+               btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
                dev_replace->cursor_right = found_key.offset + length;
                dev_replace->cursor_left = found_key.offset;
                dev_replace->item_needs_writeback = 1;
+               btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
                ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
                                  found_key.offset, cache, is_dev_replace);
 
@@ -3640,6 +3682,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
                scrub_pause_off(fs_info);
 
+               btrfs_dev_replace_lock(&fs_info->dev_replace, 1);
+               dev_replace->cursor_left = dev_replace->cursor_right;
+               dev_replace->item_needs_writeback = 1;
+               btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
+
                if (ro_set)
                        btrfs_dec_block_group_ro(root, cache);
 
@@ -3677,9 +3724,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                        ret = -ENOMEM;
                        break;
                }
-
-               dev_replace->cursor_left = dev_replace->cursor_right;
-               dev_replace->item_needs_writeback = 1;
 skip:
                key.offset = found_key.offset + length;
                btrfs_release_path(path);
index 4e59a91a11e0447ab05f5c39b9f5baa58121fc23..60e7179ed4b77ff8dcd9de1d8eb1c0cebce9f95d 100644 (file)
@@ -235,7 +235,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
        trans->aborted = errno;
        /* Nothing used. The other threads that have joined this
         * transaction may be able to continue. */
-       if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
+       if (!trans->dirty && list_empty(&trans->new_bgs)) {
                const char *errstr;
 
                errstr = btrfs_decode_error(errno);
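
The blocks_used counter gives way to a plain trans->dirty flag: the abort path only cares whether the handle modified anything, and the flag is also raised on the should_cow_block() fast paths earlier in this diff, which handed out a reusable buffer without ever bumping blocks_used. The minimal shape of the idea, as an illustrative sketch:

    #include <stdbool.h>

    struct trans_handle {
            bool dirty;     /* did this handle modify any block? */
            int  aborted;
    };

    static void abort_trans(struct trans_handle *t, int err)
    {
            t->aborted = err;
            if (!t->dirty)
                    return;   /* nothing written: other joiners may go on */
            /* otherwise the whole transaction must be torn down */
    }
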
@@ -1807,6 +1807,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                        }
                }
                sb->s_flags &= ~MS_RDONLY;
+
+               fs_info->open = 1;
        }
 out:
        wake_up_process(fs_info->transaction_kthread);
@@ -2303,7 +2305,7 @@ static void btrfs_interface_exit(void)
 
 static void btrfs_print_mod_info(void)
 {
-       printk(KERN_INFO "Btrfs loaded"
+       printk(KERN_INFO "Btrfs loaded, crc32c=%s"
 #ifdef CONFIG_BTRFS_DEBUG
                        ", debug=on"
 #endif
@@ -2313,33 +2315,48 @@ static void btrfs_print_mod_info(void)
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
                        ", integrity-checker=on"
 #endif
-                       "\n");
+                       "\n",
+                       btrfs_crc32c_impl());
 }
 
 static int btrfs_run_sanity_tests(void)
 {
-       int ret;
-
+       int ret, i;
+       u32 sectorsize, nodesize;
+       u32 test_sectorsize[] = {
+               PAGE_SIZE,
+       };
        ret = btrfs_init_test_fs();
        if (ret)
                return ret;
-
-       ret = btrfs_test_free_space_cache();
-       if (ret)
-               goto out;
-       ret = btrfs_test_extent_buffer_operations();
-       if (ret)
-               goto out;
-       ret = btrfs_test_extent_io();
-       if (ret)
-               goto out;
-       ret = btrfs_test_inodes();
-       if (ret)
-               goto out;
-       ret = btrfs_test_qgroups();
-       if (ret)
-               goto out;
-       ret = btrfs_test_free_space_tree();
+       for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
+               sectorsize = test_sectorsize[i];
+               for (nodesize = sectorsize;
+                    nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
+                    nodesize <<= 1) {
+                       pr_info("BTRFS: selftest: sectorsize: %u  nodesize: %u\n",
+                               sectorsize, nodesize);
+                       ret = btrfs_test_free_space_cache(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_extent_buffer_operations(sectorsize,
+                               nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_extent_io(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_inodes(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_qgroups(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+                       ret = btrfs_test_free_space_tree(sectorsize, nodesize);
+                       if (ret)
+                               goto out;
+               }
+       }
 out:
        btrfs_destroy_test_fs();
        return ret;
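
btrfs_run_sanity_tests() stops hardcoding 4096 and instead sweeps a (sectorsize, nodesize) matrix: one sectorsize per supported page size, with nodesize doubling from the sectorsize up to BTRFS_MAX_METADATA_BLOCKSIZE (64K), so every suite runs against every geometry. The loop structure reduced to a runnable skeleton:

    #include <stdio.h>

    #define MAX_METADATA_BLOCKSIZE (64 * 1024)

    int main(void)
    {
            unsigned int sectorsizes[] = { 4096 };   /* kernel: PAGE_SIZE */
            unsigned int i, nodesize;

            for (i = 0; i < sizeof(sectorsizes) / sizeof(*sectorsizes); i++)
                    for (nodesize = sectorsizes[i];
                         nodesize <= MAX_METADATA_BLOCKSIZE;
                         nodesize <<= 1)
                            printf("run suites: sectorsize %u nodesize %u\n",
                                   sectorsizes[i], nodesize);
            return 0;
    }
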
index f54bf450bad3e8e5fa9de2d4a0a439cff295b1f4..02223f3f78f4b54bc1fca294d71ac75b0e61b2ce 100644 (file)
@@ -68,7 +68,7 @@ int btrfs_init_test_fs(void)
        if (IS_ERR(test_mnt)) {
                printk(KERN_ERR "btrfs: cannot mount test file system\n");
                unregister_filesystem(&test_type);
-               return ret;
+               return PTR_ERR(test_mnt);
        }
        return 0;
 }
@@ -175,7 +175,7 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
 }
 
 struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length)
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
 {
        struct btrfs_block_group_cache *cache;
 
@@ -192,8 +192,8 @@ btrfs_alloc_dummy_block_group(unsigned long length)
        cache->key.objectid = 0;
        cache->key.offset = length;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
-       cache->sectorsize = 4096;
-       cache->full_stripe_len = 4096;
+       cache->sectorsize = sectorsize;
+       cache->full_stripe_len = sectorsize;
 
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
index 054b8c73c951e4e50be759ea4b8cb322d80e9041..66fb6b701eb72975fa203fd4018ae847cffd46ab 100644 (file)
 struct btrfs_root;
 struct btrfs_trans_handle;
 
-int btrfs_test_free_space_cache(void);
-int btrfs_test_extent_buffer_operations(void);
-int btrfs_test_extent_io(void);
-int btrfs_test_inodes(void);
-int btrfs_test_qgroups(void);
-int btrfs_test_free_space_tree(void);
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
 int btrfs_init_test_fs(void);
 void btrfs_destroy_test_fs(void);
 struct inode *btrfs_new_test_inode(void);
 struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
 void btrfs_free_dummy_root(struct btrfs_root *root);
 struct btrfs_block_group_cache *
-btrfs_alloc_dummy_block_group(unsigned long length);
+btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
 void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
 #else
-static inline int btrfs_test_free_space_cache(void)
+static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_extent_buffer_operations(void)
+static inline int btrfs_test_extent_buffer_operations(u32 sectorsize,
+       u32 nodesize)
 {
        return 0;
 }
@@ -57,19 +58,19 @@ static inline int btrfs_init_test_fs(void)
 static inline void btrfs_destroy_test_fs(void)
 {
 }
-static inline int btrfs_test_extent_io(void)
+static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_inodes(void)
+static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_qgroups(void)
+static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
-static inline int btrfs_test_free_space_tree(void)
+static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
 {
        return 0;
 }
index f51963a8f929e97d8030ca1a1e228263094cf244..4f8cbd1ec5ee340d862a8f8c415ce828ebd1a9c8 100644 (file)
@@ -22,7 +22,7 @@
 #include "../extent_io.h"
 #include "../disk-io.h"
 
-static int test_btrfs_split_item(void)
+static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_path *path;
        struct btrfs_root *root;
@@ -40,7 +40,7 @@ static int test_btrfs_split_item(void)
 
        test_msg("Running btrfs_split_item tests\n");
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Could not allocate root\n");
                return PTR_ERR(root);
@@ -53,7 +53,8 @@ static int test_btrfs_split_item(void)
                return -ENOMEM;
        }
 
-       path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096);
+       path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
+                                                       nodesize);
        if (!eb) {
                test_msg("Could not allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -222,8 +223,8 @@ out:
        return ret;
 }
 
-int btrfs_test_extent_buffer_operations(void)
+int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize)
 {
-       test_msg("Running extent buffer operation tests");
-       return test_btrfs_split_item();
+       test_msg("Running extent buffer operation tests\n");
+       return test_btrfs_split_item(sectorsize, nodesize);
 }
index 55724607f79b8eea00090352d7038885001f7843..d19ab0317283ca728963700c93cff334819eaf16 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/sizes.h>
 #include "btrfs-tests.h"
+#include "../ctree.h"
 #include "../extent_io.h"
 
 #define PROCESS_UNLOCK         (1 << 0)
@@ -65,7 +66,7 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
        return count;
 }
 
-static int test_find_delalloc(void)
+static int test_find_delalloc(u32 sectorsize)
 {
        struct inode *inode;
        struct extent_io_tree tmp;
@@ -113,7 +114,7 @@ static int test_find_delalloc(void)
         * |--- delalloc ---|
         * |---  search  ---|
         */
-       set_extent_delalloc(&tmp, 0, 4095, NULL);
+       set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
        start = 0;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -122,9 +123,9 @@ static int test_find_delalloc(void)
                test_msg("Should have found at least one delalloc\n");
                goto out_bits;
        }
-       if (start != 0 || end != 4095) {
-               test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n",
-                        start, end);
+       if (start != 0 || end != (sectorsize - 1)) {
+               test_msg("Expected start 0 end %u, got start %llu end %llu\n",
+                       sectorsize - 1, start, end);
                goto out_bits;
        }
        unlock_extent(&tmp, start, end);
@@ -144,7 +145,7 @@ static int test_find_delalloc(void)
                test_msg("Couldn't find the locked page\n");
                goto out_bits;
        }
-       set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
+       set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -172,7 +173,7 @@ static int test_find_delalloc(void)
         * |--- delalloc ---|
         *                    |--- search ---|
         */
-       test_start = max_bytes + 4096;
+       test_start = max_bytes + sectorsize;
        locked_page = find_lock_page(inode->i_mapping, test_start >>
                                     PAGE_SHIFT);
        if (!locked_page) {
@@ -272,6 +273,16 @@ out:
        return ret;
 }
 
+/**
+ * test_bit_in_byte - Determine whether a bit is set in a byte
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit_in_byte(int nr, const u8 *addr)
+{
+       return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
+}
+
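
The helper above is needed because test_bit() indexes bits within native
unsigned longs, so on big-endian machines bit i of a long-based bitmap does not
live in byte i/8 of memory, while the extent buffer API is defined over a flat
byte stream. A standalone, userspace-only sketch of the byte-granular helper
(illustrative, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/* Same logic as the kernel helper: index bytes, not longs, so the
 * result is identical on little- and big-endian machines. */
static inline int test_bit_in_byte(int nr, const uint8_t *addr)
{
	return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
}

int main(void)
{
	uint8_t buf[2] = { 0x01, 0x80 };	/* bit 0 and bit 15 set */

	printf("bit 0:  %d\n", test_bit_in_byte(0, buf));	/* 1 */
	printf("bit 15: %d\n", test_bit_in_byte(15, buf));	/* 1 */
	printf("bit 7:  %d\n", test_bit_in_byte(7, buf));	/* 0 */
	return 0;
}
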
 static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
                             unsigned long len)
 {
@@ -298,25 +309,29 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
                return -EINVAL;
        }
 
-       bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
-                  sizeof(long) * BITS_PER_BYTE);
-       extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
-                                sizeof(long) * BITS_PER_BYTE);
-       if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
-               test_msg("Setting straddling pages failed\n");
-               return -EINVAL;
-       }
+       /* Straddling pages test */
+       if (len > PAGE_SIZE) {
+               bitmap_set(bitmap,
+                       (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+                       sizeof(long) * BITS_PER_BYTE);
+               extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+                                       sizeof(long) * BITS_PER_BYTE);
+               if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+                       test_msg("Setting straddling pages failed\n");
+                       return -EINVAL;
+               }
 
-       bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
-       bitmap_clear(bitmap,
-                    (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
-                    sizeof(long) * BITS_PER_BYTE);
-       extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
-       extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
-                                  sizeof(long) * BITS_PER_BYTE);
-       if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
-               test_msg("Clearing straddling pages failed\n");
-               return -EINVAL;
+               bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
+               bitmap_clear(bitmap,
+                       (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
+                       sizeof(long) * BITS_PER_BYTE);
+               extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
+               extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
+                                       sizeof(long) * BITS_PER_BYTE);
+               if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+                       test_msg("Clearing straddling pages failed\n");
+                       return -EINVAL;
+               }
        }
 
        /*
@@ -333,7 +348,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
        for (i = 0; i < len * BITS_PER_BYTE; i++) {
                int bit, bit1;
 
-               bit = !!test_bit(i, bitmap);
+               bit = !!test_bit_in_byte(i, (u8 *)bitmap);
                bit1 = !!extent_buffer_test_bit(eb, 0, i);
                if (bit1 != bit) {
                        test_msg("Testing bit pattern failed\n");
@@ -351,15 +366,22 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
        return 0;
 }
 
-static int test_eb_bitmaps(void)
+static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
 {
-       unsigned long len = PAGE_SIZE * 4;
+       unsigned long len;
        unsigned long *bitmap;
        struct extent_buffer *eb;
        int ret;
 
        test_msg("Running extent buffer bitmap tests\n");
 
+       /*
+        * On ppc64, sectorsize can be 64K, so 4 * 64K would be larger than
+        * BTRFS_MAX_METADATA_BLOCKSIZE.
+        */
+       len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
+               ? sectorsize * 4 : sectorsize;
+
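
A worked example of this cap, assuming BTRFS_MAX_METADATA_BLOCKSIZE is 64K as
defined in ctree.h: 4K sectors give len = 16K, safely below the limit, while
4 * 64K = 256K would exceed it, so 64K sectors fall back to a single sector:

#include <stdio.h>
#include <stdint.h>

#define BTRFS_MAX_METADATA_BLOCKSIZE 65536	/* 64K, value assumed from ctree.h */

static unsigned long test_len(uint32_t sectorsize)
{
	return (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE) ? sectorsize * 4UL
							   : sectorsize;
}

int main(void)
{
	printf("4K sectors:  %lu\n", test_len(4096));	/* 16384 */
	printf("64K sectors: %lu\n", test_len(65536));	/* 65536 */
	return 0;
}
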
        bitmap = kmalloc(len, GFP_KERNEL);
        if (!bitmap) {
                test_msg("Couldn't allocate test bitmap\n");
@@ -379,7 +401,7 @@ static int test_eb_bitmaps(void)
 
        /* Do it over again with an extent buffer which isn't page-aligned. */
        free_extent_buffer(eb);
-       eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len);
+       eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
        if (!eb) {
                test_msg("Couldn't allocate test extent buffer\n");
                kfree(bitmap);
@@ -393,17 +415,17 @@ out:
        return ret;
 }
 
-int btrfs_test_extent_io(void)
+int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
 {
        int ret;
 
        test_msg("Running extent I/O tests\n");
 
-       ret = test_find_delalloc();
+       ret = test_find_delalloc(sectorsize);
        if (ret)
                goto out;
 
-       ret = test_eb_bitmaps();
+       ret = test_eb_bitmaps(sectorsize, nodesize);
 out:
        test_msg("Extent I/O tests finished\n");
        return ret;
index 0eeb8f3d6b674ba8f67fe54877afde605999fcf3..3956bb2ff84cab59bf25fc9869cebc83d8606621 100644 (file)
@@ -22,7 +22,7 @@
 #include "../disk-io.h"
 #include "../free-space-cache.h"
 
-#define BITS_PER_BITMAP                (PAGE_SIZE * 8)
+#define BITS_PER_BITMAP                (PAGE_SIZE * 8UL)
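
The UL suffix matters because the macro is later multiplied by sectorsize and
only then cast to u64 (see next_bitmap_offset below); computed in 32-bit
arithmetic the product would wrap before the widening cast. A userspace sketch
of the truncation this guards against, assuming a 64-bit unsigned long:

#include <stdio.h>

int main(void)
{
	unsigned int bits_32 = 65536 * 8;	/* 64K page, 32-bit arithmetic */
	unsigned long bits_ul = 65536 * 8UL;	/* same, promoted to unsigned long */
	unsigned int sectorsize = 65536;

	/* 2^19 * 2^16 = 2^35: wraps to 0 in 32 bits, survives in 64. */
	printf("32-bit product: %llu\n", (unsigned long long)(bits_32 * sectorsize));
	printf("UL product:     %llu\n", (unsigned long long)(bits_ul * sectorsize));
	return 0;
}
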
 
 /*
  * This test just does basic sanity checking, making sure we can add an extent
@@ -99,7 +99,8 @@ static int test_extents(struct btrfs_block_group_cache *cache)
        return 0;
 }
 
-static int test_bitmaps(struct btrfs_block_group_cache *cache)
+static int test_bitmaps(struct btrfs_block_group_cache *cache,
+                       u32 sectorsize)
 {
        u64 next_bitmap_offset;
        int ret;
@@ -139,7 +140,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
         * The first bitmap we have starts at offset 0 so the next one is just
         * at the end of the first bitmap.
         */
-       next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+       next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
 
        /* Test a bit straddling two bitmaps */
        ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M,
@@ -167,9 +168,10 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
 }
 
 /* This is the high grade jackassery */
-static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
+static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache,
+                                   u32 sectorsize)
 {
-       u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+       u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
        int ret;
 
        test_msg("Running bitmap and extent tests\n");
@@ -401,7 +403,8 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
  * requests.
  */
 static int
-test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
+test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
+                                      u32 sectorsize)
 {
        int ret;
        u64 offset;
@@ -539,7 +542,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
         * The goal is to test that the bitmap entry space stealing doesn't
         * steal this space region.
         */
-       ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096);
+       ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize);
        if (ret) {
                test_msg("Error adding free space: %d\n", ret);
                return ret;
@@ -597,8 +600,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -ENOENT;
        }
 
-       if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) {
-               test_msg("Cache free space is not 1Mb + 4Kb\n");
+       if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) {
+               test_msg("Cache free space is not 1Mb + %u\n", sectorsize);
                return -EINVAL;
        }
 
@@ -611,22 +614,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -EINVAL;
        }
 
-       /* All that remains is a 4Kb free space region in a bitmap. Confirm. */
+       /*
+        * All that remains is a sectorsize free space region in a bitmap.
+        * Confirm.
+        */
        ret = check_num_extents_and_bitmaps(cache, 1, 1);
        if (ret)
                return ret;
 
-       if (cache->free_space_ctl->free_space != 4096) {
-               test_msg("Cache free space is not 4Kb\n");
+       if (cache->free_space_ctl->free_space != sectorsize) {
+               test_msg("Cache free space is not %u\n", sectorsize);
                return -EINVAL;
        }
 
        offset = btrfs_find_space_for_alloc(cache,
-                                           0, 4096, 0,
+                                           0, sectorsize, 0,
                                            &max_extent_size);
        if (offset != (SZ_128M + SZ_16M)) {
-               test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n",
-                        offset);
+               test_msg("Failed to allocate %u, returned offset: %llu\n",
+                        sectorsize, offset);
                return -EINVAL;
        }
 
@@ -733,7 +739,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
         * The goal is to test that the bitmap entry space stealing doesn't
         * steal this space region.
         */
-       ret = btrfs_add_free_space(cache, SZ_32M, 8192);
+       ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize);
        if (ret) {
                test_msg("Error adding free space: %d\n", ret);
                return ret;
@@ -757,7 +763,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
 
        /*
         * Confirm that our extent entry didn't steal all free space from the
-        * bitmap, because of the small 8Kb free space region.
+        * bitmap, because of the small 2 * sectorsize free space region.
         */
        ret = check_num_extents_and_bitmaps(cache, 2, 1);
        if (ret)
@@ -783,8 +789,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -ENOENT;
        }
 
-       if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) {
-               test_msg("Cache free space is not 1Mb + 8Kb\n");
+       if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) {
+               test_msg("Cache free space is not 1Mb + %u\n", 2 * sectorsize);
                return -EINVAL;
        }
 
@@ -796,21 +802,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
                return -EINVAL;
        }
 
-       /* All that remains is a 8Kb free space region in a bitmap. Confirm. */
+       /*
+        * All that remains is a 2 * sectorsize free space region
+        * in a bitmap. Confirm.
+        */
        ret = check_num_extents_and_bitmaps(cache, 1, 1);
        if (ret)
                return ret;
 
-       if (cache->free_space_ctl->free_space != 8192) {
-               test_msg("Cache free space is not 8Kb\n");
+       if (cache->free_space_ctl->free_space != 2 * sectorsize) {
+               test_msg("Cache free space is not %u\n", 2 * sectorsize);
                return -EINVAL;
        }
 
        offset = btrfs_find_space_for_alloc(cache,
-                                           0, 8192, 0,
+                                           0, 2 * sectorsize, 0,
                                            &max_extent_size);
        if (offset != SZ_32M) {
-               test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n",
+               test_msg("Failed to allocate %u, offset: %llu\n",
+                        2 * sectorsize,
                         offset);
                return -EINVAL;
        }
@@ -825,7 +835,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
        return 0;
 }
 
-int btrfs_test_free_space_cache(void)
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_block_group_cache *cache;
        struct btrfs_root *root = NULL;
@@ -833,13 +843,19 @@ int btrfs_test_free_space_cache(void)
 
        test_msg("Running btrfs free space cache tests\n");
 
-       cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024);
+       /*
+        * For ppc64 (with 64k page size), bytes per bitmap might be
+        * larger than 1G.  To make the bitmap test work on ppc64,
+        * allocate a dummy block group whose size crosses a bitmap boundary.
+        */
+       cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize
+                                       + PAGE_SIZE, sectorsize);
        if (!cache) {
                test_msg("Couldn't run the tests\n");
                return 0;
        }
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                ret = PTR_ERR(root);
                goto out;
@@ -855,14 +871,14 @@ int btrfs_test_free_space_cache(void)
        ret = test_extents(cache);
        if (ret)
                goto out;
-       ret = test_bitmaps(cache);
+       ret = test_bitmaps(cache, sectorsize);
        if (ret)
                goto out;
-       ret = test_bitmaps_and_extents(cache);
+       ret = test_bitmaps_and_extents(cache, sectorsize);
        if (ret)
                goto out;
 
-       ret = test_steal_space_from_bitmap_to_extent(cache);
+       ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);
 out:
        btrfs_free_dummy_block_group(cache);
        btrfs_free_dummy_root(root);
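
To see why the fixed 1G group had to go, plug in the numbers: BITS_PER_BITMAP
is PAGE_SIZE * 8, so a single bitmap covers PAGE_SIZE * 8 * sectorsize bytes of
space. That is 128M on a 4K/4K system but 32G with 64K pages and sectors, so a
1G group would never reach a second bitmap. A sketch of the arithmetic
(assuming 64-bit longs):

#include <stdio.h>

static unsigned long group_size(unsigned long page_size, unsigned long sectorsize)
{
	/* One full bitmap plus a page, so the group crosses a bitmap boundary. */
	return page_size * 8 * sectorsize + page_size;
}

int main(void)
{
	printf("4K/4K:   %lu bytes (~128M)\n", group_size(4096, 4096));
	printf("64K/64K: %lu bytes (~32G)\n", group_size(65536, 65536));
	return 0;
}
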
index 7cea4462acd5f8d2ecf671be0c398dd2d6d62a98..aac507085ab0958f89a1ce81f0dc69d3824c2a6c 100644 (file)
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/types.h>
 #include "btrfs-tests.h"
 #include "../ctree.h"
 #include "../disk-io.h"
@@ -30,7 +31,7 @@ struct free_space_extent {
  * The test cases align their operations to this in order to hit some of the
  * edge cases in the bitmap code.
  */
-#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096)
+#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE)
 
 static int __check_free_space_extents(struct btrfs_trans_handle *trans,
                                      struct btrfs_fs_info *fs_info,
@@ -439,7 +440,8 @@ typedef int (*test_func_t)(struct btrfs_trans_handle *,
                           struct btrfs_block_group_cache *,
                           struct btrfs_path *);
 
-static int run_test(test_func_t test_func, int bitmaps)
+static int run_test(test_func_t test_func, int bitmaps,
+               u32 sectorsize, u32 nodesize)
 {
        struct btrfs_root *root = NULL;
        struct btrfs_block_group_cache *cache = NULL;
@@ -447,7 +449,7 @@ static int run_test(test_func_t test_func, int bitmaps)
        struct btrfs_path *path = NULL;
        int ret;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate dummy root\n");
                ret = PTR_ERR(root);
@@ -466,7 +468,8 @@ static int run_test(test_func_t test_func, int bitmaps)
        root->fs_info->free_space_root = root;
        root->fs_info->tree_root = root;
 
-       root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+       root->node = alloc_test_extent_buffer(root->fs_info,
+               nodesize, nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -474,9 +477,9 @@ static int run_test(test_func_t test_func, int bitmaps)
        }
        btrfs_set_header_level(root->node, 0);
        btrfs_set_header_nritems(root->node, 0);
-       root->alloc_bytenr += 8192;
+       root->alloc_bytenr += 2 * nodesize;
 
-       cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE);
+       cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize);
        if (!cache) {
                test_msg("Couldn't allocate dummy block group cache\n");
                ret = -ENOMEM;
@@ -534,17 +537,18 @@ out:
        return ret;
 }
 
-static int run_test_both_formats(test_func_t test_func)
+static int run_test_both_formats(test_func_t test_func,
+       u32 sectorsize, u32 nodesize)
 {
        int ret;
 
-       ret = run_test(test_func, 0);
+       ret = run_test(test_func, 0, sectorsize, nodesize);
        if (ret)
                return ret;
-       return run_test(test_func, 1);
+       return run_test(test_func, 1, sectorsize, nodesize);
 }
 
-int btrfs_test_free_space_tree(void)
+int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
 {
        test_func_t tests[] = {
                test_empty_block_group,
@@ -561,9 +565,11 @@ int btrfs_test_free_space_tree(void)
 
        test_msg("Running free space tree tests\n");
        for (i = 0; i < ARRAY_SIZE(tests); i++) {
-               int ret = run_test_both_formats(tests[i]);
+               int ret = run_test_both_formats(tests[i], sectorsize,
+                       nodesize);
                if (ret) {
-                       test_msg("%pf failed\n", tests[i]);
+                       test_msg("%pf: sectorsize %u failed\n",
+                               tests[i], sectorsize);
                        return ret;
                }
        }
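
run_test_both_formats() runs every case twice, once with free-space entries
stored as extents (bitmaps=0) and once converted to bitmaps (bitmaps=1), with
the sectorsize/nodesize pair now riding along on both passes. A compact
userspace mock of that double dispatch (names hypothetical):

#include <stdio.h>
#include <stdint.h>

typedef int (*test_func_t)(int bitmaps, uint32_t sectorsize, uint32_t nodesize);

static int mock_test(int bitmaps, uint32_t sectorsize, uint32_t nodesize)
{
	printf("bitmaps=%d sectorsize=%u nodesize=%u\n",
	       bitmaps, sectorsize, nodesize);
	return 0;
}

static int run_test_both_formats(test_func_t fn, uint32_t sectorsize,
				 uint32_t nodesize)
{
	int ret = fn(0, sectorsize, nodesize);	/* extent format first */

	return ret ? ret : fn(1, sectorsize, nodesize);	/* then bitmap format */
}

int main(void)
{
	return run_test_both_formats(mock_test, 4096, 16384);
}
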
index 8a25fe8b7c452e78d823da897068748ae280de32..29648c0a39f1bc380eaffb08f0d574098dc9d343 100644 (file)
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/types.h>
 #include "btrfs-tests.h"
 #include "../ctree.h"
 #include "../btrfs_inode.h"
@@ -86,19 +87,19 @@ static void insert_inode_item_key(struct btrfs_root *root)
 * diagram of how the extents will look, though this may not be possible; we
 * still want to make sure everything acts normally (the last number is not
 * inclusive)
  *
- * [0 - 5][5 -  6][6 - 10][10 - 4096][  4096 - 8192 ][8192 - 12288]
- * [hole ][inline][ hole ][ regular ][regular1 split][    hole    ]
+ * [0 - 5][5 - 6 ][     6 - 4096     ][4096 - 4100][4100 - 8195][ 8195 - 12291 ]
+ * [hole ][inline][hole but no extent][   hole    ][  regular  ][regular1 split]
  *
- * [ 12288 - 20480][20480 - 24576][  24576 - 28672  ][28672 - 36864][36864 - 45056]
- * [regular1 split][   prealloc1 ][prealloc1 written][   prealloc1 ][ compressed  ]
+ * [12291 - 16387][16387 - 24579 ][24579 - 28675][28675 - 32771][  32771 - 36867  ]
+ * [     hole    ][regular1 split][   prealloc  ][  prealloc1  ][prealloc1 written]
  *
- * [45056 - 49152][49152-53248][53248-61440][61440-65536][     65536+81920   ]
- * [ compressed1 ][  regular  ][compressed1][  regular  ][ hole but no extent]
+ * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443 - 69635]
+ * [  prealloc1  ][  compressed ][ compressed1 ][   regular   ][ compressed1 ]
  *
- * [81920-86016]
- * [  regular  ]
+ * [69635 - 73731][  73731 - 86019   ][86019 - 90115]
+ * [   regular   ][hole but no extent][   regular   ]
  */
-static void setup_file_extents(struct btrfs_root *root)
+static void setup_file_extents(struct btrfs_root *root, u32 sectorsize)
 {
        int slot = 0;
        u64 disk_bytenr = SZ_1M;
@@ -119,7 +120,7 @@ static void setup_file_extents(struct btrfs_root *root)
        insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0,
                      slot);
        slot++;
-       offset = 4096;
+       offset = sectorsize;
 
        /* Now another hole */
        insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0,
@@ -128,99 +129,106 @@ static void setup_file_extents(struct btrfs_root *root)
        offset += 4;
 
        /* Now for a regular extent */
-       insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0,
+                     disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       disk_bytenr += 4096;
-       offset += 4095;
+       disk_bytenr += sectorsize;
+       offset += sectorsize - 1;
 
        /*
         * Now for 3 extents that were split from a hole punch so we test
         * offsets properly.
         */
-       insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+                     4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG,
-                     0, slot);
+       offset += sectorsize;
+       insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0,
+                     BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+       offset += sectorsize;
+       insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+                     2 * sectorsize, disk_bytenr, 4 * sectorsize,
                      BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 8192;
-       disk_bytenr += 16384;
+       offset += 2 * sectorsize;
+       disk_bytenr += 4 * sectorsize;
 
        /* Now for an unwritten prealloc extent */
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+       insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+               sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
        slot++;
-       offset += 4096;
+       offset += sectorsize;
 
        /*
         * We want to jack up disk_bytenr a little more so the em stuff doesn't
         * merge our records.
         */
-       disk_bytenr += 8192;
+       disk_bytenr += 2 * sectorsize;
 
        /*
         * Now for a partially written prealloc extent, basically the same as
         * the hole punch example above.  Ram_bytes never changes when you mark
         * extents written btw.
         */
-       insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384,
-                     BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+                     4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       offset += sectorsize;
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize,
+                     disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0,
+                     slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384,
+       offset += sectorsize;
+       insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+                     2 * sectorsize, disk_bytenr, 4 * sectorsize,
                      BTRFS_FILE_EXTENT_PREALLOC, 0, slot);
        slot++;
-       offset += 8192;
-       disk_bytenr += 16384;
+       offset += 2 * sectorsize;
+       disk_bytenr += 4 * sectorsize;
 
        /* Now a normal compressed extent */
-       insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+       insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0,
+                     disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG,
+                     BTRFS_COMPRESS_ZLIB, slot);
        slot++;
-       offset += 8192;
+       offset += 2 * sectorsize;
        /* No merges */
-       disk_bytenr += 8192;
+       disk_bytenr += 2 * sectorsize;
 
        /* Now a split compressed extent */
-       insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
+       insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr,
+                     sectorsize, BTRFS_FILE_EXTENT_REG,
+                     BTRFS_COMPRESS_ZLIB, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096,
+       offset += sectorsize;
+       insert_extent(root, offset, sectorsize, sectorsize, 0,
+                     disk_bytenr + sectorsize, sectorsize,
                      BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 4096;
-       insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096,
+       offset += sectorsize;
+       insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize,
+                     2 * sectorsize, disk_bytenr, sectorsize,
                      BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot);
        slot++;
-       offset += 8192;
-       disk_bytenr += 8192;
+       offset += 2 * sectorsize;
+       disk_bytenr += 2 * sectorsize;
 
        /* Now extents that have a hole but no hole extent */
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+                     sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
        slot++;
-       offset += 16384;
-       disk_bytenr += 4096;
-       insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, slot);
+       offset += 4 * sectorsize;
+       disk_bytenr += sectorsize;
+       insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr,
+                     sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot);
 }
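
With the layout expressed in sector units, the byte offsets in the diagram can
be re-derived for any sector size. A short program replaying the first
boundaries of setup_file_extents() for sectorsize = 4096, reproducing the
diagram's numbers (userspace, illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int sectorsize = 4096;	/* x86-64 default */
	unsigned long long off = 0;

	off += 5;              printf("hole ends           %llu\n", off);	/* 5 */
	off += 1;              printf("inline ends         %llu\n", off);	/* 6 */
	off = sectorsize;      printf("implicit hole ends  %llu\n", off);	/* 4096 */
	off += 4;              printf("hole extent ends    %llu\n", off);	/* 4100 */
	off += sectorsize - 1; printf("regular ends        %llu\n", off);	/* 8195 */
	off += sectorsize;     printf("split #1 ends       %llu\n", off);	/* 12291 */
	off += sectorsize;     printf("split hole ends     %llu\n", off);	/* 16387 */
	off += 2 * sectorsize; printf("split #2 ends       %llu\n", off);	/* 24579 */
	return 0;
}
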
 
 static unsigned long prealloc_only = 0;
 static unsigned long compressed_only = 0;
 static unsigned long vacancy_only = 0;
 
-static noinline int test_btrfs_get_extent(void)
+static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
 {
        struct inode *inode = NULL;
        struct btrfs_root *root = NULL;
@@ -240,7 +248,7 @@ static noinline int test_btrfs_get_extent(void)
        BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
        BTRFS_I(inode)->location.offset = 0;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                goto out;
@@ -256,7 +264,7 @@ static noinline int test_btrfs_get_extent(void)
                goto out;
        }
 
-       root->node = alloc_dummy_extent_buffer(NULL, 4096);
+       root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                goto out;
@@ -273,7 +281,7 @@ static noinline int test_btrfs_get_extent(void)
 
        /* First with no extents */
        BTRFS_I(inode)->root = root;
-       em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0);
        if (IS_ERR(em)) {
                em = NULL;
                test_msg("Got an error when we shouldn't have\n");
@@ -295,7 +303,7 @@ static noinline int test_btrfs_get_extent(void)
         * setup_file_extents, so if you change anything there you need to
         * update the comment and update the expected values below.
         */
-       setup_file_extents(root);
+       setup_file_extents(root, sectorsize);
 
        em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0);
        if (IS_ERR(em)) {
@@ -318,7 +326,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -327,7 +335,8 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected an inline, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4091) {
+
+       if (em->start != offset || em->len != (sectorsize - 5)) {
                test_msg("Unexpected extent wanted start %llu len 1, got start "
                         "%llu len %llu\n", offset, em->start, em->len);
                goto out;
@@ -344,7 +353,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -366,7 +375,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Regular extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -375,7 +384,7 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4095) {
+       if (em->start != offset || em->len != sectorsize - 1) {
                test_msg("Unexpected extent wanted start %llu len 4095, got "
                         "start %llu len %llu\n", offset, em->start, em->len);
                goto out;
@@ -393,7 +402,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* The next 3 are split extents */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -402,9 +411,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -421,7 +431,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -430,9 +440,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a hole, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -442,7 +453,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -451,9 +462,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -475,7 +487,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Prealloc extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -484,9 +496,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != prealloc_only) {
@@ -503,7 +516,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* The next 3 are a half written prealloc extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -512,9 +525,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != prealloc_only) {
@@ -532,7 +546,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -541,9 +555,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -564,7 +579,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -573,9 +588,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != prealloc_only) {
@@ -598,7 +614,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Now for the compressed extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -607,9 +623,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != compressed_only) {
@@ -631,7 +648,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* Split compressed extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -640,9 +657,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != compressed_only) {
@@ -665,7 +683,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -674,9 +692,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -691,7 +710,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -701,9 +720,10 @@ static noinline int test_btrfs_get_extent(void)
                         disk_bytenr, em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 8192) {
-               test_msg("Unexpected extent wanted start %llu len 8192, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 2 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 2 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != compressed_only) {
@@ -725,7 +745,7 @@ static noinline int test_btrfs_get_extent(void)
        free_extent_map(em);
 
        /* A hole between regular extents but no hole extent */
-       em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -734,9 +754,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -765,9 +786,10 @@ static noinline int test_btrfs_get_extent(void)
         * length of the actual hole, if this changes we'll have to change this
         * test.
         */
-       if (em->start != offset || em->len != 12288) {
-               test_msg("Unexpected extent wanted start %llu len 12288, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != 3 * sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, 3 * sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != vacancy_only) {
@@ -783,7 +805,7 @@ static noinline int test_btrfs_get_extent(void)
        offset = em->start + em->len;
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0);
+       em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -792,9 +814,10 @@ static noinline int test_btrfs_get_extent(void)
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != offset || em->len != 4096) {
-               test_msg("Unexpected extent wanted start %llu len 4096, got "
-                        "start %llu len %llu\n", offset, em->start, em->len);
+       if (em->start != offset || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %llu len %u, "
+                       "got start %llu len %llu\n",
+                       offset, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -815,7 +838,7 @@ out:
        return ret;
 }
 
-static int test_hole_first(void)
+static int test_hole_first(u32 sectorsize, u32 nodesize)
 {
        struct inode *inode = NULL;
        struct btrfs_root *root = NULL;
@@ -832,7 +855,7 @@ static int test_hole_first(void)
        BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
        BTRFS_I(inode)->location.offset = 0;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                goto out;
@@ -844,7 +867,7 @@ static int test_hole_first(void)
                goto out;
        }
 
-       root->node = alloc_dummy_extent_buffer(NULL, 4096);
+       root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                goto out;
@@ -861,9 +884,9 @@ static int test_hole_first(void)
         * btrfs_get_extent.
         */
        insert_inode_item_key(root);
-       insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096,
-                     BTRFS_FILE_EXTENT_REG, 0, 1);
-       em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0);
+       insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize,
+                     sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1);
+       em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
@@ -872,9 +895,10 @@ static int test_hole_first(void)
                test_msg("Expected a hole, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != 0 || em->len != 4096) {
-               test_msg("Unexpected extent wanted start 0 len 4096, got start "
-                        "%llu len %llu\n", em->start, em->len);
+       if (em->start != 0 || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start 0 len %u, "
+                       "got start %llu len %llu\n",
+                       sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != vacancy_only) {
@@ -884,18 +908,19 @@ static int test_hole_first(void)
        }
        free_extent_map(em);
 
-       em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0);
+       em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0);
        if (IS_ERR(em)) {
                test_msg("Got an error when we shouldn't have\n");
                goto out;
        }
-       if (em->block_start != 4096) {
+       if (em->block_start != sectorsize) {
                test_msg("Expected a real extent, got %llu\n", em->block_start);
                goto out;
        }
-       if (em->start != 4096 || em->len != 4096) {
-               test_msg("Unexpected extent wanted start 4096 len 4096, got "
-                        "start %llu len %llu\n", em->start, em->len);
+       if (em->start != sectorsize || em->len != sectorsize) {
+               test_msg("Unexpected extent wanted start %u len %u, "
+                       "got start %llu len %llu\n",
+                       sectorsize, sectorsize, em->start, em->len);
                goto out;
        }
        if (em->flags != 0) {
@@ -912,7 +937,7 @@ out:
        return ret;
 }
 
-static int test_extent_accounting(void)
+static int test_extent_accounting(u32 sectorsize, u32 nodesize)
 {
        struct inode *inode = NULL;
        struct btrfs_root *root = NULL;
@@ -924,7 +949,7 @@ static int test_extent_accounting(void)
                return ret;
        }
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                goto out;
@@ -954,10 +979,11 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE][4k] */
+       /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
        BTRFS_I(inode)->outstanding_extents++;
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
-                                       BTRFS_MAX_EXTENT_SIZE + 4095, NULL);
+                                       BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
+                                       NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -969,10 +995,10 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */
+       /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */
        ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
                               BTRFS_MAX_EXTENT_SIZE >> 1,
-                              (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
+                              (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1,
                               EXTENT_DELALLOC | EXTENT_DIRTY |
                               EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
                               NULL, GFP_KERNEL);
@@ -987,10 +1013,11 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE][4K] */
+       /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
        BTRFS_I(inode)->outstanding_extents++;
        ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
-                                       (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
+                                       (BTRFS_MAX_EXTENT_SIZE >> 1)
+                                       + sectorsize - 1,
                                        NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
@@ -1004,16 +1031,17 @@ static int test_extent_accounting(void)
        }
 
        /*
-        * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K]
+        * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize]
         *
         * I'm artificially adding 2 to outstanding_extents because in the
         * buffered IO case we'd add things up as we go, but I don't feel like
         * doing that here, this isn't the interesting case we want to test.
         */
        BTRFS_I(inode)->outstanding_extents += 2;
-       ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192,
-                                       (BTRFS_MAX_EXTENT_SIZE << 1) + 12287,
-                                       NULL);
+       ret = btrfs_set_extent_delalloc(inode,
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
+                       (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
+                       NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1025,10 +1053,13 @@ static int test_extent_accounting(void)
                goto out;
        }
 
-       /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */
+       /*
+        * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize]
+        */
        BTRFS_I(inode)->outstanding_extents++;
-       ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
-                                       BTRFS_MAX_EXTENT_SIZE+8191, NULL);
+       ret = btrfs_set_extent_delalloc(inode,
+                       BTRFS_MAX_EXTENT_SIZE + sectorsize,
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1042,8 +1073,8 @@ static int test_extent_accounting(void)
 
        /* [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize] */
        ret = clear_extent_bit(&BTRFS_I(inode)->io_tree,
-                              BTRFS_MAX_EXTENT_SIZE+4096,
-                              BTRFS_MAX_EXTENT_SIZE+8191,
+                              BTRFS_MAX_EXTENT_SIZE + sectorsize,
+                              BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1,
                               EXTENT_DIRTY | EXTENT_DELALLOC |
                               EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
                               NULL, GFP_KERNEL);
@@ -1063,8 +1094,9 @@ static int test_extent_accounting(void)
         * might fail and I'd rather satisfy my paranoia at this point.
         */
        BTRFS_I(inode)->outstanding_extents++;
-       ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096,
-                                       BTRFS_MAX_EXTENT_SIZE+8191, NULL);
+       ret = btrfs_set_extent_delalloc(inode,
+                       BTRFS_MAX_EXTENT_SIZE + sectorsize,
+                       BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL);
        if (ret) {
                test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
                goto out;
@@ -1103,7 +1135,7 @@ out:
        return ret;
 }
 
-int btrfs_test_inodes(void)
+int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
 {
        int ret;
 
@@ -1112,13 +1144,13 @@ int btrfs_test_inodes(void)
        set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only);
 
        test_msg("Running btrfs_get_extent tests\n");
-       ret = test_btrfs_get_extent();
+       ret = test_btrfs_get_extent(sectorsize, nodesize);
        if (ret)
                return ret;
        test_msg("Running hole first btrfs_get_extent test\n");
-       ret = test_hole_first();
+       ret = test_hole_first(sectorsize, nodesize);
        if (ret)
                return ret;
        test_msg("Running outstanding_extents tests\n");
-       return test_extent_accounting();
+       return test_extent_accounting(sectorsize, nodesize);
 }
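
The accounting under test splits a contiguous delalloc range into one
outstanding extent per BTRFS_MAX_EXTENT_SIZE chunk, which is why the
[BTRFS_MAX_EXTENT_SIZE][sectorsize] cases above must count as two extents. A
sketch of that rule, assuming BTRFS_MAX_EXTENT_SIZE is 128M as in ctree.h:

#include <stdio.h>
#include <stdint.h>

#define BTRFS_MAX_EXTENT_SIZE (128ULL * 1024 * 1024)	/* assumed from ctree.h */

/* One outstanding extent per max-sized chunk, rounded up. */
static uint64_t outstanding_extents(uint64_t len)
{
	return (len + BTRFS_MAX_EXTENT_SIZE - 1) / BTRFS_MAX_EXTENT_SIZE;
}

int main(void)
{
	uint32_t sectorsize = 4096;

	/* Exactly one max extent costs 1; one sector more costs 2. */
	printf("%llu\n", (unsigned long long)outstanding_extents(BTRFS_MAX_EXTENT_SIZE));
	printf("%llu\n", (unsigned long long)outstanding_extents(BTRFS_MAX_EXTENT_SIZE + sectorsize));
	return 0;
}
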
index 8aa4ded31326c95e8b19b4ced8f9c9fab1c4c8de..57a12c0d680ba300461ff5ee9714833b541f2810 100644 (file)
@@ -16,6 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/types.h>
 #include "btrfs-tests.h"
 #include "../ctree.h"
 #include "../transaction.h"
@@ -216,7 +217,8 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
        return ret;
 }
 
-static int test_no_shared_qgroup(struct btrfs_root *root)
+static int test_no_shared_qgroup(struct btrfs_root *root,
+               u32 sectorsize, u32 nodesize)
 {
        struct btrfs_trans_handle trans;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -227,7 +229,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
        btrfs_init_dummy_trans(&trans);
 
        test_msg("Qgroup basic add\n");
-       ret = btrfs_create_qgroup(NULL, fs_info, 5);
+       ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID);
        if (ret) {
                test_msg("Couldn't create a qgroup %d\n", ret);
                return ret;
@@ -238,18 +240,19 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
         * we can only call btrfs_qgroup_account_extent() directly to test
         * quota.
         */
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
+       ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FS_TREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -257,32 +260,33 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                               nodesize, nodesize)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
        old_roots = NULL;
        new_roots = NULL;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = remove_extent_item(root, 4096, 4096);
+       ret = remove_extent_item(root, nodesize, nodesize);
        if (ret)
                return -EINVAL;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -290,14 +294,14 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return -EINVAL;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 0, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
@@ -310,7 +314,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
  * right, also remove one of the roots and make sure the exclusive count is
  * adjusted properly.
  */
-static int test_multiple_refs(struct btrfs_root *root)
+static int test_multiple_refs(struct btrfs_root *root,
+               u32 sectorsize, u32 nodesize)
 {
        struct btrfs_trans_handle trans;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -322,25 +327,29 @@ static int test_multiple_refs(struct btrfs_root *root)
 
        test_msg("Qgroup multiple refs test\n");
 
-       /* We have 5 created already from the previous test */
-       ret = btrfs_create_qgroup(NULL, fs_info, 256);
+       /*
+        * We have BTRFS_FS_TREE_OBJECTID created already from the
+        * previous test.
+        */
+       ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID);
        if (ret) {
                test_msg("Couldn't create a qgroup %d\n", ret);
                return ret;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
+       ret = insert_normal_tree_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FS_TREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -348,30 +357,32 @@ static int test_multiple_refs(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                                      nodesize, nodesize)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = add_tree_ref(root, 4096, 4096, 0, 256);
+       ret = add_tree_ref(root, nodesize, nodesize, 0,
+                       BTRFS_FIRST_FREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -379,35 +390,38 @@ static int test_multiple_refs(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                                       nodesize, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 256, 4096, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+                                       nodesize, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots);
        if (ret) {
                ulist_free(old_roots);
                test_msg("Couldn't find old roots: %d\n", ret);
                return ret;
        }
 
-       ret = remove_extent_ref(root, 4096, 4096, 0, 256);
+       ret = remove_extent_ref(root, nodesize, nodesize, 0,
+                               BTRFS_FIRST_FREE_OBJECTID);
        if (ret)
                return ret;
 
-       ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
+       ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots);
        if (ret) {
                ulist_free(old_roots);
                ulist_free(new_roots);
@@ -415,19 +429,21 @@ static int test_multiple_refs(struct btrfs_root *root)
                return ret;
        }
 
-       ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
-                                         old_roots, new_roots);
+       ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize,
+                                         nodesize, old_roots, new_roots);
        if (ret) {
                test_msg("Couldn't account space for a qgroup %d\n", ret);
                return ret;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 256, 0, 0)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID,
+                                       0, 0)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
 
-       if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) {
+       if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID,
+                                       nodesize, nodesize)) {
                test_msg("Qgroup counts didn't match expected values\n");
                return -EINVAL;
        }
@@ -435,13 +451,13 @@ static int test_multiple_refs(struct btrfs_root *root)
        return 0;
 }
 
-int btrfs_test_qgroups(void)
+int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
 {
        struct btrfs_root *root;
        struct btrfs_root *tmp_root;
        int ret = 0;
 
-       root = btrfs_alloc_dummy_root();
+       root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate root\n");
                return PTR_ERR(root);
@@ -468,7 +484,8 @@ int btrfs_test_qgroups(void)
         * Can't use bytenr 0, some things freak out
         * *cough*backref walking code*cough*
         */
-       root->node = alloc_test_extent_buffer(root->fs_info, 4096);
+       root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
+                                       nodesize);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -476,16 +493,16 @@ int btrfs_test_qgroups(void)
        }
        btrfs_set_header_level(root->node, 0);
        btrfs_set_header_nritems(root->node, 0);
-       root->alloc_bytenr += 8192;
+       root->alloc_bytenr += 2 * nodesize;
 
-       tmp_root = btrfs_alloc_dummy_root();
+       tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(tmp_root)) {
                test_msg("Couldn't allocate a fs root\n");
                ret = PTR_ERR(tmp_root);
                goto out;
        }
 
-       tmp_root->root_key.objectid = 5;
+       tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID;
        root->fs_info->fs_root = tmp_root;
        ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
        if (ret) {
@@ -493,14 +510,14 @@ int btrfs_test_qgroups(void)
                goto out;
        }
 
-       tmp_root = btrfs_alloc_dummy_root();
+       tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
        if (IS_ERR(tmp_root)) {
                test_msg("Couldn't allocate a fs root\n");
                ret = PTR_ERR(tmp_root);
                goto out;
        }
 
-       tmp_root->root_key.objectid = 256;
+       tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID;
        ret = btrfs_insert_fs_root(root->fs_info, tmp_root);
        if (ret) {
                test_msg("Couldn't insert fs root %d\n", ret);
@@ -508,10 +525,10 @@ int btrfs_test_qgroups(void)
        }
 
        test_msg("Running qgroup tests\n");
-       ret = test_no_shared_qgroup(root);
+       ret = test_no_shared_qgroup(root, sectorsize, nodesize);
        if (ret)
                goto out;
-       ret = test_multiple_refs(root);
+       ret = test_multiple_refs(root, sectorsize, nodesize);
 out:
        btrfs_free_dummy_root(root);
        return ret;
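
The qgroup tests above encode the basic shared/exclusive invariant: while a single root references the nodesize extent, that root's qgroup counts it as both referenced and exclusive; once a second root takes a ref, the extent stays referenced in both qgroups but is exclusive in neither. The expected (referenced, exclusive) pairs, restated from the assertions above:

/* state                          FS_TREE qgroup         FIRST_FREE qgroup
 * one ref (FS_TREE)              (nodesize, nodesize)   (0, 0)
 * two refs (both roots)          (nodesize, 0)          (nodesize, 0)
 * one ref again (FS_TREE only)   (nodesize, nodesize)   (0, 0)
 */
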
index f6e24cb423aed0c78316b983d3687d07bb2ae1ec..948aa186b353caf55748fd983e975c127b654f0f 100644 (file)
@@ -818,6 +818,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 {
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
+       u64 transid = trans->transid;
        unsigned long cur = trans->delayed_ref_updates;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;
@@ -905,7 +906,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        if (must_run_delayed_refs) {
-               btrfs_async_run_delayed_refs(root, cur,
+               btrfs_async_run_delayed_refs(root, cur, transid,
                                             must_run_delayed_refs == 1);
        }
        return err;
@@ -1311,11 +1312,6 @@ int btrfs_defrag_root(struct btrfs_root *root)
        return ret;
 }
 
-/* Bisesctability fixup, remove in 4.8 */
-#ifndef btrfs_std_error
-#define btrfs_std_error btrfs_handle_fs_error
-#endif
-
 /*
  * Do all special snapshot related qgroup dirty hack.
  *
@@ -1385,7 +1381,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
        switch_commit_roots(trans->transaction, fs_info);
        ret = btrfs_write_and_wait_transaction(trans, src);
        if (ret)
-               btrfs_std_error(fs_info, ret,
+               btrfs_handle_fs_error(fs_info, ret,
                        "Error while writing out transaction for qgroup");
 
 out:
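
The transid snapshot above is the point of this hunk: the handle is returned to the slab cache before the delayed refs are kicked, so the transaction id must be copied while trans is still live rather than read through the freed pointer. The pattern, condensed:

u64 transid = trans->transid;   /* snapshot while the handle is live */

/* ... transaction teardown ... */
kmem_cache_free(btrfs_trans_handle_cachep, trans);

/* safe: nothing dereferences the freed handle */
if (must_run_delayed_refs)
        btrfs_async_run_delayed_refs(root, cur, transid,
                                     must_run_delayed_refs == 1);
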
index 9fe0ec2bf0fe7fefda878bb9e885071f0d8e3f7e..c5abee4f01add54f811c78fac76170f8b5c02bb3 100644 (file)
@@ -110,7 +110,6 @@ struct btrfs_trans_handle {
        u64 chunk_bytes_reserved;
        unsigned long use_count;
        unsigned long blocks_reserved;
-       unsigned long blocks_used;
        unsigned long delayed_ref_updates;
        struct btrfs_transaction *transaction;
        struct btrfs_block_rsv *block_rsv;
@@ -121,6 +120,7 @@ struct btrfs_trans_handle {
        bool can_flush_pending_bgs;
        bool reloc_reserved;
        bool sync;
+       bool dirty;
        unsigned int type;
        /*
         * this root is only needed to validate that the root passed to
index b7665af471d88510d391c4ffc8bcbb65e506bd50..c05f69a8ec42dad19269620e0ec7e8b38f91ca25 100644 (file)
@@ -2422,8 +2422,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                root_owner = btrfs_header_owner(parent);
 
                next = btrfs_find_create_tree_block(root, bytenr);
-               if (!next)
-                       return -ENOMEM;
+               if (IS_ERR(next))
+                       return PTR_ERR(next);
 
                if (*level == 1) {
                        ret = wc->process_func(root, next, wc, ptr_gen);
index bdc62561ede8f534358e649b8b69763ec5f512dc..589f128173b10e1648ce2714cd61c042d5c9e6da 100644 (file)
@@ -2761,6 +2761,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
        u64 dev_extent_len = 0;
        u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        int i, ret = 0;
+       struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 
        /* Just in case */
        root = root->fs_info->chunk_root;
@@ -2787,12 +2788,19 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
        check_system_chunk(trans, extent_root, map->type);
        unlock_chunks(root->fs_info->chunk_root);
 
+       /*
+        * Take the device list mutex to prevent races with the final phase of
+        * a device replace operation that replaces the device object associated
+        * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
+        */
+       mutex_lock(&fs_devices->device_list_mutex);
        for (i = 0; i < map->num_stripes; i++) {
                struct btrfs_device *device = map->stripes[i].dev;
                ret = btrfs_free_dev_extent(trans, device,
                                            map->stripes[i].physical,
                                            &dev_extent_len);
                if (ret) {
+                       mutex_unlock(&fs_devices->device_list_mutex);
                        btrfs_abort_transaction(trans, root, ret);
                        goto out;
                }
@@ -2811,11 +2819,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
                if (map->stripes[i].dev) {
                        ret = btrfs_update_device(trans, map->stripes[i].dev);
                        if (ret) {
+                               mutex_unlock(&fs_devices->device_list_mutex);
                                btrfs_abort_transaction(trans, root, ret);
                                goto out;
                        }
                }
        }
+       mutex_unlock(&fs_devices->device_list_mutex);
+
        ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
@@ -4230,6 +4241,7 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
        if (IS_ERR(uuid_root)) {
                ret = PTR_ERR(uuid_root);
                btrfs_abort_transaction(trans, tree_root, ret);
+               btrfs_end_transaction(trans, tree_root);
                return ret;
        }
 
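
btrfs_abort_transaction() marks the filesystem aborted but does not release the handle, so this error path also needs btrfs_end_transaction() to avoid leaking the handle and its reservations. Condensed:

if (IS_ERR(uuid_root)) {
        ret = PTR_ERR(uuid_root);
        btrfs_abort_transaction(trans, tree_root, ret);
        btrfs_end_transaction(trans, tree_root); /* don't leak trans */
        return ret;
}
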
@@ -4682,12 +4694,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
        if (type & BTRFS_BLOCK_GROUP_RAID5) {
                raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
-                                btrfs_super_stripesize(info->super_copy));
+                                               extent_root->stripesize);
                data_stripes = num_stripes - 1;
        }
        if (type & BTRFS_BLOCK_GROUP_RAID6) {
                raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
-                                btrfs_super_stripesize(info->super_copy));
+                                               extent_root->stripesize);
                data_stripes = num_stripes - 2;
        }
 
@@ -5762,20 +5774,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                        }
                }
                if (found) {
-                       if (physical_of_found + map->stripe_len <=
-                           dev_replace->cursor_left) {
-                               struct btrfs_bio_stripe *tgtdev_stripe =
-                                       bbio->stripes + num_stripes;
+                       struct btrfs_bio_stripe *tgtdev_stripe =
+                               bbio->stripes + num_stripes;
 
-                               tgtdev_stripe->physical = physical_of_found;
-                               tgtdev_stripe->length =
-                                       bbio->stripes[index_srcdev].length;
-                               tgtdev_stripe->dev = dev_replace->tgtdev;
-                               bbio->tgtdev_map[index_srcdev] = num_stripes;
+                       tgtdev_stripe->physical = physical_of_found;
+                       tgtdev_stripe->length =
+                               bbio->stripes[index_srcdev].length;
+                       tgtdev_stripe->dev = dev_replace->tgtdev;
+                       bbio->tgtdev_map[index_srcdev] = num_stripes;
 
-                               tgtdev_indexes++;
-                               num_stripes++;
-                       }
+                       tgtdev_indexes++;
+                       num_stripes++;
                }
        }
 
@@ -6250,27 +6259,23 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
        return dev;
 }
 
-static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
-                         struct extent_buffer *leaf,
-                         struct btrfs_chunk *chunk)
+/* Return -EIO if any error, otherwise return 0. */
+static int btrfs_check_chunk_valid(struct btrfs_root *root,
+                                  struct extent_buffer *leaf,
+                                  struct btrfs_chunk *chunk, u64 logical)
 {
-       struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
-       struct map_lookup *map;
-       struct extent_map *em;
-       u64 logical;
        u64 length;
        u64 stripe_len;
-       u64 devid;
-       u8 uuid[BTRFS_UUID_SIZE];
-       int num_stripes;
-       int ret;
-       int i;
+       u16 num_stripes;
+       u16 sub_stripes;
+       u64 type;
 
-       logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);
        stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
-       /* Validation check */
+       sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+       type = btrfs_chunk_type(leaf, chunk);
+
        if (!num_stripes) {
                btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
                          num_stripes);
@@ -6281,6 +6286,11 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                          "invalid chunk logical %llu", logical);
                return -EIO;
        }
+       if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
+               btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
+                         btrfs_chunk_sector_size(leaf, chunk));
+               return -EIO;
+       }
        if (!length || !IS_ALIGNED(length, root->sectorsize)) {
                btrfs_err(root->fs_info,
                        "invalid chunk length %llu", length);
@@ -6292,13 +6302,54 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                return -EIO;
        }
        if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
-           btrfs_chunk_type(leaf, chunk)) {
+           type) {
                btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
                          ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
                            BTRFS_BLOCK_GROUP_PROFILE_MASK) &
                          btrfs_chunk_type(leaf, chunk));
                return -EIO;
        }
+       if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
+           (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
+           (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
+           (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
+           (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
+           ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
+            num_stripes != 1)) {
+               btrfs_err(root->fs_info,
+                       "invalid num_stripes:sub_stripes %u:%u for profile %llu",
+                       num_stripes, sub_stripes,
+                       type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
+                         struct extent_buffer *leaf,
+                         struct btrfs_chunk *chunk)
+{
+       struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+       struct map_lookup *map;
+       struct extent_map *em;
+       u64 logical;
+       u64 length;
+       u64 stripe_len;
+       u64 devid;
+       u8 uuid[BTRFS_UUID_SIZE];
+       int num_stripes;
+       int ret;
+       int i;
+
+       logical = key->offset;
+       length = btrfs_chunk_length(leaf, chunk);
+       stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+       num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+
+       ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
+       if (ret)
+               return ret;
 
        read_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
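
The profile checks added to btrfs_check_chunk_valid() reject on-disk stripe counts that cannot match the declared block group type. The same conditions in table form:

/* profile                   accepted stripe layout
 * RAID10                    sub_stripes == 2
 * RAID1                     num_stripes >= 1
 * RAID5                     num_stripes >= 2
 * RAID6                     num_stripes >= 3
 * DUP                       num_stripes <= 2
 * no profile bits (single)  num_stripes == 1
 */
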
@@ -6546,6 +6597,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        u32 array_size;
        u32 len = 0;
        u32 cur_offset;
+       u64 type;
        struct btrfs_key key;
 
        ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
@@ -6555,8 +6607,8 @@ int btrfs_read_sys_array(struct btrfs_root *root)
         * overallocate but we can keep it as-is, only the first page is used.
         */
        sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
-       if (!sb)
-               return -ENOMEM;
+       if (IS_ERR(sb))
+               return PTR_ERR(sb);
        set_extent_buffer_uptodate(sb);
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
        /*
@@ -6612,6 +6664,15 @@ int btrfs_read_sys_array(struct btrfs_root *root)
                                break;
                        }
 
+                       type = btrfs_chunk_type(sb, chunk);
+                       if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
+                               btrfs_err(root->fs_info,
+                           "invalid chunk type %llu in sys_array at offset %u",
+                                       type, cur_offset);
+                               ret = -EIO;
+                               break;
+                       }
+
                        len = btrfs_chunk_item_size(num_stripes);
                        if (cur_offset + len > array_size)
                                goto out_short_read;
@@ -6630,12 +6691,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
                sb_array_offset += len;
                cur_offset += len;
        }
+       clear_extent_buffer_uptodate(sb);
        free_extent_buffer_stale(sb);
        return ret;
 
 out_short_read:
        printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
                        len, cur_offset);
+       clear_extent_buffer_uptodate(sb);
        free_extent_buffer_stale(sb);
        return -EIO;
 }
@@ -6648,6 +6711,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
        struct btrfs_key found_key;
        int ret;
        int slot;
+       u64 total_dev = 0;
 
        root = root->fs_info->chunk_root;
 
@@ -6689,6 +6753,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
                        ret = read_one_dev(root, leaf, dev_item);
                        if (ret)
                                goto error;
+                       total_dev++;
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
@@ -6698,6 +6763,28 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
                }
                path->slots[0]++;
        }
+
+       /*
+        * After loading chunk tree, we've got all device information,
+        * do another round of validation checks.
+        */
+       if (total_dev != root->fs_info->fs_devices->total_devices) {
+               btrfs_err(root->fs_info,
+          "super_num_devices %llu mismatch with num_devices %llu found here",
+                         btrfs_super_num_devices(root->fs_info->super_copy),
+                         total_dev);
+               ret = -EINVAL;
+               goto error;
+       }
+       if (btrfs_super_total_bytes(root->fs_info->super_copy) <
+           root->fs_info->fs_devices->total_rw_bytes) {
+               btrfs_err(root->fs_info,
+       "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
+                         btrfs_super_total_bytes(root->fs_info->super_copy),
+                         root->fs_info->fs_devices->total_rw_bytes);
+               ret = -EINVAL;
+               goto error;
+       }
        ret = 0;
 error:
        unlock_chunks(root);
index 861d611b8c05870063984b9736c4bdb177dc3f3f..ce5f345d70f507a315d76c3d2b4be4aba295124d 100644 (file)
@@ -380,7 +380,7 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache)
  * check if the backing cache is updated to FS-Cache
  * - called by FS-Cache when evaluates if need to invalidate the cache
  */
-static bool cachefiles_check_consistency(struct fscache_operation *op)
+static int cachefiles_check_consistency(struct fscache_operation *op)
 {
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
index eeb71e5de27aca7ef1dca3f3a22e3abaa64e467b..26a9d10d75e9d2e49603fe4f5bb6242146000ba3 100644 (file)
@@ -276,8 +276,10 @@ static void finish_read(struct ceph_osd_request *req)
        for (i = 0; i < num_pages; i++) {
                struct page *page = osd_data->pages[i];
 
-               if (rc < 0 && rc != -ENOENT)
+               if (rc < 0 && rc != -ENOENT) {
+                       ceph_fscache_readpage_cancel(inode, page);
                        goto unlock;
+               }
                if (bytes < (int)PAGE_SIZE) {
                        /* zero (remainder of) page */
                        int s = bytes < 0 ? 0 : bytes;
@@ -535,8 +537,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
            CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
                set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
 
-       ceph_readpage_to_fscache(inode, page);
-
        set_page_writeback(page);
        err = ceph_osdc_writepages(osdc, ceph_vino(inode),
                                   &ci->i_layout, snapc,
index c052b5bf219b54d3c3cd0b15f85a3f153c710fbf..238c55b01723d1eef74bd15d0f66895f6feac986 100644 (file)
@@ -25,6 +25,7 @@
 #include "cache.h"
 
 struct ceph_aux_inode {
+       u64             version;
        struct timespec mtime;
        loff_t          size;
 };
@@ -69,15 +70,8 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
        fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
                                              &ceph_fscache_fsid_object_def,
                                              fsc, true);
-
-       if (fsc->fscache == NULL) {
+       if (!fsc->fscache)
                pr_err("Unable to resgister fsid: %p fscache cookie", fsc);
-               return 0;
-       }
-
-       fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1);
-       if (fsc->revalidate_wq == NULL)
-               return -ENOMEM;
 
        return 0;
 }
@@ -105,6 +99,7 @@ static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data,
        const struct inode* inode = &ci->vfs_inode;
 
        memset(&aux, 0, sizeof(aux));
+       aux.version = ci->i_version;
        aux.mtime = inode->i_mtime;
        aux.size = i_size_read(inode);
 
@@ -131,6 +126,7 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux(
                return FSCACHE_CHECKAUX_OBSOLETE;
 
        memset(&aux, 0, sizeof(aux));
+       aux.version = ci->i_version;
        aux.mtime = inode->i_mtime;
        aux.size = i_size_read(inode);
 
@@ -181,32 +177,26 @@ static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
        .now_uncached   = ceph_fscache_inode_now_uncached,
 };
 
-void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
-                                       struct ceph_inode_info* ci)
+void ceph_fscache_register_inode_cookie(struct inode *inode)
 {
-       struct inode* inode = &ci->vfs_inode;
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 
        /* No caching for filesystem */
        if (fsc->fscache == NULL)
                return;
 
        /* Only cache for regular files that are read only */
-       if ((ci->vfs_inode.i_mode & S_IFREG) == 0)
+       if (!S_ISREG(inode->i_mode))
                return;
 
-       /* Avoid multiple racing open requests */
-       inode_lock(inode);
-
-       if (ci->fscache)
-               goto done;
-
-       ci->fscache = fscache_acquire_cookie(fsc->fscache,
-                                            &ceph_fscache_inode_object_def,
-                                            ci, true);
-       fscache_check_consistency(ci->fscache);
-done:
+       inode_lock_nested(inode, I_MUTEX_CHILD);
+       if (!ci->fscache) {
+               ci->fscache = fscache_acquire_cookie(fsc->fscache,
+                                       &ceph_fscache_inode_object_def,
+                                       ci, false);
+       }
        inode_unlock(inode);
-
 }
 
 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
@@ -222,6 +212,34 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
        fscache_relinquish_cookie(cookie, 0);
 }
 
+static bool ceph_fscache_can_enable(void *data)
+{
+       struct inode *inode = data;
+       return !inode_is_open_for_write(inode);
+}
+
+void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
+{
+       struct ceph_inode_info *ci = ceph_inode(inode);
+
+       if (!fscache_cookie_valid(ci->fscache))
+               return;
+
+       if (inode_is_open_for_write(inode)) {
+               dout("fscache_file_set_cookie %p %p disabling cache\n",
+                    inode, filp);
+               fscache_disable_cookie(ci->fscache, false);
+               fscache_uncache_all_inode_pages(ci->fscache, inode);
+       } else {
+               fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable,
+                               inode);
+               if (fscache_cookie_enabled(ci->fscache)) {
+                       dout("fscache_file_set_cookie %p %p enabling cache\n",
+                            inode, filp);
+               }
+       }
+}
+
 static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
 {
        if (!error)
@@ -238,8 +256,7 @@ static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int
 
 static inline bool cache_valid(struct ceph_inode_info *ci)
 {
-       return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) &&
-               (ci->i_fscache_gen == ci->i_rdcache_gen));
+       return ci->i_fscache_gen == ci->i_rdcache_gen;
 }
 
 
@@ -332,69 +349,27 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
 
 void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
 {
-       if (fsc->revalidate_wq)
-               destroy_workqueue(fsc->revalidate_wq);
-
        fscache_relinquish_cookie(fsc->fscache, 0);
        fsc->fscache = NULL;
 }
 
-static void ceph_revalidate_work(struct work_struct *work)
-{
-       int issued;
-       u32 orig_gen;
-       struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
-                                                 i_revalidate_work);
-       struct inode *inode = &ci->vfs_inode;
-
-       spin_lock(&ci->i_ceph_lock);
-       issued = __ceph_caps_issued(ci, NULL);
-       orig_gen = ci->i_rdcache_gen;
-       spin_unlock(&ci->i_ceph_lock);
-
-       if (!(issued & CEPH_CAP_FILE_CACHE)) {
-               dout("revalidate_work lost cache before validation %p\n",
-                    inode);
-               goto out;
-       }
-
-       if (!fscache_check_consistency(ci->fscache))
-               fscache_invalidate(ci->fscache);
-
-       spin_lock(&ci->i_ceph_lock);
-       /* Update the new valid generation (backwards sanity check too) */
-       if (orig_gen > ci->i_fscache_gen) {
-               ci->i_fscache_gen = orig_gen;
-       }
-       spin_unlock(&ci->i_ceph_lock);
-
-out:
-       iput(&ci->vfs_inode);
-}
-
-void ceph_queue_revalidate(struct inode *inode)
+/*
+ * caller should hold CEPH_CAP_FILE_{RD,CACHE}
+ */
+void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
-       struct ceph_inode_info *ci = ceph_inode(inode);
-
-       if (fsc->revalidate_wq == NULL || ci->fscache == NULL)
+       if (cache_valid(ci))
                return;
 
-       ihold(inode);
-
-       if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq,
-                      &ci->i_revalidate_work)) {
-               dout("ceph_queue_revalidate %p\n", inode);
-       } else {
-               dout("ceph_queue_revalidate %p failed\n)", inode);
-               iput(inode);
+       /* Reuse i_truncate_mutex. There should be no pending
+        * truncate while the caller holds CEPH_CAP_FILE_RD. */
+       mutex_lock(&ci->i_truncate_mutex);
+       if (!cache_valid(ci)) {
+               if (fscache_check_consistency(ci->fscache))
+                       fscache_invalidate(ci->fscache);
+               spin_lock(&ci->i_ceph_lock);
+               ci->i_fscache_gen = ci->i_rdcache_gen;
+               spin_unlock(&ci->i_ceph_lock);
        }
-}
-
-void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-       ci->fscache = NULL;
-       /* The first load is verifed cookie open time */
-       ci->i_fscache_gen = 1;
-       INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work);
+       mutex_unlock(&ci->i_truncate_mutex);
 }
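
This rework moves ceph's fscache validation off the workqueue and onto the open and caps paths: the cookie is acquired disabled at registration, and each open either disables caching (a writer is present) or tries to enable it, with ceph_fscache_can_enable() rechecking for writers under fscache's own locking so a racing open for write cannot slip through. The per-open decision, condensed from ceph_fscache_file_set_cookie() above:

if (inode_is_open_for_write(inode)) {
        /* a writer invalidates whatever we have cached */
        fscache_disable_cookie(ci->fscache, false);
        fscache_uncache_all_inode_pages(ci->fscache, inode);
} else {
        /* the can_enable callback re-checks for writers under the
         * cookie's lock, closing the race with a concurrent open */
        fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable, inode);
}
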
index 5ac591bd012bc8cd3653cdd8f9ca731511a051d1..7e72c7594f0c8aeee7e203946389376dac3dfaa2 100644 (file)
@@ -34,10 +34,10 @@ void ceph_fscache_unregister(void);
 int ceph_fscache_register_fs(struct ceph_fs_client* fsc);
 void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc);
 
-void ceph_fscache_inode_init(struct ceph_inode_info *ci);
-void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
-                                       struct ceph_inode_info* ci);
+void ceph_fscache_register_inode_cookie(struct inode *inode);
 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci);
+void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp);
+void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci);
 
 int ceph_readpage_from_fscache(struct inode *inode, struct page *page);
 int ceph_readpages_from_fscache(struct inode *inode,
@@ -46,12 +46,11 @@ int ceph_readpages_from_fscache(struct inode *inode,
                                unsigned *nr_pages);
 void ceph_readpage_to_fscache(struct inode *inode, struct page *page);
 void ceph_invalidate_fscache_page(struct inode* inode, struct page *page);
-void ceph_queue_revalidate(struct inode *inode);
 
-static inline void ceph_fscache_update_objectsize(struct inode *inode)
+static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
 {
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       fscache_attr_changed(ci->fscache);
+       ci->fscache = NULL;
+       ci->i_fscache_gen = 0;
 }
 
 static inline void ceph_fscache_invalidate(struct inode *inode)
@@ -88,6 +87,11 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode,
        return fscache_readpages_cancel(ci->fscache, pages);
 }
 
+static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
+{
+       ci->i_fscache_gen = ci->i_rdcache_gen - 1;
+}
+
 #else
 
 static inline int ceph_fscache_register(void)
@@ -112,8 +116,20 @@ static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
 {
 }
 
-static inline void ceph_fscache_register_inode_cookie(struct ceph_fs_client* parent_fsc,
-                                                     struct ceph_inode_info* ci)
+static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
+{
+}
+
+static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+{
+}
+
+static inline void ceph_fscache_file_set_cookie(struct inode *inode,
+                                               struct file *filp)
+{
+}
+
+static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
 {
 }
 
@@ -141,10 +157,6 @@ static inline void ceph_readpage_to_fscache(struct inode *inode,
 {
 }
 
-static inline void ceph_fscache_update_objectsize(struct inode *inode)
-{
-}
-
 static inline void ceph_fscache_invalidate(struct inode *inode)
 {
 }
@@ -154,10 +166,6 @@ static inline void ceph_invalidate_fscache_page(struct inode *inode,
 {
 }
 
-static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
-{
-}
-
 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
 {
        return 1;
@@ -173,7 +181,7 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode,
 {
 }
 
-static inline void ceph_queue_revalidate(struct inode *inode)
+static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci)
 {
 }
 
index c17b5d76d75ee96515717ce29cc62971f80acca4..6f60d0a3d0f9c36549dc1218965600dd02e383c1 100644 (file)
@@ -2393,6 +2393,9 @@ again:
                                snap_rwsem_locked = true;
                        }
                        *got = need | (have & want);
+                       if ((need & CEPH_CAP_FILE_RD) &&
+                           !(*got & CEPH_CAP_FILE_CACHE))
+                               ceph_disable_fscache_readpage(ci);
                        __take_cap_refs(ci, *got, true);
                        ret = 1;
                }
@@ -2554,6 +2557,9 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
                break;
        }
 
+       if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
+               ceph_fscache_revalidate_cookie(ci);
+
        *got = _got;
        return 0;
 }
@@ -2795,7 +2801,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
        bool writeback = false;
        bool queue_trunc = false;
        bool queue_invalidate = false;
-       bool queue_revalidate = false;
        bool deleted_inode = false;
        bool fill_inline = false;
 
@@ -2837,8 +2842,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
                                ci->i_rdcache_revoking = ci->i_rdcache_gen;
                        }
                }
-
-               ceph_fscache_invalidate(inode);
        }
 
        /* side effects now are allowed */
@@ -2880,11 +2883,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
                }
        }
 
-       /* Do we need to revalidate our fscache cookie. Don't bother on the
-        * first cache cap as we already validate at cookie creation time. */
-       if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
-               queue_revalidate = true;
-
        if (newcaps & CEPH_CAP_ANY_RD) {
                /* ctime/mtime/atime? */
                ceph_decode_timespec(&mtime, &grant->mtime);
@@ -2993,11 +2991,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
        if (fill_inline)
                ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
 
-       if (queue_trunc) {
+       if (queue_trunc)
                ceph_queue_vmtruncate(inode);
-               ceph_queue_revalidate(inode);
-       } else if (queue_revalidate)
-               ceph_queue_revalidate(inode);
 
        if (writeback)
                /*
@@ -3199,10 +3194,8 @@ static void handle_cap_trunc(struct inode *inode,
                                          truncate_seq, truncate_size, size);
        spin_unlock(&ci->i_ceph_lock);
 
-       if (queue_trunc) {
+       if (queue_trunc)
                ceph_queue_vmtruncate(inode);
-               ceph_fscache_invalidate(inode);
-       }
 }
 
 /*
index 6e72c98162d5351a9ad27b581eb17737eb4a1203..1780218a48f0843e2fa0777831d958cb4c4e9a37 100644 (file)
@@ -95,10 +95,8 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
        }
 
        dentry = d_obtain_alias(inode);
-       if (IS_ERR(dentry)) {
-               iput(inode);
+       if (IS_ERR(dentry))
                return dentry;
-       }
        err = ceph_init_dentry(dentry);
        if (err < 0) {
                dput(dentry);
@@ -167,10 +165,8 @@ static struct dentry *__get_parent(struct super_block *sb,
                return ERR_PTR(-ENOENT);
 
        dentry = d_obtain_alias(inode);
-       if (IS_ERR(dentry)) {
-               iput(inode);
+       if (IS_ERR(dentry))
                return dentry;
-       }
        err = ceph_init_dentry(dentry);
        if (err < 0) {
                dput(dentry);
@@ -210,7 +206,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
 
        dout("fh_to_parent %llx\n", cfh->parent_ino);
        dentry = __get_parent(sb, NULL, cfh->ino);
-       if (IS_ERR(dentry) && PTR_ERR(dentry) == -ENOENT)
+       if (unlikely(dentry == ERR_PTR(-ENOENT)))
                dentry = __fh_to_dentry(sb, cfh->parent_ino);
        return dentry;
 }
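
Both hunks above drop an iput() on the d_obtain_alias() error path: d_obtain_alias() consumes the inode reference even when it returns an error pointer, so the old code released the reference twice. The pattern:

dentry = d_obtain_alias(inode); /* consumes our inode ref, success or failure */
if (IS_ERR(dentry))
        return dentry;          /* no iput(inode) here */
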
index a888df6f2d71b99a2e01890c25ff40d48c4dd649..0daaf7ceedc55f3769abb441a2477ea9fe4f0f75 100644 (file)
@@ -137,23 +137,11 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 {
        struct ceph_file_info *cf;
        int ret = 0;
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
-       struct ceph_mds_client *mdsc = fsc->mdsc;
 
        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
-               /* First file open request creates the cookie, we want to keep
-                * this cookie around for the filetime of the inode as not to
-                * have to worry about fscache register / revoke / operation
-                * races.
-                *
-                * Also, if we know the operation is going to invalidate data
-                * (non readonly) just nuke the cache right away.
-                */
-               ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
-               if ((fmode & CEPH_FILE_MODE_WR))
-                       ceph_fscache_invalidate(inode);
+               ceph_fscache_register_inode_cookie(inode);
+               ceph_fscache_file_set_cookie(inode, file);
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
@@ -406,7 +394,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
 
-       if (d_unhashed(dentry)) {
+       if (d_in_lookup(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
@@ -1349,7 +1337,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        }
 
 retry_snap:
-       if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
+       if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
                err = -ENOSPC;
                goto out;
        }
@@ -1407,7 +1395,6 @@ retry_snap:
                        iov_iter_advance(from, written);
                ceph_put_snap_context(snapc);
        } else {
-               loff_t old_size = i_size_read(inode);
                /*
                 * No need to acquire the i_truncate_mutex. Because
                 * the MDS revokes Fwb caps before sending truncate
@@ -1418,8 +1405,6 @@ retry_snap:
                written = generic_perform_write(file, from, pos);
                if (likely(written >= 0))
                        iocb->ki_pos = pos + written;
-               if (i_size_read(inode) > old_size)
-                       ceph_fscache_update_objectsize(inode);
                inode_unlock(inode);
        }
 
@@ -1440,7 +1425,7 @@ retry_snap:
        ceph_put_cap_refs(ci, got);
 
        if (written >= 0) {
-               if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))
+               if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
                        iocb->ki_flags |= IOCB_DSYNC;
 
                written = generic_write_sync(iocb, written);
@@ -1672,8 +1657,8 @@ static long ceph_fallocate(struct file *file, int mode,
                goto unlock;
        }
 
-       if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
-               !(mode & FALLOC_FL_PUNCH_HOLE)) {
+       if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
+           !(mode & FALLOC_FL_PUNCH_HOLE)) {
                ret = -ENOSPC;
                goto unlock;
        }
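
The d_unhashed() to d_in_lookup() switch here (and in cifs_atomic_open below) follows the 4.7 parallel-lookup VFS work: with concurrent lookups allowed in one directory, an unhashed dentry no longer implies this call owns an in-progress lookup, and d_in_lookup() is the authoritative test. The branch an ->atomic_open() now takes, sketched:

if (d_in_lookup(dentry)) {
        /* we own the in-flight lookup: finish it and
         * instantiate the dentry ourselves */
} else {
        /* the dentry was already looked up (and revalidated);
         * open it without redoing the lookup */
}
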
index 0130a85921917faa6b5cf40c263468460921eab6..0168b49fb6adb2be232a4861005c8f290f452701 100644 (file)
@@ -103,7 +103,6 @@ struct ceph_fs_client {
 
 #ifdef CONFIG_CEPH_FSCACHE
        struct fscache_cookie *fscache;
-       struct workqueue_struct *revalidate_wq;
 #endif
 };
 
@@ -360,8 +359,7 @@ struct ceph_inode_info {
 
 #ifdef CONFIG_CEPH_FSCACHE
        struct fscache_cookie *fscache;
-       u32 i_fscache_gen; /* sequence, for delayed fscache validate */
-       struct work_struct i_revalidate_work;
+       u32 i_fscache_gen;
 #endif
        struct inode vfs_inode; /* at end */
 };
index 5a53ac6b1e02515be90a4e446b103aa9f6f26874..02b071bf3732ac29808183974636e2ac9ca56ce6 100644 (file)
@@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target)
        case SFM_SLASH:
                *target = '\\';
                break;
+       case SFM_SPACE:
+               *target = ' ';
+               break;
+       case SFM_PERIOD:
+               *target = '.';
+               break;
        default:
                return false;
        }
@@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char)
        return dest_char;
 }
 
-static __le16 convert_to_sfm_char(char src_char)
+static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
 {
        __le16 dest_char;
 
@@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char)
        case '|':
                dest_char = cpu_to_le16(SFM_PIPE);
                break;
+       case '.':
+               if (end_of_string)
+                       dest_char = cpu_to_le16(SFM_PERIOD);
+               else
+                       dest_char = 0;
+               break;
+       case ' ':
+               if (end_of_string)
+                       dest_char = cpu_to_le16(SFM_SPACE);
+               else
+                       dest_char = 0;
+               break;
        default:
                dest_char = 0;
        }
@@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
                /* see if we must remap this char */
                if (map_chars == SFU_MAP_UNI_RSVD)
                        dst_char = convert_to_sfu_char(src_char);
-               else if (map_chars == SFM_MAP_UNI_RSVD)
-                       dst_char = convert_to_sfm_char(src_char);
-               else
+               else if (map_chars == SFM_MAP_UNI_RSVD) {
+                       bool end_of_string;
+
+                       if (i == srclen - 1)
+                               end_of_string = true;
+                       else
+                               end_of_string = false;
+
+                       dst_char = convert_to_sfm_char(src_char, end_of_string);
+               } else
                        dst_char = 0;
                /*
                 * FIXME: We can not handle remapping backslash (UNI_SLASH)
index bdc52cb9a676db0a9041d7e198810ce8df2aca9a..479bc0a941f35f79056dedc45ab99825669f1e6e 100644 (file)
@@ -64,6 +64,8 @@
 #define SFM_LESSTHAN    ((__u16) 0xF023)
 #define SFM_PIPE        ((__u16) 0xF027)
 #define SFM_SLASH       ((__u16) 0xF026)
+#define SFM_PERIOD     ((__u16) 0xF028)
+#define SFM_SPACE      ((__u16) 0xF029)
 
 /*
  * Mapping mechanism to use when one of the seven reserved characters is
index 5d8b7edf8a8f69009114e56e28c530fb4e7e686f..5d841f39c4b70e853e5b74d1cb6e01e7d112fd25 100644 (file)
@@ -87,6 +87,7 @@ extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
 
 struct workqueue_struct        *cifsiod_wq;
+__u32 cifs_lock_secret;
 
 /*
  * Bumps refcount for cifs super block.
@@ -1266,6 +1267,8 @@ init_cifs(void)
        spin_lock_init(&cifs_file_list_lock);
        spin_lock_init(&GlobalMid_Lock);
 
+       get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
+
        if (cifs_max_pending < 2) {
                cifs_max_pending = 2;
                cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
index bba106cdc43c4c5703568ac82016714983e1d7bc..8f1d8c1e72bece412a926d0e20b9bdc95a7fa9ee 100644 (file)
@@ -1619,6 +1619,7 @@ void cifs_oplock_break(struct work_struct *work);
 
 extern const struct slow_work_ops cifs_oplock_break_ops;
 extern struct workqueue_struct *cifsiod_wq;
+extern __u32 cifs_lock_secret;
 
 extern mempool_t *cifs_mid_poolp;
 
index 66736f57b5abdf785a724aab8701d620abbb7898..7d2b15c060909b103e96a90a330c6f0ed7ecd2d0 100644 (file)
@@ -428,7 +428,9 @@ cifs_echo_request(struct work_struct *work)
         * server->ops->need_neg() == true. Also, no need to ping if
         * we got a response recently.
         */
-       if (!server->ops->need_neg || server->ops->need_neg(server) ||
+
+       if (server->tcpStatus == CifsNeedReconnect ||
+           server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
            (server->ops->can_echo && !server->ops->can_echo(server)) ||
            time_before(jiffies, server->lstrp + echo_interval - HZ))
                goto requeue_echo;
index c3eb998a99bd18a2ed9b7b843c99be15fedab9df..fb0903fffc22c8738c34f958beb7784400a93339 100644 (file)
@@ -445,7 +445,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
                 * Check for hashed negative dentry. We have already revalidated
                 * the dentry and it is fine. No need to perform another lookup.
                 */
-               if (!d_unhashed(direntry))
+               if (!d_in_lookup(direntry))
                        return -ENOENT;
 
                res = cifs_lookup(inode, direntry, 0);
index 9793ae0bcaa2bc678f9a688c167ef2b091cc858f..d4890b6dc22db0a4c7345b195e0209c5d32cf0b0 100644 (file)
@@ -1112,6 +1112,12 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
        return rc;
 }
 
+static __u32
+hash_lockowner(fl_owner_t owner)
+{
+       return cifs_lock_secret ^ hash32_ptr((const void *)owner);
+}
+
 struct lock_to_push {
        struct list_head llist;
        __u64 offset;
@@ -1178,7 +1184,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
                else
                        type = CIFS_WRLCK;
                lck = list_entry(el, struct lock_to_push, llist);
-               lck->pid = flock->fl_pid;
+               lck->pid = hash_lockowner(flock->fl_owner);
                lck->netfid = cfile->fid.netfid;
                lck->length = length;
                lck->type = type;
@@ -1305,7 +1311,8 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;
-               rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
+               rc = CIFSSMBPosixLock(xid, tcon, netfid,
+                                     hash_lockowner(flock->fl_owner),
                                      flock->fl_start, length, flock,
                                      posix_lock_type, wait_flag);
                return rc;
@@ -1505,7 +1512,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
                        posix_lock_type = CIFS_UNLCK;
 
                rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
-                                     current->tgid, flock->fl_start, length,
+                                     hash_lockowner(flock->fl_owner),
+                                     flock->fl_start, length,
                                      NULL, posix_lock_type, wait_flag);
                goto out;
        }
index 848249fa120fc270d751d02597107a20319cfa29..3079b38f0afbd197bc0fc9f27df086a92ab90fbf 100644 (file)
@@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
 
 int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
 void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
-int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
+int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
                        struct cifs_ses *ses,
                        const struct nls_table *nls_cp);
index af0ec2d5ad0e9da25af4ec8528266451a42cc8b5..538d9b55699a10f1185b8df25a1f36f9a24ae75c 100644 (file)
@@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
        sec_blob->DomainName.MaximumLength = 0;
 }
 
-/* We do not malloc the blob, it is passed in pbuffer, because its
-   maximum possible size is fixed and small, making this approach cleaner.
-   This function returns the length of the data in the blob */
-int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+static int size_of_ntlmssp_blob(struct cifs_ses *ses)
+{
+       int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
+               - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
+
+       if (ses->domainName)
+               sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+       else
+               sz += 2;
+
+       if (ses->user_name)
+               sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+       else
+               sz += 2;
+
+       return sz;
+}
+
+int build_ntlmssp_auth_blob(unsigned char **pbuffer,
                                        u16 *buflen,
                                   struct cifs_ses *ses,
                                   const struct nls_table *nls_cp)
 {
        int rc;
-       AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
+       AUTHENTICATE_MESSAGE *sec_blob;
        __u32 flags;
        unsigned char *tmp;
 
+       rc = setup_ntlmv2_rsp(ses, nls_cp);
+       if (rc) {
+               cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+               *buflen = 0;
+               goto setup_ntlmv2_ret;
+       }
+       *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
+       sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
+
        memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
        sec_blob->MessageType = NtLmAuthenticate;
 
@@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                        flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
        }
 
-       tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
+       tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
        sec_blob->NegotiateFlags = cpu_to_le32(flags);
 
        sec_blob->LmChallengeResponse.BufferOffset =
@@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        sec_blob->LmChallengeResponse.Length = 0;
        sec_blob->LmChallengeResponse.MaximumLength = 0;
 
-       sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+       sec_blob->NtChallengeResponse.BufferOffset =
+                               cpu_to_le32(tmp - *pbuffer);
        if (ses->user_name != NULL) {
-               rc = setup_ntlmv2_rsp(ses, nls_cp);
-               if (rc) {
-                       cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
-                       goto setup_ntlmv2_ret;
-               }
                memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
                                ses->auth_key.len - CIFS_SESS_KEY_SIZE);
                tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
@@ -423,23 +443,23 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
        }
 
        if (ses->domainName == NULL) {
-               sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->DomainName.Length = 0;
                sec_blob->DomainName.MaximumLength = 0;
                tmp += 2;
        } else {
                int len;
                len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
-                                     CIFS_MAX_USERNAME_LEN, nls_cp);
+                                     CIFS_MAX_DOMAINNAME_LEN, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
-               sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->DomainName.Length = cpu_to_le16(len);
                sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
                tmp += len;
        }
 
        if (ses->user_name == NULL) {
-               sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->UserName.Length = 0;
                sec_blob->UserName.MaximumLength = 0;
                tmp += 2;
@@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
                                      CIFS_MAX_USERNAME_LEN, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
-               sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->UserName.Length = cpu_to_le16(len);
                sec_blob->UserName.MaximumLength = cpu_to_le16(len);
                tmp += len;
        }
 
-       sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+       sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
        sec_blob->WorkstationName.Length = 0;
        sec_blob->WorkstationName.MaximumLength = 0;
        tmp += 2;
@@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
                        && !calc_seckey(ses)) {
                memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
-               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
                sec_blob->SessionKey.MaximumLength =
                                cpu_to_le16(CIFS_CPHTXT_SIZE);
                tmp += CIFS_CPHTXT_SIZE;
        } else {
-               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
+               sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
                sec_blob->SessionKey.Length = 0;
                sec_blob->SessionKey.MaximumLength = 0;
        }
 
+       *buflen = tmp - *pbuffer;
 setup_ntlmv2_ret:
-       *buflen = tmp - pbuffer;
        return rc;
 }
 
@@ -690,6 +710,8 @@ sess_auth_lanman(struct sess_data *sess_data)
                rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
                                      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
                                      true : false, lnm_session_key);
+               if (rc)
+                       goto out;
 
                memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
                bcc_ptr += CIFS_AUTH_RESP_SIZE;
@@ -1266,7 +1288,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
        struct cifs_ses *ses = sess_data->ses;
        __u16 bytes_remaining;
        char *bcc_ptr;
-       char *ntlmsspblob = NULL;
+       unsigned char *ntlmsspblob = NULL;
        u16 blob_len;
 
        cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
@@ -1279,19 +1301,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
        /* Build security blob before we assemble the request */
        pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
        smb_buf = (struct smb_hdr *)pSMB;
-       /*
-        * 5 is an empirical value, large enough to hold
-        * authenticate message plus max 10 of av paris,
-        * domain, user, workstation names, flags, etc.
-        */
-       ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
-                               GFP_KERNEL);
-       if (!ntlmsspblob) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
-       rc = build_ntlmssp_auth_blob(ntlmsspblob,
+       rc = build_ntlmssp_auth_blob(&ntlmsspblob,
                                        &blob_len, ses, sess_data->nls_cp);
        if (rc)
                goto out_free_ntlmsspblob;
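
size_of_ntlmssp_blob() above replaces the old empirical kzalloc(5 * sizeof(struct _AUTHENTICATE_MESSAGE)) guess with an exact size: fixed header, the variable-length key material, and two optional UTF-16 strings at two bytes per character (or a two-byte empty placeholder each). A sketch of the same accounting with stand-in constants and a stand-in header struct:

#include <stddef.h>
#include <string.h>

#define SESS_KEY_SIZE   16      /* stand-ins for the CIFS constants */
#define CPHTXT_SIZE     16

struct auth_hdr { unsigned char fixed[72]; };   /* stand-in for AUTHENTICATE_MESSAGE */

static size_t blob_size(size_t auth_key_len,
                        const char *domain, const char *user)
{
        size_t sz = sizeof(struct auth_hdr)
                    + auth_key_len - SESS_KEY_SIZE + CPHTXT_SIZE + 2;

        /* UTF-16 doubles the byte count; absent strings still take 2 bytes */
        sz += domain ? 2 * strlen(domain) : 2;
        sz += user ? 2 * strlen(user) : 2;
        return sz;
}
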
index 8f38e33d365bcca5ce9079dc64e5d10525df801c..29e06db5f187bea7d4f5ce29d2cf5c0faad6ae09 100644 (file)
@@ -588,7 +588,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
        u16 blob_length = 0;
        struct key *spnego_key = NULL;
        char *security_blob = NULL;
-       char *ntlmssp_blob = NULL;
+       unsigned char *ntlmssp_blob = NULL;
        bool use_spnego = false; /* else use raw ntlmssp */
 
        cifs_dbg(FYI, "Session Setup\n");
@@ -713,13 +713,7 @@ ssetup_ntlmssp_authenticate:
                iov[1].iov_len = blob_length;
        } else if (phase == NtLmAuthenticate) {
                req->hdr.SessionId = ses->Suid;
-               ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
-                                      GFP_KERNEL);
-               if (ntlmssp_blob == NULL) {
-                       rc = -ENOMEM;
-                       goto ssetup_exit;
-               }
-               rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
+               rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
                                             nls_cp);
                if (rc) {
                        cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
@@ -1818,6 +1812,33 @@ SMB2_echo(struct TCP_Server_Info *server)
 
        cifs_dbg(FYI, "In echo request\n");
 
+       if (server->tcpStatus == CifsNeedNegotiate) {
+               struct list_head *tmp, *tmp2;
+               struct cifs_ses *ses;
+               struct cifs_tcon *tcon;
+
+               cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+               spin_lock(&cifs_tcp_ses_lock);
+               list_for_each(tmp, &server->smb_ses_list) {
+                       ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+                       list_for_each(tmp2, &ses->tcon_list) {
+                               tcon = list_entry(tmp2, struct cifs_tcon,
+                                                 tcon_list);
+                               /* add check for persistent handle reconnect */
+                               if (tcon && tcon->need_reconnect) {
+                                       spin_unlock(&cifs_tcp_ses_lock);
+                                       rc = smb2_reconnect(SMB2_ECHO, tcon);
+                                       spin_lock(&cifs_tcp_ses_lock);
+                               }
+                       }
+               }
+               spin_unlock(&cifs_tcp_ses_lock);
+       }
+
+       /* if there is still no session, the renegotiation above failed */
+       if (server->tcpStatus == CifsNeedNegotiate)
+               return -EIO;
+
        rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
        if (rc)
                return rc;
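
The SMB2_echo() hunk above walks the session and tcon lists under cifs_tcp_ses_lock but must drop the lock around smb2_reconnect(), which can sleep. A sketch of that drop-and-retake pattern, with pthread primitives standing in for the kernel spinlock; note the hazard flagged in the comment, which the echo path tolerates because the nodes are not freed here:

#include <pthread.h>

struct node { struct node *next; int need_work; };

static pthread_spinlock_t list_lock;    /* pthread_spin_init() omitted */

static void blocking_work(struct node *n) { (void)n; /* may sleep */ }

static void walk_and_service(struct node *head)
{
        pthread_spin_lock(&list_lock);
        for (struct node *n = head; n; n = n->next) {
                if (!n->need_work)
                        continue;
                pthread_spin_unlock(&list_lock);  /* cannot sleep while locked */
                blocking_work(n);
                pthread_spin_lock(&list_lock);    /* n->next may now be stale
                                                     unless nodes are pinned */
        }
        pthread_spin_unlock(&list_lock);
}
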
index 33b7ee34eda5f135fef480f0bdf55bb4037ab334..bbc1252a59f5f1431ee779e16780f19a5f47a5d8 100644 (file)
@@ -357,8 +357,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
 
        len = simple_write_to_buffer(buffer->bin_buffer,
                        buffer->bin_buffer_size, ppos, buf, count);
-       if (len > 0)
-               *ppos += len;
 out:
        mutex_unlock(&buffer->mutex);
        return len;
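
The two deleted lines double-advanced the file position: simple_write_to_buffer() already moves *ppos by the number of bytes copied, so the caller's extra advance left the offset twice as far along as the data actually written. A userspace stand-in for that contract, with plain memcpy in place of the user-space copy:

#include <string.h>
#include <sys/types.h>

static ssize_t write_to_buffer(void *to, size_t available, off_t *ppos,
                               const void *from, size_t count)
{
        off_t pos = *ppos;

        if (pos < 0)
                return -1;                      /* -EINVAL in the kernel */
        if ((size_t)pos >= available || !count)
                return 0;
        if (count > available - (size_t)pos)
                count = available - (size_t)pos;
        memcpy((char *)to + pos, from, count);
        *ppos = pos + count;                    /* the helper advances the offset */
        return (ssize_t)count;
}
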
index 38a7ab87e10a80d9d62d80b74dd58c1f2989a966..281b768000e664e4d4ef9092d4bb567d003623a0 100644 (file)
@@ -794,6 +794,7 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
                        return 0;
                file->f_pos = pos;
                cprm->written += n;
+               cprm->pos += n;
                nr -= n;
        }
        return 1;
@@ -808,6 +809,7 @@ int dump_skip(struct coredump_params *cprm, size_t nr)
                if (dump_interrupted() ||
                    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
                        return 0;
+               cprm->pos += nr;
                return 1;
        } else {
                while (nr > PAGE_SIZE) {
@@ -822,7 +824,7 @@ EXPORT_SYMBOL(dump_skip);
 
 int dump_align(struct coredump_params *cprm, int align)
 {
-       unsigned mod = cprm->file->f_pos & (align - 1);
+       unsigned mod = cprm->pos & (align - 1);
        if (align & (align - 1))
                return 0;
        return mod ? dump_skip(cprm, align - mod) : 1;
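
dump_align() now derives the misalignment from the new cprm->pos instead of file->f_pos, which is not reliably maintained when the dump target is a pipe. The bit trick it relies on only works for power-of-two alignments, which is exactly what the (align & (align - 1)) guard enforces:

#include <assert.h>

static unsigned misalignment(unsigned long long pos, unsigned align)
{
        /* (align & (align - 1)) == 0 exactly for powers of two */
        assert(align && (align & (align - 1)) == 0);
        return (unsigned)(pos & (align - 1));   /* == pos % align */
}

/* e.g. misalignment(4100, 4096) == 4, so dump_skip() pads 4092 bytes */
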
index 761495bf5eb91d97c6483646d89a67f10933864f..e207f8f9b7007bfa9c9a44dd7d24cc1407bac098 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -208,7 +208,12 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                                dax.addr += first;
                                size = map_len - first;
                        }
-                       max = min(pos + size, end);
+                       /*
+                        * pos + size is one past the last offset for IO,
+                        * so pos + size can overflow loff_t at extreme offsets.
+                        * Cast to u64 to catch this and get the true minimum.
+                        */
+                       max = min_t(u64, pos + size, end);
                }
 
                if (iov_iter_rw(iter) == WRITE) {
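
loff_t is signed, so at offsets near the top of the range pos + size wraps and a signed min() would wrongly prefer the wrapped sum; comparing the same bits as u64 preserves the ordering, exactly as the new comment says. A small self-contained demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t pos = INT64_MAX - 2, size = 8, end = INT64_MAX;

        /* signed pos + size would overflow; do the math as u64 instead */
        uint64_t sum = (uint64_t)pos + (uint64_t)size;
        uint64_t max = sum < (uint64_t)end ? sum : (uint64_t)end;

        printf("max == end: %d\n", max == (uint64_t)end);   /* prints 1 */
        return 0;
}
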
index ad4a542e9babdceb321a77ef5c8837ad70177a6a..d6847d7b123d4ca0fe4a9100b751b673d8801cd9 100644 (file)
@@ -507,6 +507,44 @@ void d_drop(struct dentry *dentry)
 }
 EXPORT_SYMBOL(d_drop);
 
+static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
+{
+       struct dentry *next;
+       /*
+        * Inform d_walk() and shrink_dentry_list() that we are no longer
+        * attached to the dentry tree
+        */
+       dentry->d_flags |= DCACHE_DENTRY_KILLED;
+       if (unlikely(list_empty(&dentry->d_child)))
+               return;
+       __list_del_entry(&dentry->d_child);
+       /*
+        * Cursors can move around the list of children.  While we'd been
+        * a normal list member, it didn't matter - ->d_child.next would've
+        * been updated.  However, from now on it won't be and for the
+        * things like d_walk() it might end up with a nasty surprise.
+        * Normally d_walk() doesn't care about cursors moving around -
+        * ->d_lock on parent prevents that and since a cursor has no children
+        * of its own, we get through it without ever unlocking the parent.
+        * There is one exception, though - if we ascend from a child that
+        * gets killed as soon as we unlock it, the next sibling is found
+        * using the value left in its ->d_child.next.  And if _that_
+        * pointed to a cursor, and cursor got moved (e.g. by lseek())
+        * before d_walk() regains parent->d_lock, we'll end up skipping
+        * everything the cursor had been moved past.
+        *
+        * Solution: make sure that the pointer left behind in ->d_child.next
+        * points to something that won't be moving around.  I.e. skip the
+        * cursors.
+        */
+       while (dentry->d_child.next != &parent->d_subdirs) {
+               next = list_entry(dentry->d_child.next, struct dentry, d_child);
+               if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
+                       break;
+               dentry->d_child.next = next->d_child.next;
+       }
+}
+
 static void __dentry_kill(struct dentry *dentry)
 {
        struct dentry *parent = NULL;
@@ -532,12 +570,7 @@ static void __dentry_kill(struct dentry *dentry)
        }
        /* if it was on the hash then remove it */
        __d_drop(dentry);
-       __list_del_entry(&dentry->d_child);
-       /*
-        * Inform d_walk() that we are no longer attached to the
-        * dentry tree
-        */
-       dentry->d_flags |= DCACHE_DENTRY_KILLED;
+       dentry_unlist(dentry, parent);
        if (parent)
                spin_unlock(&parent->d_lock);
        dentry_iput(dentry);
@@ -1203,6 +1236,9 @@ resume:
                struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
                next = tmp->next;
 
+               if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
+                       continue;
+
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 
                ret = enter(data, dentry);
@@ -1636,7 +1672,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
        struct dentry *dentry = __d_alloc(parent->d_sb, name);
        if (!dentry)
                return NULL;
-
+       dentry->d_flags |= DCACHE_RCUACCESS;
        spin_lock(&parent->d_lock);
        /*
         * don't need child lock because it is not subject
@@ -1651,6 +1687,16 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 }
 EXPORT_SYMBOL(d_alloc);
 
+struct dentry *d_alloc_cursor(struct dentry * parent)
+{
+       struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
+       if (dentry) {
+               dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
+               dentry->d_parent = dget(parent);
+       }
+       return dentry;
+}
+
 /**
  * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
  * @sb: the superblock
@@ -2358,7 +2404,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
 {
        BUG_ON(!d_unhashed(entry));
        hlist_bl_lock(b);
-       entry->d_flags |= DCACHE_RCUACCESS;
        hlist_bl_add_head_rcu(&entry->d_hash, b);
        hlist_bl_unlock(b);
 }
@@ -2458,7 +2503,6 @@ retry:
                rcu_read_unlock();
                goto retry;
        }
-       rcu_read_unlock();
        /*
         * No changes for the parent since the beginning of d_lookup().
         * Since all removals from the chain happen with hlist_bl_lock(),
@@ -2471,8 +2515,6 @@ retry:
                        continue;
                if (dentry->d_parent != parent)
                        continue;
-               if (d_unhashed(dentry))
-                       continue;
                if (parent->d_flags & DCACHE_OP_COMPARE) {
                        int tlen = dentry->d_name.len;
                        const char *tname = dentry->d_name.name;
@@ -2484,9 +2526,18 @@ retry:
                        if (dentry_cmp(dentry, str, len))
                                continue;
                }
-               dget(dentry);
                hlist_bl_unlock(b);
-               /* somebody is doing lookup for it right now; wait for it */
+               /* now we can try to grab a reference */
+               if (!lockref_get_not_dead(&dentry->d_lockref)) {
+                       rcu_read_unlock();
+                       goto retry;
+               }
+
+               rcu_read_unlock();
+               /*
+                * somebody is likely to be still doing lookup for it;
+                * wait for them to finish
+                */
                spin_lock(&dentry->d_lock);
                d_wait_lookup(dentry);
                /*
@@ -2517,6 +2568,7 @@ retry:
                dput(new);
                return dentry;
        }
+       rcu_read_unlock();
        /* we can't take ->d_lock here; it's OK, though. */
        new->d_flags |= DCACHE_PAR_LOOKUP;
        new->d_wait = wq;
@@ -2843,6 +2895,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
        /* ... and switch them in the tree */
        if (IS_ROOT(dentry)) {
                /* splicing a tree */
+               dentry->d_flags |= DCACHE_RCUACCESS;
                dentry->d_parent = target->d_parent;
                target->d_parent = target;
                list_del_init(&target->d_child);
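
The key idea in dentry_unlist() above: a dentry leaving the tree may still be used as a resume point by d_walk(), so the stale ->d_child.next it leaves behind must not point at a cursor that can move. A toy singly-linked rendition of that skip; the kernel operates on a list_head under the parent's d_lock:

#include <stdbool.h>
#include <stddef.h>

struct node { struct node *next; bool is_cursor; };

/* called while unlinking 'dead'; 'end' marks the end of the sibling list */
static void leave_stable_resume_pointer(struct node *dead, struct node *end)
{
        while (dead->next != end && dead->next->is_cursor)
                dead->next = dead->next->next;  /* skip movable cursors */
}
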
index 9c1c9a01b7e5120da23888828300d3f1b16e04c4..592059f88e04f7757c9e3bc413ab78bec58dee09 100644 (file)
@@ -127,7 +127,6 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
                r = real_fops->open(inode, filp);
 
 out:
-       fops_put(real_fops);
        debugfs_use_file_finish(srcu_idx);
        return r;
 }
@@ -262,8 +261,10 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
 
        if (real_fops->open) {
                r = real_fops->open(inode, filp);
-
-               if (filp->f_op != proxy_fops) {
+               if (r) {
+                       replace_fops(filp, d_inode(dentry)->i_fop);
+                       goto free_proxy;
+               } else if (filp->f_op != proxy_fops) {
                        /* No protection against file removal anymore. */
                        WARN(1, "debugfs file owner replaced proxy fops: %pd",
                                dentry);
index 0b2954d7172d78e78ac98238dfb7b50c5dccb558..37c134a132c7dbaa561cbfe287dec1f6499fa624 100644 (file)
@@ -95,8 +95,6 @@ static struct ctl_table pty_root_table[] = {
 
 static DEFINE_MUTEX(allocated_ptys_lock);
 
-static struct vfsmount *devpts_mnt;
-
 struct pts_mount_opts {
        int setuid;
        int setgid;
@@ -104,7 +102,7 @@ struct pts_mount_opts {
        kgid_t   gid;
        umode_t mode;
        umode_t ptmxmode;
-       int newinstance;
+       int reserve;
        int max;
 };
 
@@ -117,11 +115,9 @@ static const match_table_t tokens = {
        {Opt_uid, "uid=%u"},
        {Opt_gid, "gid=%u"},
        {Opt_mode, "mode=%o"},
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
        {Opt_ptmxmode, "ptmxmode=%o"},
        {Opt_newinstance, "newinstance"},
        {Opt_max, "max=%d"},
-#endif
        {Opt_err, NULL}
 };
 
@@ -137,15 +133,48 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb)
        return sb->s_fs_info;
 }
 
-static inline struct super_block *pts_sb_from_inode(struct inode *inode)
+struct pts_fs_info *devpts_acquire(struct file *filp)
 {
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
-       if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
-               return inode->i_sb;
-#endif
-       if (!devpts_mnt)
-               return NULL;
-       return devpts_mnt->mnt_sb;
+       struct pts_fs_info *result;
+       struct path path;
+       struct super_block *sb;
+       int err;
+
+       path = filp->f_path;
+       path_get(&path);
+
+       /* Has the devpts filesystem already been found? */
+       sb = path.mnt->mnt_sb;
+       if (sb->s_magic != DEVPTS_SUPER_MAGIC) {
+               /* Is a devpts filesystem at "pts" in the same directory? */
+               err = path_pts(&path);
+               if (err) {
+                       result = ERR_PTR(err);
+                       goto out;
+               }
+
+               /* Is the path the root of a devpts filesystem? */
+               result = ERR_PTR(-ENODEV);
+               sb = path.mnt->mnt_sb;
+               if ((sb->s_magic != DEVPTS_SUPER_MAGIC) ||
+                   (path.mnt->mnt_root != sb->s_root))
+                       goto out;
+       }
+
+       /*
+        * pty code needs to hold extra references in case of last /dev/tty close
+        */
+       atomic_inc(&sb->s_active);
+       result = DEVPTS_SB(sb);
+
+out:
+       path_put(&path);
+       return result;
+}
+
+void devpts_release(struct pts_fs_info *fsi)
+{
+       deactivate_super(fsi->sb);
 }
 
 #define PARSE_MOUNT    0
@@ -154,9 +183,7 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode)
 /*
  * parse_mount_options():
  *     Set @opts to mount options specified in @data. If an option is not
- *     specified in @data, set it to its default value. The exception is
- *     'newinstance' option which can only be set/cleared on a mount (i.e.
- *     cannot be changed during remount).
+ *     specified in @data, set it to its default value.
  *
  * Note: @data may be NULL (in which case all options are set to default).
  */
@@ -174,9 +201,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
        opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
        opts->max     = NR_UNIX98_PTY_MAX;
 
-       /* newinstance makes sense only on initial mount */
+       /* Only allow instances mounted from the initial mount
+        * namespace to tap the reserve pool of ptys.
+        */
        if (op == PARSE_MOUNT)
-               opts->newinstance = 0;
+               opts->reserve =
+                       (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns);
 
        while ((p = strsep(&data, ",")) != NULL) {
                substring_t args[MAX_OPT_ARGS];
@@ -211,16 +241,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
                                return -EINVAL;
                        opts->mode = option & S_IALLUGO;
                        break;
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
                case Opt_ptmxmode:
                        if (match_octal(&args[0], &option))
                                return -EINVAL;
                        opts->ptmxmode = option & S_IALLUGO;
                        break;
                case Opt_newinstance:
-                       /* newinstance makes sense only on initial mount */
-                       if (op == PARSE_MOUNT)
-                               opts->newinstance = 1;
                        break;
                case Opt_max:
                        if (match_int(&args[0], &option) ||
@@ -228,7 +254,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
                                return -EINVAL;
                        opts->max = option;
                        break;
-#endif
                default:
                        pr_err("called with bogus options\n");
                        return -EINVAL;
@@ -238,7 +263,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts)
        return 0;
 }
 
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
 static int mknod_ptmx(struct super_block *sb)
 {
        int mode;
@@ -305,12 +329,6 @@ static void update_ptmx_mode(struct pts_fs_info *fsi)
                inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode;
        }
 }
-#else
-static inline void update_ptmx_mode(struct pts_fs_info *fsi)
-{
-       return;
-}
-#endif
 
 static int devpts_remount(struct super_block *sb, int *flags, char *data)
 {
@@ -344,11 +362,9 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root)
                seq_printf(seq, ",gid=%u",
                           from_kgid_munged(&init_user_ns, opts->gid));
        seq_printf(seq, ",mode=%03o", opts->mode);
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
        seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode);
        if (opts->max < NR_UNIX98_PTY_MAX)
                seq_printf(seq, ",max=%d", opts->max);
-#endif
 
        return 0;
 }
@@ -410,40 +426,11 @@ fail:
        return -ENOMEM;
 }
 
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
-static int compare_init_pts_sb(struct super_block *s, void *p)
-{
-       if (devpts_mnt)
-               return devpts_mnt->mnt_sb == s;
-       return 0;
-}
-
 /*
  * devpts_mount()
  *
- *     If the '-o newinstance' mount option was specified, mount a new
- *     (private) instance of devpts.  PTYs created in this instance are
- *     independent of the PTYs in other devpts instances.
- *
- *     If the '-o newinstance' option was not specified, mount/remount the
- *     initial kernel mount of devpts.  This type of mount gives the
- *     legacy, single-instance semantics.
- *
- *     The 'newinstance' option is needed to support multiple namespace
- *     semantics in devpts while preserving backward compatibility of the
- *     current 'single-namespace' semantics. i.e all mounts of devpts
- *     without the 'newinstance' mount option should bind to the initial
- *     kernel mount, like mount_single().
- *
- *     Mounts with 'newinstance' option create a new, private namespace.
- *
- *     NOTE:
- *
- *     For single-mount semantics, devpts cannot use mount_single(),
- *     because mount_single()/sget() find and use the super-block from
- *     the most recent mount of devpts. But that recent mount may be a
- *     'newinstance' mount and mount_single() would pick the newinstance
- *     super-block instead of the initial super-block.
+ *     Mount a new (private) instance of devpts.  PTYs created in this
+ *     instance are independent of the PTYs in other devpts instances.
  */
 static struct dentry *devpts_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
@@ -456,18 +443,7 @@ static struct dentry *devpts_mount(struct file_system_type *fs_type,
        if (error)
                return ERR_PTR(error);
 
-       /* Require newinstance for all user namespace mounts to ensure
-        * the mount options are not changed.
-        */
-       if ((current_user_ns() != &init_user_ns) && !opts.newinstance)
-               return ERR_PTR(-EINVAL);
-
-       if (opts.newinstance)
-               s = sget(fs_type, NULL, set_anon_super, flags, NULL);
-       else
-               s = sget(fs_type, compare_init_pts_sb, set_anon_super, flags,
-                        NULL);
-
+       s = sget(fs_type, NULL, set_anon_super, flags, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
 
@@ -491,18 +467,6 @@ out_undo_sget:
        return ERR_PTR(error);
 }
 
-#else
-/*
- * This supports only the legacy single-instance semantics (no
- * multiple-instance semantics)
- */
-static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags,
-               const char *dev_name, void *data)
-{
-       return mount_single(fs_type, flags, data, devpts_fill_super);
-}
-#endif
-
 static void devpts_kill_sb(struct super_block *sb)
 {
        struct pts_fs_info *fsi = DEVPTS_SB(sb);
@@ -516,9 +480,7 @@ static struct file_system_type devpts_fs_type = {
        .name           = "devpts",
        .mount          = devpts_mount,
        .kill_sb        = devpts_kill_sb,
-#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES
        .fs_flags       = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT,
-#endif
 };
 
 /*
@@ -531,16 +493,13 @@ int devpts_new_index(struct pts_fs_info *fsi)
        int index;
        int ida_ret;
 
-       if (!fsi)
-               return -ENODEV;
-
 retry:
        if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
                return -ENOMEM;
 
        mutex_lock(&allocated_ptys_lock);
-       if (pty_count >= pty_limit -
-                       (fsi->mount_opts.newinstance ? pty_reserve : 0)) {
+       if (pty_count >= (pty_limit -
+                         (fsi->mount_opts.reserve ? 0 : pty_reserve))) {
                mutex_unlock(&allocated_ptys_lock);
                return -ENOSPC;
        }
@@ -571,30 +530,6 @@ void devpts_kill_index(struct pts_fs_info *fsi, int idx)
        mutex_unlock(&allocated_ptys_lock);
 }
 
-/*
- * pty code needs to hold extra references in case of last /dev/tty close
- */
-struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
-{
-       struct super_block *sb;
-       struct pts_fs_info *fsi;
-
-       sb = pts_sb_from_inode(ptmx_inode);
-       if (!sb)
-               return NULL;
-       fsi = DEVPTS_SB(sb);
-       if (!fsi)
-               return NULL;
-
-       atomic_inc(&sb->s_active);
-       return fsi;
-}
-
-void devpts_put_ref(struct pts_fs_info *fsi)
-{
-       deactivate_super(fsi->sb);
-}
-
 /**
  * devpts_pty_new -- create a new inode in /dev/pts/
  * @ptmx_inode: inode of the master
@@ -607,16 +542,12 @@ void devpts_put_ref(struct pts_fs_info *fsi)
 struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
 {
        struct dentry *dentry;
-       struct super_block *sb;
+       struct super_block *sb = fsi->sb;
        struct inode *inode;
        struct dentry *root;
        struct pts_mount_opts *opts;
        char s[12];
 
-       if (!fsi)
-               return ERR_PTR(-ENODEV);
-
-       sb = fsi->sb;
        root = sb->s_root;
        opts = &fsi->mount_opts;
 
@@ -676,20 +607,8 @@ void devpts_pty_kill(struct dentry *dentry)
 static int __init init_devpts_fs(void)
 {
        int err = register_filesystem(&devpts_fs_type);
-       struct ctl_table_header *table;
-
        if (!err) {
-               struct vfsmount *mnt;
-
-               table = register_sysctl_table(pty_root_table);
-               mnt = kern_mount(&devpts_fs_type);
-               if (IS_ERR(mnt)) {
-                       err = PTR_ERR(mnt);
-                       unregister_filesystem(&devpts_fs_type);
-                       unregister_sysctl_table(table);
-               } else {
-                       devpts_mnt = mnt;
-               }
+               register_sysctl_table(pty_root_table);
        }
        return err;
 }
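
With the kernel-internal devpts mount gone, every mount is its own instance and the old newinstance-based reserve logic inverts: only mounts made from the initial mount namespace (opts->reserve) may dip into the reserve pool. A sketch of the admission check with illustrative limits; both are sysctl-controlled in the kernel:

#include <stdbool.h>

#define PTY_LIMIT   4096
#define PTY_RESERVE 1024

static bool may_allocate_pty(int pty_count, bool reserve)
{
        /* non-reserve mounts must stop PTY_RESERVE short of the limit */
        return pty_count < (PTY_LIMIT - (reserve ? 0 : PTY_RESERVE));
}
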
index 0d8eb3455b34d68cde59a48b5b46da9b67498445..e5e29f8c920b18bc6959cdb16d2ee57fec7d45a6 100644 (file)
@@ -45,7 +45,7 @@
  * ecryptfs_to_hex
  * @dst: Buffer to take hex character representation of contents of
  *       src; must be at least of size (src_size * 2)
- * @src: Buffer to be converted to a hex string respresentation
+ * @src: Buffer to be converted to a hex string representation
  * @src_size: number of bytes to convert
  */
 void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
@@ -60,7 +60,7 @@ void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
  * ecryptfs_from_hex
  * @dst: Buffer to take the bytes from src hex; must be at least of
  *       size (src_size / 2)
- * @src: Buffer to be converted from a hex string respresentation to raw value
+ * @src: Buffer to be converted from a hex string representation to raw value
  * @dst_size: size of dst buffer, or number of hex characters pairs to convert
  */
 void ecryptfs_from_hex(char *dst, char *src, int dst_size)
@@ -953,7 +953,7 @@ struct ecryptfs_cipher_code_str_map_elem {
 };
 
 /* Add support for additional ciphers by adding elements here. The
- * cipher_code is whatever OpenPGP applicatoins use to identify the
+ * cipher_code is whatever OpenPGP applications use to identify the
  * ciphers. List in order of probability. */
 static struct ecryptfs_cipher_code_str_map_elem
 ecryptfs_cipher_code_str_map[] = {
@@ -1410,7 +1410,7 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
  *
  * Common entry point for reading file metadata. From here, we could
  * retrieve the header information from the header region of the file,
- * the xattr region of the file, or some other repostory that is
+ * the xattr region of the file, or some other repository that is
  * stored separately from the file itself. The current implementation
  * supports retrieving the metadata information from the file contents
  * and from the xattr region.
index 7000b96b783ef04a56f056a83df595c8093533aa..ca4e83750214adc2b0ea77358937c167d02d6ba7 100644 (file)
@@ -169,9 +169,22 @@ out:
        return rc;
 }
 
+static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct file *lower_file = ecryptfs_file_to_lower(file);
+       /*
+        * Don't allow mmap on top of file systems that don't support it
+        * natively.  If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
+        * allows recursive mounting, this will need to be extended.
+        */
+       if (!lower_file->f_op->mmap)
+               return -ENODEV;
+       return generic_file_mmap(file, vma);
+}
+
 /**
  * ecryptfs_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
  * @file: Structure to return filled in
  *
  * Opens the file specified by inode.
@@ -240,7 +253,7 @@ out:
 
 /**
  * ecryptfs_dir_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
  * @file: Structure to return filled in
  *
  * Opens the file specified by inode.
@@ -403,7 +416,7 @@ const struct file_operations ecryptfs_main_fops = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-       .mmap = generic_file_mmap,
+       .mmap = ecryptfs_mmap,
        .open = ecryptfs_open,
        .flush = ecryptfs_flush,
        .release = ecryptfs_release,
index 1698132d0e576d4fea3690f56190242de33645fc..6120044951415d7840308eea25a21bd125085109 100644 (file)
@@ -738,8 +738,7 @@ static void ecryptfs_free_kmem_caches(void)
                struct ecryptfs_cache_info *info;
 
                info = &ecryptfs_cache_infos[i];
-               if (*(info->cache))
-                       kmem_cache_destroy(*(info->cache));
+               kmem_cache_destroy(*(info->cache));
        }
 }
 
index 989a2cef6b765023b670d75af4fb3bdbf6f650e6..fe7e83a45efffeb85329b194b03208e6223817e3 100644 (file)
@@ -483,9 +483,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
                goto out_free;
        }
        inode->i_state |= I_WB_SWITCH;
+       __iget(inode);
        spin_unlock(&inode->i_lock);
 
-       ihold(inode);
        isw->inode = inode;
 
        atomic_inc(&isw_nr_in_flight);
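
Replacing ihold() after the unlock with __iget() under i_lock closes the window in which the inode could be evicted between dropping the lock and taking the reference. A sketch of the take-the-reference-under-the-lock rule, with a mutex and plain counter standing in for i_lock and i_count:

#include <pthread.h>

struct obj {
        pthread_mutex_t lock;
        int refcount;
        int being_torn_down;
};

static int pin(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        if (o->being_torn_down) {
                pthread_mutex_unlock(&o->lock);
                return -1;
        }
        o->refcount++;                  /* __iget() while still locked */
        pthread_mutex_unlock(&o->lock);
        /* our reference now keeps the object alive */
        return 0;
}
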
index 3078b679fcd1b4a74b1839a7d5cc5692360bf2e1..c8c4f79c7ce167b65549557ca397d9f49a43ca10 100644 (file)
@@ -887,6 +887,8 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie)
                        put_page(results[i]);
        }
 
+       wake_up_bit(&cookie->flags, 0);
+
        _leave("");
 }
 
index ccd4971cc6c1ac787ede8998d73f3e68f3a1294a..cca7b048c07b26e4919769fed461b46771005a19 100644 (file)
@@ -341,8 +341,10 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
        struct dentry *newent;
        bool outarg_valid = true;
 
+       fuse_lock_inode(dir);
        err = fuse_lookup_name(dir->i_sb, get_node_id(dir), &entry->d_name,
                               &outarg, &inode);
+       fuse_unlock_inode(dir);
        if (err == -ENOENT) {
                outarg_valid = false;
                err = 0;
@@ -478,7 +480,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
        struct fuse_conn *fc = get_fuse_conn(dir);
        struct dentry *res = NULL;
 
-       if (d_unhashed(entry)) {
+       if (d_in_lookup(entry)) {
                res = fuse_lookup(dir, entry, 0);
                if (IS_ERR(res))
                        return PTR_ERR(res);
@@ -1341,7 +1343,9 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
                fuse_read_fill(req, file, ctx->pos, PAGE_SIZE,
                               FUSE_READDIR);
        }
+       fuse_lock_inode(inode);
        fuse_request_send(fc, req);
+       fuse_unlock_inode(inode);
        nbytes = req->out.args[0].size;
        err = req->out.h.error;
        fuse_put_request(fc, req);
index eddbe02c402892cc00970844021fd12512f133b4..929c383432b034f3f695e0cc83e55e9b48c35898 100644 (file)
@@ -110,6 +110,9 @@ struct fuse_inode {
 
        /** Miscellaneous bits describing inode state */
        unsigned long state;
+
+       /** Lock for serializing lookup and readdir for backward compatibility */
+       struct mutex mutex;
 };
 
 /** FUSE inode state bits */
@@ -540,6 +543,9 @@ struct fuse_conn {
        /** write-back cache policy (default is write-through) */
        unsigned writeback_cache:1;
 
+       /** allow parallel lookups and readdir (default is serialized) */
+       unsigned parallel_dirops:1;
+
        /*
         * The following bitfields are only for optimization purposes
         * and hence races in setting them will not cause malfunction
@@ -956,4 +962,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
 
 void fuse_set_initialized(struct fuse_conn *fc);
 
+void fuse_unlock_inode(struct inode *inode);
+void fuse_lock_inode(struct inode *inode);
+
 #endif /* _FS_FUSE_I_H */
index 1ce67668a8e17d2d721c456ed02865e01c01f7a2..9961d8432ce335ba445df4a36824cd12912f1419 100644 (file)
@@ -97,6 +97,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        INIT_LIST_HEAD(&fi->queued_writes);
        INIT_LIST_HEAD(&fi->writepages);
        init_waitqueue_head(&fi->page_waitq);
+       mutex_init(&fi->mutex);
        fi->forget = fuse_alloc_forget();
        if (!fi->forget) {
                kmem_cache_free(fuse_inode_cachep, inode);
@@ -117,6 +118,7 @@ static void fuse_destroy_inode(struct inode *inode)
        struct fuse_inode *fi = get_fuse_inode(inode);
        BUG_ON(!list_empty(&fi->write_files));
        BUG_ON(!list_empty(&fi->queued_writes));
+       mutex_destroy(&fi->mutex);
        kfree(fi->forget);
        call_rcu(&inode->i_rcu, fuse_i_callback);
 }
@@ -351,6 +353,18 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
        return 0;
 }
 
+void fuse_lock_inode(struct inode *inode)
+{
+       if (!get_fuse_conn(inode)->parallel_dirops)
+               mutex_lock(&get_fuse_inode(inode)->mutex);
+}
+
+void fuse_unlock_inode(struct inode *inode)
+{
+       if (!get_fuse_conn(inode)->parallel_dirops)
+               mutex_unlock(&get_fuse_inode(inode)->mutex);
+}
+
 static void fuse_umount_begin(struct super_block *sb)
 {
        fuse_abort_conn(get_fuse_conn_super(sb));
@@ -898,6 +912,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                fc->async_dio = 1;
                        if (arg->flags & FUSE_WRITEBACK_CACHE)
                                fc->writeback_cache = 1;
+                       if (arg->flags & FUSE_PARALLEL_DIROPS)
+                               fc->parallel_dirops = 1;
                        if (arg->time_gran && arg->time_gran <= 1000000000)
                                fc->sb->s_time_gran = arg->time_gran;
                } else {
@@ -928,7 +944,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
                FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
                FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
                FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
-               FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
+               FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
+               FUSE_PARALLEL_DIROPS;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
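
FUSE_PARALLEL_DIROPS follows the usual INIT handshake: the kernel advertises the feature, and only if the userspace server echoes the bit back does the connection set parallel_dirops, which fuse_lock_inode()/fuse_unlock_inode() consult. A sketch of that latch; the flag value mirrors the uapi header but treat it as illustrative here:

#include <stdbool.h>
#include <stdint.h>

#define FUSE_PARALLEL_DIROPS (1u << 18)         /* as in <uapi/linux/fuse.h> */

struct fuse_conn_sketch { bool parallel_dirops; };

static void process_init_reply_sketch(struct fuse_conn_sketch *fc,
                                      uint32_t acked_flags)
{
        if (acked_flags & FUSE_PARALLEL_DIROPS)
                fc->parallel_dirops = true;     /* else dir ops stay serialized */
}
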
index 21dc784f66c2268d2e857314104847b600cc09de..9bad79fede37fb7e27023df1c1908e8a1783fb88 100644 (file)
@@ -1189,7 +1189,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
        struct dentry *d;
        bool excl = !!(flags & O_EXCL);
 
-       if (!d_unhashed(dentry))
+       if (!d_in_lookup(dentry))
                goto skip_lookup;
 
        d = __gfs2_lookup(dir, dentry, file, opened);
index b71deeecea17939d85bce1c54f30da77f1c4d8a1..f57ced528cde71d498ecb6339188cf6d4cd216f9 100644 (file)
@@ -130,6 +130,7 @@ extern int invalidate_inodes(struct super_block *, bool);
 extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
 extern int d_set_mounted(struct dentry *dentry);
 extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
+extern struct dentry *d_alloc_cursor(struct dentry *);
 
 /*
  * read_write.c
index b31852f76f46585137021df6266a18ba743b8528..e3ca4b4cac84a4597b913888359cce6a4540ff09 100644 (file)
@@ -2329,18 +2329,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)
 
        BUG_ON(size & (size-1)); /* Must be a power of 2 */
 
-       flags |= __GFP_REPEAT;
-       if (size == PAGE_SIZE)
-               ptr = (void *)__get_free_pages(flags, 0);
-       else if (size > PAGE_SIZE) {
-               int order = get_order(size);
-
-               if (order < 3)
-                       ptr = (void *)__get_free_pages(flags, order);
-               else
-                       ptr = vmalloc(size);
-       } else
+       if (size < PAGE_SIZE)
                ptr = kmem_cache_alloc(get_slab(size), flags);
+       else
+               ptr = (void *)__get_free_pages(flags, get_order(size));
 
        /* Check alignment; SLUB has gotten this wrong in the past,
         * and this can lead to user data corruption! */
@@ -2351,20 +2343,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)
 
 void jbd2_free(void *ptr, size_t size)
 {
-       if (size == PAGE_SIZE) {
-               free_pages((unsigned long)ptr, 0);
-               return;
-       }
-       if (size > PAGE_SIZE) {
-               int order = get_order(size);
-
-               if (order < 3)
-                       free_pages((unsigned long)ptr, order);
-               else
-                       vfree(ptr);
-               return;
-       }
-       kmem_cache_free(get_slab(size), ptr);
+       if (size < PAGE_SIZE)
+               kmem_cache_free(get_slab(size), ptr);
+       else
+               free_pages((unsigned long)ptr, get_order(size));
 };
 
 /*
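
jbd2_alloc() and jbd2_free() collapse to two size classes above: slab caches below a page, whole pages otherwise. The essential invariant is that free branches exactly like alloc, or memory is handed back to the wrong allocator. Userspace stand-ins for the two paths:

#include <stdlib.h>

#define PAGE_SIZE 4096u

static void *buf_alloc(size_t size)
{
        if (size < PAGE_SIZE)
                return malloc(size);    /* kmem_cache_alloc(get_slab(size)) */
        /* __get_free_pages(flags, get_order(size)) rounds up to whole pages */
        return aligned_alloc(PAGE_SIZE,
                             (size + PAGE_SIZE - 1) & ~(size_t)(PAGE_SIZE - 1));
}

static void buf_free(void *ptr, size_t size)
{
        /* must mirror the allocation branch exactly */
        if (size < PAGE_SIZE)
                free(ptr);              /* kmem_cache_free() in the kernel */
        else
                free(ptr);              /* free_pages() in the kernel */
}
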
index 3db2721144c27d133cc7f8652d269900c9865ea5..74dc8b9e7f53ab394113783c96fb754c045735fa 100644 (file)
@@ -71,9 +71,7 @@ EXPORT_SYMBOL(simple_lookup);
 
 int dcache_dir_open(struct inode *inode, struct file *file)
 {
-       static struct qstr cursor_name = QSTR_INIT(".", 1);
-
-       file->private_data = d_alloc(file->f_path.dentry, &cursor_name);
+       file->private_data = d_alloc_cursor(file->f_path.dentry);
 
        return file->private_data ? 0 : -ENOMEM;
 }
@@ -86,6 +84,61 @@ int dcache_dir_close(struct inode *inode, struct file *file)
 }
 EXPORT_SYMBOL(dcache_dir_close);
 
+/* parent is locked at least shared */
+static struct dentry *next_positive(struct dentry *parent,
+                                   struct list_head *from,
+                                   int count)
+{
+       unsigned *seq = &parent->d_inode->i_dir_seq, n;
+       struct dentry *res;
+       struct list_head *p;
+       bool skipped;
+       int i;
+
+retry:
+       i = count;
+       skipped = false;
+       n = smp_load_acquire(seq) & ~1;
+       res = NULL;
+       rcu_read_lock();
+       for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+               struct dentry *d = list_entry(p, struct dentry, d_child);
+               if (!simple_positive(d)) {
+                       skipped = true;
+               } else if (!--i) {
+                       res = d;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       if (skipped) {
+               smp_rmb();
+               if (unlikely(*seq != n))
+                       goto retry;
+       }
+       return res;
+}
+
+static void move_cursor(struct dentry *cursor, struct list_head *after)
+{
+       struct dentry *parent = cursor->d_parent;
+       unsigned n, *seq = &parent->d_inode->i_dir_seq;
+       spin_lock(&parent->d_lock);
+       for (;;) {
+               n = *seq;
+               if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
+                       break;
+               cpu_relax();
+       }
+       __list_del(cursor->d_child.prev, cursor->d_child.next);
+       if (after)
+               list_add(&cursor->d_child, after);
+       else
+               list_add_tail(&cursor->d_child, &parent->d_subdirs);
+       smp_store_release(seq, n + 2);
+       spin_unlock(&parent->d_lock);
+}
+
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
 {
        struct dentry *dentry = file->f_path.dentry;
@@ -101,25 +154,14 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
        if (offset != file->f_pos) {
                file->f_pos = offset;
                if (file->f_pos >= 2) {
-                       struct list_head *p;
                        struct dentry *cursor = file->private_data;
+                       struct dentry *to;
                        loff_t n = file->f_pos - 2;
 
-                       spin_lock(&dentry->d_lock);
-                       /* d_lock not required for cursor */
-                       list_del(&cursor->d_child);
-                       p = dentry->d_subdirs.next;
-                       while (n && p != &dentry->d_subdirs) {
-                               struct dentry *next;
-                               next = list_entry(p, struct dentry, d_child);
-                               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
-                               if (simple_positive(next))
-                                       n--;
-                               spin_unlock(&next->d_lock);
-                               p = p->next;
-                       }
-                       list_add_tail(&cursor->d_child, p);
-                       spin_unlock(&dentry->d_lock);
+                       inode_lock_shared(dentry->d_inode);
+                       to = next_positive(dentry, &dentry->d_subdirs, n);
+                       move_cursor(cursor, to ? &to->d_child : NULL);
+                       inode_unlock_shared(dentry->d_inode);
                }
        }
        return offset;
@@ -142,36 +184,25 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
 {
        struct dentry *dentry = file->f_path.dentry;
        struct dentry *cursor = file->private_data;
-       struct list_head *p, *q = &cursor->d_child;
+       struct list_head *p = &cursor->d_child;
+       struct dentry *next;
+       bool moved = false;
 
        if (!dir_emit_dots(file, ctx))
                return 0;
-       spin_lock(&dentry->d_lock);
-       if (ctx->pos == 2)
-               list_move(q, &dentry->d_subdirs);
 
-       for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
-               struct dentry *next = list_entry(p, struct dentry, d_child);
-               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
-               if (!simple_positive(next)) {
-                       spin_unlock(&next->d_lock);
-                       continue;
-               }
-
-               spin_unlock(&next->d_lock);
-               spin_unlock(&dentry->d_lock);
+       if (ctx->pos == 2)
+               p = &dentry->d_subdirs;
+       while ((next = next_positive(dentry, p, 1)) != NULL) {
                if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
                              d_inode(next)->i_ino, dt_type(d_inode(next))))
-                       return 0;
-               spin_lock(&dentry->d_lock);
-               spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
-               /* next is still alive */
-               list_move(q, p);
-               spin_unlock(&next->d_lock);
-               p = q;
+                       break;
+               moved = true;
+               p = &next->d_child;
                ctx->pos++;
        }
-       spin_unlock(&dentry->d_lock);
+       if (moved)
+               move_cursor(cursor, p);
        return 0;
 }
 EXPORT_SYMBOL(dcache_readdir);
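
next_positive() and move_cursor() above coordinate through i_dir_seq as a tiny seqlock: a writer makes the counter odd via cmpxchg while it moves the cursor and releases it two higher; a reader that skipped entries rechecks the counter and retries if it changed. A simplified C11-atomics rendition; the kernel version only retries when something was actually skipped:

#include <stdatomic.h>

static _Atomic unsigned dir_seq;

static void writer_move_cursor(void)
{
        unsigned n;

        do {                            /* odd counter = writer in progress */
                n = atomic_load(&dir_seq) & ~1u;
        } while (!atomic_compare_exchange_weak(&dir_seq, &n, n | 1));
        /* ... unlink and re-insert the cursor ... */
        atomic_store_explicit(&dir_seq, n + 2, memory_order_release);
}

static void reader_walk(void)
{
        unsigned n;

        do {
                n = atomic_load_explicit(&dir_seq, memory_order_acquire) & ~1u;
                /* ... walk the children, skipping negative entries ... */
        } while (atomic_load_explicit(&dir_seq, memory_order_acquire) != n);
}
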
index 154a107cd376238349ef6a889bb85ab0d23a0e31..fc4084ef4736d47a410e27052497cd00829204c2 100644 (file)
@@ -335,12 +335,17 @@ static struct notifier_block lockd_inet6addr_notifier = {
 };
 #endif
 
-static void lockd_svc_exit_thread(void)
+static void lockd_unregister_notifiers(void)
 {
        unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
 #endif
+}
+
+static void lockd_svc_exit_thread(void)
+{
+       lockd_unregister_notifiers();
        svc_exit_thread(nlmsvc_rqst);
 }
 
@@ -462,7 +467,7 @@ int lockd_up(struct net *net)
         * Note: svc_serv structures have an initial use count of 1,
         * so we exit through here on both success and failure.
         */
-err_net:
+err_put:
        svc_destroy(serv);
 err_create:
        mutex_unlock(&nlmsvc_mutex);
@@ -470,7 +475,9 @@ err_create:
 
 err_start:
        lockd_down_net(serv, net);
-       goto err_net;
+err_net:
+       lockd_unregister_notifiers();
+       goto err_put;
 }
 EXPORT_SYMBOL_GPL(lockd_up);
 
index 7c5f91be9b65c4ddabe441d00bbda616ca0c2f26..ee1b15f6fc135c33e2e0564eb5e10db927080533 100644 (file)
@@ -1628,7 +1628,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
 {
        struct file_lock *fl, *my_fl = NULL, *lease;
        struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct file_lock_context *ctx;
        bool is_deleg = (*flp)->fl_flags & FL_DELEG;
        int error;
index 4c4f95ac8aa5d535c1e919b4bbc9c394010bd88d..70580ab1445c89f8379756fc16e33223aca4ab0d 100644 (file)
@@ -1416,21 +1416,28 @@ static void follow_mount(struct path *path)
        }
 }
 
+static int path_parent_directory(struct path *path)
+{
+       struct dentry *old = path->dentry;
+       /* rare case of legitimate dget_parent()... */
+       path->dentry = dget_parent(path->dentry);
+       dput(old);
+       if (unlikely(!path_connected(path)))
+               return -ENOENT;
+       return 0;
+}
+
 static int follow_dotdot(struct nameidata *nd)
 {
        while(1) {
-               struct dentry *old = nd->path.dentry;
-
                if (nd->path.dentry == nd->root.dentry &&
                    nd->path.mnt == nd->root.mnt) {
                        break;
                }
                if (nd->path.dentry != nd->path.mnt->mnt_root) {
-                       /* rare case of legitimate dget_parent()... */
-                       nd->path.dentry = dget_parent(nd->path.dentry);
-                       dput(old);
-                       if (unlikely(!path_connected(&nd->path)))
-                               return -ENOENT;
+                       int ret = path_parent_directory(&nd->path);
+                       if (ret)
+                               return ret;
                        break;
                }
                if (!follow_up(&nd->path))
@@ -2514,6 +2521,34 @@ struct dentry *lookup_one_len_unlocked(const char *name,
 }
 EXPORT_SYMBOL(lookup_one_len_unlocked);
 
+#ifdef CONFIG_UNIX98_PTYS
+int path_pts(struct path *path)
+{
+       /* Find something mounted on "pts" in the same directory as
+        * the input path.
+        */
+       struct dentry *child, *parent;
+       struct qstr this;
+       int ret;
+
+       ret = path_parent_directory(path);
+       if (ret)
+               return ret;
+
+       parent = path->dentry;
+       this.name = "pts";
+       this.len = 3;
+       child = d_hash_and_lookup(parent, &this);
+       if (!child)
+               return -ENOENT;
+
+       path->dentry = child;
+       dput(parent);
+       follow_mount(path);
+       return 0;
+}
+#endif
+
 int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
                 struct path *path, int *empty)
 {
@@ -2995,9 +3030,13 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
                        }
                        if (*opened & FILE_CREATED)
                                fsnotify_create(dir, dentry);
-                       path->dentry = dentry;
-                       path->mnt = nd->path.mnt;
-                       return 1;
+                       if (unlikely(d_is_negative(dentry))) {
+                               error = -ENOENT;
+                       } else {
+                               path->dentry = dentry;
+                               path->mnt = nd->path.mnt;
+                               return 1;
+                       }
                }
        }
        dput(dentry);
@@ -3166,9 +3205,7 @@ static int do_last(struct nameidata *nd,
        int acc_mode = op->acc_mode;
        unsigned seq;
        struct inode *inode;
-       struct path save_parent = { .dentry = NULL, .mnt = NULL };
        struct path path;
-       bool retried = false;
        int error;
 
        nd->flags &= ~LOOKUP_PARENT;
@@ -3211,7 +3248,6 @@ static int do_last(struct nameidata *nd,
                        return -EISDIR;
        }
 
-retry_lookup:
        if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
                error = mnt_want_write(nd->path.mnt);
                if (!error)
@@ -3263,6 +3299,10 @@ retry_lookup:
                got_write = false;
        }
 
+       error = follow_managed(&path, nd);
+       if (unlikely(error < 0))
+               return error;
+
        if (unlikely(d_is_negative(path.dentry))) {
                path_to_nameidata(&path, nd);
                return -ENOENT;
@@ -3278,10 +3318,6 @@ retry_lookup:
                return -EEXIST;
        }
 
-       error = follow_managed(&path, nd);
-       if (unlikely(error < 0))
-               return error;
-
        seq = 0;        /* out of RCU mode, so the value doesn't matter */
        inode = d_backing_inode(path.dentry);
 finish_lookup:
@@ -3292,23 +3328,14 @@ finish_lookup:
        if (unlikely(error))
                return error;
 
-       if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
-               path_to_nameidata(&path, nd);
-       } else {
-               save_parent.dentry = nd->path.dentry;
-               save_parent.mnt = mntget(path.mnt);
-               nd->path.dentry = path.dentry;
-
-       }
+       path_to_nameidata(&path, nd);
        nd->inode = inode;
        nd->seq = seq;
        /* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
 finish_open:
        error = complete_walk(nd);
-       if (error) {
-               path_put(&save_parent);
+       if (error)
                return error;
-       }
        audit_inode(nd->name, nd->path.dentry, 0);
        error = -EISDIR;
        if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
@@ -3331,13 +3358,9 @@ finish_open_created:
                goto out;
        BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
        error = vfs_open(&nd->path, file, current_cred());
-       if (!error) {
-               *opened |= FILE_OPENED;
-       } else {
-               if (error == -EOPENSTALE)
-                       goto stale_open;
+       if (error)
                goto out;
-       }
+       *opened |= FILE_OPENED;
 opened:
        error = open_check_o_direct(file);
        if (!error)
@@ -3353,26 +3376,7 @@ out:
        }
        if (got_write)
                mnt_drop_write(nd->path.mnt);
-       path_put(&save_parent);
        return error;
-
-stale_open:
-       /* If no saved parent or already retried then can't retry */
-       if (!save_parent.dentry || retried)
-               goto out;
-
-       BUG_ON(save_parent.dentry != dir);
-       path_put(&nd->path);
-       nd->path = save_parent;
-       nd->inode = dir->d_inode;
-       save_parent.mnt = NULL;
-       save_parent.dentry = NULL;
-       if (got_write) {
-               mnt_drop_write(nd->path.mnt);
-               got_write = false;
-       }
-       retried = true;
-       goto retry_lookup;
 }
 
 static int do_tmpfile(struct nameidata *nd, unsigned flags,
index 4fb1691b435552c044134b56aa2032da35684b5b..419f746d851d1c1745fc0edca4a37459a4b17f35 100644 (file)
@@ -1562,6 +1562,7 @@ void __detach_mounts(struct dentry *dentry)
                goto out_unlock;
 
        lock_mount_hash();
+       event++;
        while (!hlist_empty(&mp->m_list)) {
                mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
                if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
@@ -2409,8 +2410,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
                        mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
                }
                if (type->fs_flags & FS_USERNS_VISIBLE) {
-                       if (!fs_fully_visible(type, &mnt_flags))
+                       if (!fs_fully_visible(type, &mnt_flags)) {
+                               put_filesystem(type);
                                return -EPERM;
+                       }
                }
        }
 
@@ -3245,6 +3248,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
                if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
                        mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
 
+               /* Don't miss readonly hidden in the superblock flags */
+               if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
+                       mnt_flags |= MNT_LOCK_READONLY;
+
                /* Verify the mount flags are equal to or more permissive
                 * than the proposed new mount.
                 */
@@ -3271,7 +3278,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
                list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
                        struct inode *inode = child->mnt_mountpoint->d_inode;
                        /* Only worry about locked mounts */
-                       if (!(mnt_flags & MNT_LOCKED))
+                       if (!(child->mnt.mnt_flags & MNT_LOCKED))
                                continue;
                        /* Is the directory permanently empty? */
                        if (!is_empty_dir_inode(inode))
index aaf7bd0cbae20216333042c5a082a7560f6af61f..19d93d0cd400f5ac175a9d257ec8a8ced0e04c21 100644 (file)
@@ -424,12 +424,17 @@ static int xdr_decode(nfs_readdir_descriptor_t *desc,
 static
 int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
 {
+       struct inode *inode;
        struct nfs_inode *nfsi;
 
        if (d_really_is_negative(dentry))
                return 0;
 
-       nfsi = NFS_I(d_inode(dentry));
+       inode = d_inode(dentry);
+       if (is_bad_inode(inode) || NFS_STALE(inode))
+               return 0;
+
+       nfsi = NFS_I(inode);
        if (entry->fattr->fileid == nfsi->fileid)
                return 1;
        if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0)
@@ -1363,7 +1368,6 @@ EXPORT_SYMBOL_GPL(nfs_dentry_operations);
 struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
 {
        struct dentry *res;
-       struct dentry *parent;
        struct inode *inode = NULL;
        struct nfs_fh *fhandle = NULL;
        struct nfs_fattr *fattr = NULL;
@@ -1393,7 +1397,6 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
        if (IS_ERR(label))
                goto out;
 
-       parent = dentry->d_parent;
        /* Protect against concurrent sillydeletes */
        trace_nfs_lookup_enter(dir, dentry, flags);
        error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
@@ -1482,11 +1485,13 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                    struct file *file, unsigned open_flags,
                    umode_t mode, int *opened)
 {
+       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        struct nfs_open_context *ctx;
        struct dentry *res;
        struct iattr attr = { .ia_valid = ATTR_OPEN };
        struct inode *inode;
        unsigned int lookup_flags = 0;
+       bool switched = false;
        int err;
 
        /* Expect a negative dentry */
@@ -1501,7 +1506,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 
        /* NFS only supports OPEN on regular files */
        if ((open_flags & O_DIRECTORY)) {
-               if (!d_unhashed(dentry)) {
+               if (!d_in_lookup(dentry)) {
                        /*
                         * Hashed negative dentry with O_DIRECTORY: dentry was
                         * revalidated and is fine, no need to perform lookup
@@ -1525,6 +1530,17 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                attr.ia_size = 0;
        }
 
+       if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) {
+               d_drop(dentry);
+               switched = true;
+               dentry = d_alloc_parallel(dentry->d_parent,
+                                         &dentry->d_name, &wq);
+               if (IS_ERR(dentry))
+                       return PTR_ERR(dentry);
+               if (unlikely(!d_in_lookup(dentry)))
+                       return finish_no_open(file, dentry);
+       }
+
        ctx = create_nfs_open_context(dentry, open_flags);
        err = PTR_ERR(ctx);
        if (IS_ERR(ctx))
@@ -1536,9 +1552,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                err = PTR_ERR(inode);
                trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
                put_nfs_open_context(ctx);
+               d_drop(dentry);
                switch (err) {
                case -ENOENT:
-                       d_drop(dentry);
                        d_add(dentry, NULL);
                        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
                        break;
@@ -1560,14 +1576,23 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
        trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
        put_nfs_open_context(ctx);
 out:
+       if (unlikely(switched)) {
+               d_lookup_done(dentry);
+               dput(dentry);
+       }
        return err;
 
 no_open:
        res = nfs_lookup(dir, dentry, lookup_flags);
-       err = PTR_ERR(res);
+       if (switched) {
+               d_lookup_done(dentry);
+               if (!res)
+                       res = dentry;
+               else
+                       dput(dentry);
+       }
        if (IS_ERR(res))
-               goto out;
-
+               return PTR_ERR(res);
        return finish_no_open(file, res);
 }
 EXPORT_SYMBOL_GPL(nfs_atomic_open);
index 979b3c4dee6aedbb354faefd1aca8cf1b3be8789..c7326c2af2c3d33df7a96497c427664d90f7861f 100644 (file)
@@ -353,10 +353,12 @@ static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 
        result = wait_for_completion_killable(&dreq->completion);
 
+       if (!result) {
+               result = dreq->count;
+               WARN_ON_ONCE(dreq->count < 0);
+       }
        if (!result)
                result = dreq->error;
-       if (!result)
-               result = dreq->count;
 
 out:
        return (ssize_t) result;
@@ -386,8 +388,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write)
 
        if (dreq->iocb) {
                long res = (long) dreq->error;
-               if (!res)
+               if (dreq->count != 0) {
                        res = (long) dreq->count;
+                       WARN_ON_ONCE(dreq->count < 0);
+               }
                dreq->iocb->ki_complete(dreq->iocb, res, 0);
        }
 
index 52e7d6869e3b2445dc6417875b3c753edb681ecc..dda689d7a8a706862e21737cb5189ab175d976e1 100644 (file)
@@ -282,6 +282,7 @@ nfs_init_locked(struct inode *inode, void *opaque)
        struct nfs_fattr        *fattr = desc->fattr;
 
        set_nfs_fileid(inode, fattr->fileid);
+       inode->i_mode = fattr->mode;
        nfs_copy_fh(NFS_FH(inode), desc->fh);
        return 0;
 }
index de97567795a52c523abfa5936829303a388560e8..ff416d0e24bc25215a991c282e8ae19a65d31832 100644 (file)
@@ -2882,12 +2882,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                        call_close |= is_wronly;
                else if (is_wronly)
                        calldata->arg.fmode |= FMODE_WRITE;
+               if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
+                       call_close |= is_rdwr;
        } else if (is_rdwr)
                calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
 
-       if (calldata->arg.fmode == 0)
-               call_close |= is_rdwr;
-
        if (!nfs4_valid_open_stateid(state))
                call_close = 0;
        spin_unlock(&state->owner->so_lock);
@@ -7924,8 +7923,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                        break;
                }
                lo = NFS_I(inode)->layout;
-               if (lo && nfs4_stateid_match(&lgp->args.stateid,
-                                       &lo->plh_stateid)) {
+               if (lo && !test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) &&
+                   nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
                        LIST_HEAD(head);
 
                        /*
@@ -7936,10 +7935,10 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                        pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
                        spin_unlock(&inode->i_lock);
                        pnfs_free_lseg_list(&head);
+                       status = -EAGAIN;
+                       goto out;
                } else
                        spin_unlock(&inode->i_lock);
-               status = -EAGAIN;
-               goto out;
        }
 
        status = nfs4_handle_exception(server, status, exception);
@@ -8036,7 +8035,10 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
                .flags = RPC_TASK_ASYNC,
        };
        struct pnfs_layout_segment *lseg = NULL;
-       struct nfs4_exception exception = { .timeout = *timeout };
+       struct nfs4_exception exception = {
+               .inode = inode,
+               .timeout = *timeout,
+       };
        int status = 0;
 
        dprintk("--> %s\n", __func__);
index 9679f47493640d4ce4a8a5e571727ece40e13625..834b875900d62addf6db7f6eb590ce5c2d1b09bc 100644 (file)
@@ -1488,9 +1488,9 @@ restart:
                                        }
                                        spin_unlock(&state->state_lock);
                                }
-                               nfs4_put_open_state(state);
                                clear_bit(NFS_STATE_RECLAIM_NOGRACE,
                                        &state->flags);
+                               nfs4_put_open_state(state);
                                spin_lock(&sp->so_lock);
                                goto restart;
                        }
index 0c7e0d45a4de6ee1fba11c40417d3fb01678049b..0fbe734cc38cb8d27ba2c8efaf985a676f4d0cd8 100644 (file)
@@ -361,8 +361,10 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
        list_del_init(&lseg->pls_list);
        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
        atomic_dec(&lo->plh_refcount);
-       if (list_empty(&lo->plh_segs))
+       if (list_empty(&lo->plh_segs)) {
+               set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+       }
        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
 }
 
@@ -1290,6 +1292,7 @@ alloc_init_layout_hdr(struct inode *ino,
        INIT_LIST_HEAD(&lo->plh_bulk_destroy);
        lo->plh_inode = ino;
        lo->plh_lc_cred = get_rpccred(ctx->cred);
+       lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
        return lo;
 }
 
@@ -1297,6 +1300,8 @@ static struct pnfs_layout_hdr *
 pnfs_find_alloc_layout(struct inode *ino,
                       struct nfs_open_context *ctx,
                       gfp_t gfp_flags)
+       __releases(&ino->i_lock)
+       __acquires(&ino->i_lock)
 {
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *new = NULL;
@@ -1565,8 +1570,7 @@ lookup_again:
         * stateid, or it has been invalidated, then we must use the open
         * stateid.
         */
-       if (lo->plh_stateid.seqid == 0 ||
-           test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
+       if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
 
                /*
                 * The first layoutget for the file. Need to serialize per
index 0dfc476da3e10918bcf0242d779cda343e6dac61..b38e3c0dc7908566acd1c8bc9d51402e8069ef6a 100644 (file)
@@ -247,7 +247,11 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
 }
 
 /* Helper function for pnfs_generic_commit_pagelist to catch an empty
- * page list. This can happen when two commits race. */
+ * page list. This can happen when two commits race.
+ *
+ * This must be called instead of nfs_init_commit - call one or the other, but
+ * not both!
+ */
 static bool
 pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
                                          struct nfs_commit_data *data,
@@ -256,7 +260,11 @@ pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
        if (list_empty(pages)) {
                if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
                        wake_up_atomic_t(&cinfo->mds->rpcs_out);
-               nfs_commitdata_release(data);
+               /* Don't call nfs_commitdata_release - it tries to put
+                * the open_context, which is only acquired in nfs_init_commit,
+                * and nfs_init_commit has not been called on @data */
+               WARN_ON_ONCE(data->context);
+               nfs_commit_free(data);
                return true;
        }
 
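
The comment above fixes a calling contract for nfs_commit_data: a caller invokes either nfs_init_commit() or this cancel helper, never both, because the open_context reference only exists once nfs_init_commit() has run. A hedged sketch of a caller honoring that contract; "issue_commit" is illustrative and the RPC hand-off is elided:

/*
 * Hedged sketch of the either/or contract documented above.
 * "issue_commit" is a hypothetical caller, not part of this patch.
 */
static int issue_commit(struct list_head *pages, struct nfs_commit_data *data,
			struct nfs_commit_info *cinfo)
{
	/* Lost the race to a concurrent commit: @data has no
	 * open_context yet, so only nfs_commit_free() is safe. */
	if (pnfs_generic_commit_cancel_empty_pagelist(pages, data, cinfo))
		return 0;

	/* From here on nfs_init_commit() owns the references, and
	 * nfs_commitdata_release() becomes the correct teardown. */
	nfs_init_commit(data, pages, NULL, cinfo);
	/* ... hand @data to the RPC layer as usual ... */
	return 0;
}
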
index 6776d7a7839e0e8afcd966d296678fbc0d69ca7d..572e5b3b06f1566f40e7df7be33b8f14aafd2e16 100644 (file)
@@ -367,13 +367,13 @@ readpage_async_filler(void *data, struct page *page)
                nfs_list_remove_request(new);
                nfs_readpage_release(new);
                error = desc->pgio->pg_error;
-               goto out_unlock;
+               goto out;
        }
        return 0;
 out_error:
        error = PTR_ERR(new);
-out_unlock:
        unlock_page(page);
+out:
        return error;
 }
 
index e55b5242614da7a9d209db34e8c0f6519cf69446..31f3df193bdbb47b82dbe8df7f67b33163d55828 100644 (file)
@@ -290,7 +290,7 @@ out_free_buf:
        return error;
 }
 
-#define NFSD_MDS_PR_KEY                0x0100000000000000
+#define NFSD_MDS_PR_KEY                0x0100000000000000ULL
 
 /*
  * We use the client ID as a unique key for the reservations.
index 1580ea6fd64df15da96b832eeef9f3dc2560e32b..d08cd88155c75278c4607f49c078622bf87ab5ee 100644 (file)
@@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
                goto out;
 
        inode = d_inode(fh->fh_dentry);
-       if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
-               error = -EOPNOTSUPP;
-               goto out_errno;
-       }
 
        error = fh_want_write(fh);
        if (error)
                goto out_errno;
 
-       error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
+       fh_lock(fh);
+
+       error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
        if (error)
-               goto out_drop_write;
-       error = inode->i_op->set_acl(inode, argp->acl_default,
-                                    ACL_TYPE_DEFAULT);
+               goto out_drop_lock;
+       error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
        if (error)
-               goto out_drop_write;
+               goto out_drop_lock;
+
+       fh_unlock(fh);
 
        fh_drop_write(fh);
 
@@ -131,7 +130,8 @@ out:
        posix_acl_release(argp->acl_access);
        posix_acl_release(argp->acl_default);
        return nfserr;
-out_drop_write:
+out_drop_lock:
+       fh_unlock(fh);
        fh_drop_write(fh);
 out_errno:
        nfserr = nfserrno(error);
index 01df4cd7c753fe12cee4c6d37a5ba9400b6f8f41..0c890347cde3d9559b0b0103c2c2c11825d51fae 100644 (file)
@@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
                goto out;
 
        inode = d_inode(fh->fh_dentry);
-       if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
-               error = -EOPNOTSUPP;
-               goto out_errno;
-       }
 
        error = fh_want_write(fh);
        if (error)
                goto out_errno;
 
-       error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
+       fh_lock(fh);
+
+       error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
        if (error)
-               goto out_drop_write;
-       error = inode->i_op->set_acl(inode, argp->acl_default,
-                                    ACL_TYPE_DEFAULT);
+               goto out_drop_lock;
+       error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
 
-out_drop_write:
+out_drop_lock:
+       fh_unlock(fh);
        fh_drop_write(fh);
 out_errno:
        nfserr = nfserrno(error);
index 6adabd6049b7199ad72e01e361e3637a88b016ff..71292a0d6f09226e828b327ba139a296dc76a08b 100644 (file)
@@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
        dentry = fhp->fh_dentry;
        inode = d_inode(dentry);
 
-       if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
-               return nfserr_attrnotsupp;
-
        if (S_ISDIR(inode->i_mode))
                flags = NFS4_ACL_DIR;
 
@@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
        if (host_error < 0)
                goto out_nfserr;
 
-       host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
+       fh_lock(fhp);
+
+       host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
        if (host_error < 0)
-               goto out_release;
+               goto out_drop_lock;
 
        if (S_ISDIR(inode->i_mode)) {
-               host_error = inode->i_op->set_acl(inode, dpacl,
-                                                 ACL_TYPE_DEFAULT);
+               host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
        }
 
-out_release:
+out_drop_lock:
+       fh_unlock(fhp);
+
        posix_acl_release(pacl);
        posix_acl_release(dpacl);
 out_nfserr:
index 7389cb1d7409cf16c31b041d88ae59cbe09d5322..04c68d9003249f884a0d4e49467873c8d75ed9a9 100644 (file)
@@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
        }
 }
 
-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
-{
-       struct rpc_xprt *xprt;
-
-       if (args->protocol != XPRT_TRANSPORT_BC_TCP)
-               return rpc_create(args);
-
-       xprt = args->bc_xprt->xpt_bc_xprt;
-       if (xprt) {
-               xprt_get(xprt);
-               return rpc_create_xprt(args, xprt);
-       }
-
-       return rpc_create(args);
-}
-
 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
 {
        int maxtime = max_cb_time(clp->net);
@@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
                args.authflavor = ses->se_cb_sec.flavor;
        }
        /* Create RPC client */
-       client = create_backchannel_client(&args);
+       client = rpc_create(&args);
        if (IS_ERR(client)) {
                dprintk("NFSD: couldn't create callback client: %ld\n",
                        PTR_ERR(client));
index f5f82e145018059bbeb265cd42a279a3009dfd6b..70d0b9b33031ee88985da10ff2d11c782a2b49f6 100644 (file)
@@ -3480,12 +3480,17 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
 }
 
 static struct nfs4_ol_stateid *
-init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
-               struct nfsd4_open *open)
+init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
 {
 
        struct nfs4_openowner *oo = open->op_openowner;
        struct nfs4_ol_stateid *retstp = NULL;
+       struct nfs4_ol_stateid *stp;
+
+       stp = open->op_stp;
+       /* Init and take the mutex outside the spinlocks; mutex_lock() can sleep */
+       mutex_init(&stp->st_mutex);
+       mutex_lock(&stp->st_mutex);
 
        spin_lock(&oo->oo_owner.so_client->cl_lock);
        spin_lock(&fp->fi_lock);
@@ -3493,6 +3498,8 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
        retstp = nfsd4_find_existing_open(fp, open);
        if (retstp)
                goto out_unlock;
+
+       open->op_stp = NULL;
        atomic_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_OPEN_STID;
        INIT_LIST_HEAD(&stp->st_locks);
@@ -3502,14 +3509,19 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = 0;
        stp->st_openstp = NULL;
-       init_rwsem(&stp->st_rwsem);
        list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
        list_add(&stp->st_perfile, &fp->fi_stateids);
 
 out_unlock:
        spin_unlock(&fp->fi_lock);
        spin_unlock(&oo->oo_owner.so_client->cl_lock);
-       return retstp;
+       if (retstp) {
+               mutex_lock(&retstp->st_mutex);
+               /* To keep mutex tracking happy */
+               mutex_unlock(&stp->st_mutex);
+               stp = retstp;
+       }
+       return stp;
 }
 
 /*
@@ -4305,7 +4317,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
        struct nfs4_file *fp = NULL;
        struct nfs4_ol_stateid *stp = NULL;
-       struct nfs4_ol_stateid *swapstp = NULL;
        struct nfs4_delegation *dp = NULL;
        __be32 status;
 
@@ -4335,32 +4346,28 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
         */
        if (stp) {
                /* Stateid was found, this is an OPEN upgrade */
-               down_read(&stp->st_rwsem);
+               mutex_lock(&stp->st_mutex);
                status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
                if (status) {
-                       up_read(&stp->st_rwsem);
+                       mutex_unlock(&stp->st_mutex);
                        goto out;
                }
        } else {
-               stp = open->op_stp;
-               open->op_stp = NULL;
-               swapstp = init_open_stateid(stp, fp, open);
-               if (swapstp) {
-                       nfs4_put_stid(&stp->st_stid);
-                       stp = swapstp;
-                       down_read(&stp->st_rwsem);
+               /* stp is returned locked. */
+               stp = init_open_stateid(fp, open);
+               /* See if we lost the race to some other thread */
+               if (stp->st_access_bmap != 0) {
                        status = nfs4_upgrade_open(rqstp, fp, current_fh,
                                                stp, open);
                        if (status) {
-                               up_read(&stp->st_rwsem);
+                               mutex_unlock(&stp->st_mutex);
                                goto out;
                        }
                        goto upgrade_out;
                }
-               down_read(&stp->st_rwsem);
                status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
                if (status) {
-                       up_read(&stp->st_rwsem);
+                       mutex_unlock(&stp->st_mutex);
                        release_open_stateid(stp);
                        goto out;
                }
@@ -4372,7 +4379,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        }
 upgrade_out:
        nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
-       up_read(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
 
        if (nfsd4_has_session(&resp->cstate)) {
                if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
@@ -4977,12 +4984,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
                 * revoked delegations are kept only for free_stateid.
                 */
                return nfserr_bad_stateid;
-       down_write(&stp->st_rwsem);
+       mutex_lock(&stp->st_mutex);
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status == nfs_ok)
                status = nfs4_check_fh(current_fh, &stp->st_stid);
        if (status != nfs_ok)
-               up_write(&stp->st_rwsem);
+               mutex_unlock(&stp->st_mutex);
        return status;
 }
 
@@ -5030,7 +5037,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
                return status;
        oo = openowner(stp->st_stateowner);
        if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
-               up_write(&stp->st_rwsem);
+               mutex_unlock(&stp->st_mutex);
                nfs4_put_stid(&stp->st_stid);
                return nfserr_bad_stateid;
        }
@@ -5062,12 +5069,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        oo = openowner(stp->st_stateowner);
        status = nfserr_bad_stateid;
        if (oo->oo_flags & NFS4_OO_CONFIRMED) {
-               up_write(&stp->st_rwsem);
+               mutex_unlock(&stp->st_mutex);
                goto put_stateid;
        }
        oo->oo_flags |= NFS4_OO_CONFIRMED;
        nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
        dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
                __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
 
@@ -5143,7 +5150,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
        nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
        status = nfs_ok;
 put_stateid:
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
        nfs4_put_stid(&stp->st_stid);
 out:
        nfsd4_bump_seqid(cstate, status);
@@ -5196,7 +5203,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        if (status)
                goto out; 
        nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
 
        nfsd4_close_open_stateid(stp);
 
@@ -5422,7 +5429,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
-       init_rwsem(&stp->st_rwsem);
+       mutex_init(&stp->st_mutex);
        list_add(&stp->st_locks, &open_stp->st_locks);
        list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
        spin_lock(&fp->fi_lock);
@@ -5591,7 +5598,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                                        &open_stp, nn);
                if (status)
                        goto out;
-               up_write(&open_stp->st_rwsem);
+               mutex_unlock(&open_stp->st_mutex);
                open_sop = openowner(open_stp->st_stateowner);
                status = nfserr_bad_stateid;
                if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
@@ -5600,7 +5607,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                status = lookup_or_create_lock_state(cstate, open_stp, lock,
                                                        &lock_stp, &new);
                if (status == nfs_ok)
-                       down_write(&lock_stp->st_rwsem);
+                       mutex_lock(&lock_stp->st_mutex);
        } else {
                status = nfs4_preprocess_seqid_op(cstate,
                                       lock->lk_old_lock_seqid,
@@ -5704,7 +5711,7 @@ out:
                    seqid_mutating_err(ntohl(status)))
                        lock_sop->lo_owner.so_seqid++;
 
-               up_write(&lock_stp->st_rwsem);
+               mutex_unlock(&lock_stp->st_mutex);
 
                /*
                 * If this is a new, never-before-used stateid, and we are
@@ -5874,7 +5881,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 fput:
        fput(filp);
 put_stateid:
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
        nfs4_put_stid(&stp->st_stid);
 out:
        nfsd4_bump_seqid(cstate, status);
index 986e51e5ceac882f703e1d0262fdc66d356a453f..64053eadeb818f2a754bd791af7010b992e99d1a 100644 (file)
@@ -535,7 +535,7 @@ struct nfs4_ol_stateid {
        unsigned char                   st_access_bmap;
        unsigned char                   st_deny_bmap;
        struct nfs4_ol_stateid          *st_openstp;
-       struct rw_semaphore             st_rwsem;
+       struct mutex                    st_mutex;
 };
 
 static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
index 809bd2de7ad0602c638464e4ecd5a736c14fa878..e9fd241b9a0acd685fe1df0e62adef8839bb54e6 100644 (file)
@@ -439,7 +439,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
        if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
                return 0;
        bytes = le16_to_cpu(sbp->s_bytes);
-       if (bytes > BLOCK_SIZE)
+       if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
                return 0;
        crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
                       sumoff);
index e27e6527912bb567f10d03c7b272f3922f69f3ca..4342c7ee7d202153126fbaace723a19846d02fd4 100644 (file)
@@ -1,7 +1,5 @@
 ccflags-y := -Ifs/ocfs2
 
-ccflags-y += -DCATCH_BH_JBD_RACES
-
 obj-$(CONFIG_OCFS2_FS) +=      \
        ocfs2.o                 \
        ocfs2_stackglue.o
index fe50ded1b4ce763e33579ab54a982e64ed06003e..498641eed2db83efe31923c2f7d7517a499f836c 100644 (file)
@@ -139,11 +139,16 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
 
                lock_buffer(bh);
                if (buffer_jbd(bh)) {
+#ifdef CATCH_BH_JBD_RACES
                        mlog(ML_ERROR,
                             "block %llu had the JBD bit set "
                             "while I was in lock_buffer!",
                             (unsigned long long)bh->b_blocknr);
                        BUG();
+#else
+                       unlock_buffer(bh);
+                       continue;
+#endif
                }
 
                clear_buffer_uptodate(bh);
index 22f0253a3567745ed02520573c8fc99c56189208..5c9d2d80ff70bf851e835c97adf775bbac963641 100644 (file)
@@ -405,12 +405,21 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
                err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
        } else {
                const struct cred *old_cred;
+               struct cred *override_cred;
 
                old_cred = ovl_override_creds(dentry->d_sb);
 
-               err = ovl_create_over_whiteout(dentry, inode, &stat, link,
-                                              hardlink);
+               err = -ENOMEM;
+               override_cred = prepare_creds();
+               if (override_cred) {
+                       override_cred->fsuid = old_cred->fsuid;
+                       override_cred->fsgid = old_cred->fsgid;
+                       put_cred(override_creds(override_cred));
+                       put_cred(override_cred);
 
+                       err = ovl_create_over_whiteout(dentry, inode, &stat,
+                                                      link, hardlink);
+               }
                revert_creds(old_cred);
        }
 
@@ -496,6 +505,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        struct dentry *upper;
        struct dentry *opaquedir = NULL;
        int err;
+       int flags = 0;
 
        if (WARN_ON(!workdir))
                return -EROFS;
@@ -525,46 +535,39 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        if (err)
                goto out_dput;
 
-       whiteout = ovl_whiteout(workdir, dentry);
-       err = PTR_ERR(whiteout);
-       if (IS_ERR(whiteout))
+       upper = lookup_one_len(dentry->d_name.name, upperdir,
+                              dentry->d_name.len);
+       err = PTR_ERR(upper);
+       if (IS_ERR(upper))
                goto out_unlock;
 
-       upper = ovl_dentry_upper(dentry);
-       if (!upper) {
-               upper = lookup_one_len(dentry->d_name.name, upperdir,
-                                      dentry->d_name.len);
-               err = PTR_ERR(upper);
-               if (IS_ERR(upper))
-                       goto kill_whiteout;
-
-               err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
-               dput(upper);
-               if (err)
-                       goto kill_whiteout;
-       } else {
-               int flags = 0;
+       err = -ESTALE;
+       if ((opaquedir && upper != opaquedir) ||
+           (!opaquedir && ovl_dentry_upper(dentry) &&
+            upper != ovl_dentry_upper(dentry))) {
+               goto out_dput_upper;
+       }
 
-               if (opaquedir)
-                       upper = opaquedir;
-               err = -ESTALE;
-               if (upper->d_parent != upperdir)
-                       goto kill_whiteout;
+       whiteout = ovl_whiteout(workdir, dentry);
+       err = PTR_ERR(whiteout);
+       if (IS_ERR(whiteout))
+               goto out_dput_upper;
 
-               if (is_dir)
-                       flags |= RENAME_EXCHANGE;
+       if (d_is_dir(upper))
+               flags = RENAME_EXCHANGE;
 
-               err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
-               if (err)
-                       goto kill_whiteout;
+       err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
+       if (err)
+               goto kill_whiteout;
+       if (flags)
+               ovl_cleanup(wdir, upper);
 
-               if (is_dir)
-                       ovl_cleanup(wdir, upper);
-       }
        ovl_dentry_version_inc(dentry->d_parent);
 out_d_drop:
        d_drop(dentry);
        dput(whiteout);
+out_dput_upper:
+       dput(upper);
 out_unlock:
        unlock_rename(workdir, upperdir);
 out_dput:
index 0ed7c40124378cc9dce12d73e3c22449d2348909..d1cdc60dd68fa25aa74e2474a09e07aab08b7e26 100644 (file)
@@ -59,16 +59,40 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
        if (err)
                goto out;
 
+       if (attr->ia_valid & ATTR_SIZE) {
+               struct inode *realinode = d_inode(ovl_dentry_real(dentry));
+
+               err = -ETXTBSY;
+               if (atomic_read(&realinode->i_writecount) < 0)
+                       goto out_drop_write;
+       }
+
        err = ovl_copy_up(dentry);
        if (!err) {
+               struct inode *winode = NULL;
+
                upperdentry = ovl_dentry_upper(dentry);
 
+               if (attr->ia_valid & ATTR_SIZE) {
+                       winode = d_inode(upperdentry);
+                       err = get_write_access(winode);
+                       if (err)
+                               goto out_drop_write;
+               }
+
+               if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+                       attr->ia_valid &= ~ATTR_MODE;
+
                inode_lock(upperdentry->d_inode);
                err = notify_change(upperdentry, attr, NULL);
                if (!err)
                        ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
                inode_unlock(upperdentry->d_inode);
+
+               if (winode)
+                       put_write_access(winode);
        }
+out_drop_write:
        ovl_drop_write(dentry);
 out:
        return err;
@@ -121,16 +145,18 @@ int ovl_permission(struct inode *inode, int mask)
 
                err = vfs_getattr(&realpath, &stat);
                if (err)
-                       return err;
+                       goto out_dput;
 
+               err = -ESTALE;
                if ((stat.mode ^ inode->i_mode) & S_IFMT)
-                       return -ESTALE;
+                       goto out_dput;
 
                inode->i_mode = stat.mode;
                inode->i_uid = stat.uid;
                inode->i_gid = stat.gid;
 
-               return generic_permission(inode, mask);
+               err = generic_permission(inode, mask);
+               goto out_dput;
        }
 
        /* Careful in RCU walk mode */
@@ -238,41 +264,27 @@ out:
        return err;
 }
 
-static bool ovl_need_xattr_filter(struct dentry *dentry,
-                                 enum ovl_path_type type)
-{
-       if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
-               return S_ISDIR(dentry->d_inode->i_mode);
-       else
-               return false;
-}
-
 ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
                     const char *name, void *value, size_t size)
 {
-       struct path realpath;
-       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+       struct dentry *realdentry = ovl_dentry_real(dentry);
 
-       if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
+       if (ovl_is_private_xattr(name))
                return -ENODATA;
 
-       return vfs_getxattr(realpath.dentry, name, value, size);
+       return vfs_getxattr(realdentry, name, value, size);
 }
 
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
-       struct path realpath;
-       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+       struct dentry *realdentry = ovl_dentry_real(dentry);
        ssize_t res;
        int off;
 
-       res = vfs_listxattr(realpath.dentry, list, size);
+       res = vfs_listxattr(realdentry, list, size);
        if (res <= 0 || size == 0)
                return res;
 
-       if (!ovl_need_xattr_filter(dentry, type))
-               return res;
-
        /* filter out private xattrs */
        for (off = 0; off < res;) {
                char *s = list + off;
@@ -302,7 +314,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
                goto out;
 
        err = -ENODATA;
-       if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
+       if (ovl_is_private_xattr(name))
                goto out_drop_write;
 
        if (!OVL_TYPE_UPPER(type)) {
@@ -401,12 +413,11 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
        if (!inode)
                return NULL;
 
-       mode &= S_IFMT;
-
        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_flags |= S_NOATIME | S_NOCMTIME;
 
+       mode &= S_IFMT;
        switch (mode) {
        case S_IFDIR:
                inode->i_private = oe;
index 4bd9b5ba8f42008b7f72cf271fac89c67b7eca6d..cfbca53590d078b89ca613e65c3b10486763f656 100644 (file)
@@ -187,6 +187,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
 {
        to->i_uid = from->i_uid;
        to->i_gid = from->i_gid;
+       to->i_mode = from->i_mode;
 }
 
 /* dir.c */
index ce02f46029da7c3aa7c69be9291d62c81baeb642..9a7693d5f8fffb38bd4a07804e4627ed7f50361a 100644 (file)
@@ -1082,11 +1082,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                        if (err < 0)
                                goto out_put_workdir;
 
-                       if (!err) {
-                               pr_err("overlayfs: upper fs needs to support d_type.\n");
-                               err = -EINVAL;
-                               goto out_put_workdir;
-                       }
+                       /*
+                        * This configuration was previously allowed and we
+                        * don't want to break users on a kernel upgrade, so
+                        * warn instead of erroring out.
+                        */
+                       if (!err)
+                               pr_warn("overlayfs: upper fs needs to support d_type.\n");
                }
        }
 
index 8a4a266beff39dda0be9ca39df74bc847fd0a2d3..edc452c2a563a0f86849094a848786a7728ef99e 100644 (file)
@@ -820,39 +820,43 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
        return error;
 }
 
-static int
-posix_acl_xattr_set(const struct xattr_handler *handler,
-                   struct dentry *unused, struct inode *inode,
-                   const char *name, const void *value,
-                   size_t size, int flags)
+int
+set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
 {
-       struct posix_acl *acl = NULL;
-       int ret;
-
        if (!IS_POSIXACL(inode))
                return -EOPNOTSUPP;
        if (!inode->i_op->set_acl)
                return -EOPNOTSUPP;
 
-       if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
-               return value ? -EACCES : 0;
+       if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+               return acl ? -EACCES : 0;
        if (!inode_owner_or_capable(inode))
                return -EPERM;
 
+       if (acl) {
+               int ret = posix_acl_valid(acl);
+               if (ret)
+                       return ret;
+       }
+       return inode->i_op->set_acl(inode, acl, type);
+}
+EXPORT_SYMBOL(set_posix_acl);
+
+static int
+posix_acl_xattr_set(const struct xattr_handler *handler,
+                   struct dentry *unused, struct inode *inode,
+                   const char *name, const void *value,
+                   size_t size, int flags)
+{
+       struct posix_acl *acl = NULL;
+       int ret;
+
        if (value) {
                acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
-
-               if (acl) {
-                       ret = posix_acl_valid(acl);
-                       if (ret)
-                               goto out;
-               }
        }
-
-       ret = inode->i_op->set_acl(inode, acl, handler->flags);
-out:
+       ret = set_posix_acl(inode, handler->flags, acl);
        posix_acl_release(acl);
        return ret;
 }
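
Factoring the checks into an exported set_posix_acl() is what lets the nfsd hunks earlier in this series drop their open-coded IS_POSIXACL/->set_acl tests. A minimal sketch of a caller; the wrapper name is hypothetical:

/*
 * Hedged sketch: applying an access ACL via the newly exported helper.
 * "apply_access_acl" is illustrative; only set_posix_acl() is real.
 */
static int apply_access_acl(struct inode *inode, struct posix_acl *acl)
{
	/* set_posix_acl() now performs the IS_POSIXACL, ->set_acl,
	 * ownership and posix_acl_valid() checks for every caller. */
	return set_posix_acl(inode, ACL_TYPE_ACCESS, acl);
}
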
index 55bc7d6c8aacaed2f4892bd1a554e3baf939bf34..06702783bf40254593f82b76dd9745ad557ac334 100644 (file)
@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
        if (IS_ERR(sb))
                return ERR_CAST(sb);
 
+       /*
+        * procfs isn't actually a stacking filesystem; however, there is
+        * too much magic going on inside it to permit stacking things on
+        * top of it
+        */
+       sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
+
        if (!proc_parse_options(options, ns)) {
                deactivate_locked_super(sb);
                return ERR_PTR(-EINVAL);
index b8f2d1e8c6453c373655be98c03c1118acd71f2c..c72c16c5a60f9dfd0a151022b7885073fa872270 100644 (file)
@@ -1393,7 +1393,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        unsigned long safe_mask = 0;
        unsigned int commit_max_age = (unsigned int)-1;
        struct reiserfs_journal *journal = SB_JOURNAL(s);
-       char *new_opts = kstrdup(arg, GFP_KERNEL);
+       char *new_opts;
        int err;
        char *qf_names[REISERFS_MAXQUOTAS];
        unsigned int qfmt = 0;
@@ -1401,6 +1401,10 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        int i;
 #endif
 
+       new_opts = kstrdup(arg, GFP_KERNEL);
+       if (arg && !new_opts)
+               return -ENOMEM;
+
        sync_filesystem(s);
        reiserfs_write_lock(s);
 
@@ -1546,7 +1550,8 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        }
 
 out_ok_unlocked:
-       replace_mount_options(s, new_opts);
+       if (new_opts)
+               replace_mount_options(s, new_opts);
        return 0;
 
 out_err_unlock:
index 08316972ff934102b58783b1609c2d1920a47ea8..7bbf420d128988470b0fb90491f05587b83b9052 100644 (file)
@@ -52,6 +52,7 @@
 #include "ubifs.h"
 #include <linux/mount.h>
 #include <linux/slab.h>
+#include <linux/migrate.h>
 
 static int read_block(struct inode *inode, void *addr, unsigned int block,
                      struct ubifs_data_node *dn)
@@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page)
        return ret;
 }
 
+#ifdef CONFIG_MIGRATION
+static int ubifs_migrate_page(struct address_space *mapping,
+               struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+       int rc;
+
+       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+       if (rc != MIGRATEPAGE_SUCCESS)
+               return rc;
+
+       if (PagePrivate(page)) {
+               ClearPagePrivate(page);
+               SetPagePrivate(newpage);
+       }
+
+       migrate_page_copy(newpage, page);
+       return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
 static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 {
        /*
@@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = {
        .write_end      = ubifs_write_end,
        .invalidatepage = ubifs_invalidatepage,
        .set_page_dirty = ubifs_set_page_dirty,
+#ifdef CONFIG_MIGRATION
+       .migratepage    = ubifs_migrate_page,
+#endif
        .releasepage    = ubifs_releasepage,
 };
 
index 5f861ed287c3fde0fbf87de4c740e672213acd5d..888c364b2fe95a87ffbc5d303f403d8875fb4b9f 100644 (file)
@@ -295,7 +295,8 @@ static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
                map = &UDF_SB(sb)->s_partmaps[partition];
                /* map to sparable/physical partition desc */
                phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
-                       map->s_partition_num, ext_offset + offset);
+                       map->s_type_specific.s_metadata.s_phys_partition_ref,
+                       ext_offset + offset);
        }
 
        brelse(epos.bh);
@@ -317,14 +318,18 @@ uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
        mdata = &map->s_type_specific.s_metadata;
        inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
 
-       /* We shouldn't mount such media... */
-       BUG_ON(!inode);
+       if (!inode)
+               return 0xFFFFFFFF;
+
        retblk = udf_try_read_meta(inode, block, partition, offset);
        if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
                udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
                if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
                        mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
-                               mdata->s_mirror_file_loc, map->s_partition_num);
+                               mdata->s_mirror_file_loc,
+                               mdata->s_phys_partition_ref);
+                       if (IS_ERR(mdata->s_mirror_fe))
+                               mdata->s_mirror_fe = NULL;
                        mdata->s_flags |= MF_MIRROR_FE_LOADED;
                }
 
index 5e2c8c814e1bda634c54ade051bd0143e20d553a..4942549e7dc8b3758dd8c8fadb99e8fba4265c00 100644 (file)
@@ -951,13 +951,13 @@ out2:
 }
 
 struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
-                                       u32 meta_file_loc, u32 partition_num)
+                                       u32 meta_file_loc, u32 partition_ref)
 {
        struct kernel_lb_addr addr;
        struct inode *metadata_fe;
 
        addr.logicalBlockNum = meta_file_loc;
-       addr.partitionReferenceNum = partition_num;
+       addr.partitionReferenceNum = partition_ref;
 
        metadata_fe = udf_iget_special(sb, &addr);
 
@@ -974,7 +974,8 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
        return metadata_fe;
 }
 
-static int udf_load_metadata_files(struct super_block *sb, int partition)
+static int udf_load_metadata_files(struct super_block *sb, int partition,
+                                  int type1_index)
 {
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *map;
@@ -984,20 +985,21 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
 
        map = &sbi->s_partmaps[partition];
        mdata = &map->s_type_specific.s_metadata;
+       mdata->s_phys_partition_ref = type1_index;
 
        /* metadata address */
        udf_debug("Metadata file location: block = %d part = %d\n",
-                 mdata->s_meta_file_loc, map->s_partition_num);
+                 mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
 
        fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
-                                        map->s_partition_num);
+                                        mdata->s_phys_partition_ref);
        if (IS_ERR(fe)) {
                /* mirror file entry */
                udf_debug("Mirror metadata file location: block = %d part = %d\n",
-                         mdata->s_mirror_file_loc, map->s_partition_num);
+                         mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
 
                fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
-                                                map->s_partition_num);
+                                                mdata->s_phys_partition_ref);
 
                if (IS_ERR(fe)) {
                        udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
@@ -1015,7 +1017,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
        */
        if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
                addr.logicalBlockNum = mdata->s_bitmap_file_loc;
-               addr.partitionReferenceNum = map->s_partition_num;
+               addr.partitionReferenceNum = mdata->s_phys_partition_ref;
 
                udf_debug("Bitmap file location: block = %d part = %d\n",
                          addr.logicalBlockNum, addr.partitionReferenceNum);
@@ -1283,7 +1285,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
        p = (struct partitionDesc *)bh->b_data;
        partitionNumber = le16_to_cpu(p->partitionNumber);
 
-       /* First scan for TYPE1, SPARABLE and METADATA partitions */
+       /* First scan for TYPE1 and SPARABLE partitions */
        for (i = 0; i < sbi->s_partitions; i++) {
                map = &sbi->s_partmaps[i];
                udf_debug("Searching map: (%d == %d)\n",
@@ -1333,7 +1335,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
                goto out_bh;
 
        if (map->s_partition_type == UDF_METADATA_MAP25) {
-               ret = udf_load_metadata_files(sb, i);
+               ret = udf_load_metadata_files(sb, i, type1_idx);
                if (ret < 0) {
                        udf_err(sb, "error loading MetaData partition map %d\n",
                                i);
index 27b5335730c9337424c0c32790e74173ac61f747..c13875d669c0f45722c5f138a5fa34786f2b3d75 100644 (file)
@@ -61,6 +61,11 @@ struct udf_meta_data {
        __u32   s_bitmap_file_loc;
        __u32   s_alloc_unit_size;
        __u16   s_align_unit_size;
+       /*
+        * Partition Reference Number of the associated physical / sparable
+        * partition
+        */
+       __u16   s_phys_partition_ref;
        int     s_flags;
        struct inode *s_metadata_fe;
        struct inode *s_mirror_fe;
index dbca7375deefa3f7d2499f516697ffdd28178546..63a6ff2cfc6821961265136e1f62cf455da3b195 100644 (file)
@@ -1575,6 +1575,12 @@ xfs_ioc_swapext(
                goto out_put_tmp_file;
        }
 
+       if (f.file->f_op != &xfs_file_operations ||
+           tmp.file->f_op != &xfs_file_operations) {
+               error = -EINVAL;
+               goto out_put_tmp_file;
+       }
+
        ip = XFS_I(file_inode(f.file));
        tip = XFS_I(file_inode(tmp.file));
 
index 797ae2ec8eee2d129653514cea7b891db96e38d6..29c691265b49357bc0d036b71897348806c58e6c 100644 (file)
@@ -78,6 +78,7 @@
 
 /* ACPI PCI Interrupt Link (pci_link.c) */
 
+int acpi_irq_penalty_init(void);
 int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
                               int *polarity, char **name);
 int acpi_pci_link_free_irq(acpi_handle handle);
index 4e4c21491c4188a4401887feb55978fe58b1ff47..1ff3a76c265dbcbaea2f427359bb16c75e719200 100644 (file)
@@ -192,7 +192,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
 /*
  * Optionally support group module level code.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
 
 /*
  * Optionally use 32-bit FADT addresses if and when there is a conflict
index 70a41f74203728ecfd86e5807563110a690238c5..5731ccb42585b7d4a04c59487e4efb777e29d697 100644 (file)
@@ -51,7 +51,8 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
  */
 extern bool acpi_video_handles_brightness_key_presses(void);
 extern int acpi_video_get_levels(struct acpi_device *device,
-                                struct acpi_video_device_brightness **dev_br);
+                                struct acpi_video_device_brightness **dev_br,
+                                int *pmax_level);
 #else
 static inline int acpi_video_register(void) { return 0; }
 static inline void acpi_video_unregister(void) { return; }
@@ -72,7 +73,8 @@ static inline bool acpi_video_handles_brightness_key_presses(void)
        return false;
 }
 static inline int acpi_video_get_levels(struct acpi_device *device,
-                       struct acpi_video_device_brightness **dev_br)
+                       struct acpi_video_device_brightness **dev_br,
+                       int *pmax_level)
 {
        return -ENODEV;
 }
index 6bd05700d8c94cfe5c190008c1eb8910bf8ae0ae..05f05f17a7c2e4fc14f8fa858abc9f7d7715c7b9 100644 (file)
 
 #include <asm-generic/qspinlock_types.h>
 
+/**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
 /**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
        /*
-        * queued_spin_lock_slowpath() can ACQUIRE the lock before
-        * issuing the unordered store that sets _Q_LOCKED_VAL.
-        *
-        * See both smp_cond_acquire() sites for more detail.
-        *
-        * This however means that in code like:
-        *
-        *   spin_lock(A)               spin_lock(B)
-        *   spin_unlock_wait(B)        spin_is_locked(A)
-        *   do_something()             do_something()
-        *
-        * Both CPUs can end up running do_something() because the store
-        * setting _Q_LOCKED_VAL will pass through the loads in
-        * spin_unlock_wait() and/or spin_is_locked().
+        * See queued_spin_unlock_wait().
         *
-        * Avoid this by issuing a full memory barrier between the spin_lock()
-        * and the loads in spin_unlock_wait() and spin_is_locked().
-        *
-        * Note that regular mutual exclusion doesn't care about this
-        * delayed store.
+        * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+        * isn't immediately observable.
         */
-       smp_mb();
-       return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+       return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-       /* See queued_spin_is_locked() */
-       smp_mb();
-       while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-               cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
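
With the inline version removed, queued_spin_unlock_wait() becomes an out-of-line function that architectures may override. A hedged sketch modeled directly on the inline body deleted above; the generic version this patch adds elsewhere may order things more carefully:

/*
 * Hedged sketch of a generic queued_spin_unlock_wait(), modeled on the
 * inline body removed above; not necessarily the exact implementation
 * introduced by this patch.
 */
void queued_spin_unlock_wait(struct qspinlock *lock)
{
	/* Full barrier so a prior spin_lock() on this CPU is ordered
	 * against the loads below. */
	smp_mb();
	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
		cpu_relax();
}
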
index 6a67ab94b553363934bc9c2e07eb12d6c8a977f7..081d0f258d4c98d3cb36615c2dd252b630052b99 100644 (file)
 
 #define INIT_TEXT                                                      \
        *(.init.text)                                                   \
+       *(.text.startup)                                                \
        MEM_DISCARD(init.text)
 
 #define EXIT_DATA                                                      \
        *(.exit.data)                                                   \
+       *(.fini_array)                                                  \
+       *(.dtors)                                                       \
        MEM_DISCARD(exit.data)                                          \
        MEM_DISCARD(exit.rodata)
 
 #define EXIT_TEXT                                                      \
        *(.exit.text)                                                   \
+       *(.text.exit)                                                   \
        MEM_DISCARD(exit.text)
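
For context, these macros are consumed by the per-architecture linker
scripts; a simplified sketch of the usual pattern (loosely modelled on the
generic INIT_TEXT_SECTION layout, section attributes omitted)::

    .init.text : {
            _sinittext = .;
            INIT_TEXT
            _einittext = .;
    }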
 
 #define EXIT_CALL                                                      \
index 25afb31f0389edbb1f9f8a9e3558602df38f816b..261b86d20e7718f13c95290c63e277b5fafca96d 100644 (file)
 enum analogix_dp_devtype {
        EXYNOS_DP,
        RK3288_DP,
+       RK3399_EDP,
 };
 
+static inline bool is_rockchip(enum analogix_dp_devtype type)
+{
+       return type == RK3288_DP || type == RK3399_EDP;
+}
+
 struct analogix_dp_plat_data {
        enum analogix_dp_devtype dev_type;
        struct drm_panel *panel;
@@ -28,7 +34,8 @@ struct analogix_dp_plat_data {
        int (*power_off)(struct analogix_dp_plat_data *);
        int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
                      struct drm_connector *);
-       int (*get_modes)(struct analogix_dp_plat_data *);
+       int (*get_modes)(struct analogix_dp_plat_data *,
+                        struct drm_connector *);
 };
 
 int analogix_dp_resume(struct device *dev);
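
A sketch of how the new helper might be used inside the driver
(foo_dp_init_phy and the Rockchip-specific body are illustrative)::

    static void foo_dp_init_phy(struct analogix_dp_plat_data *plat_data)
    {
            /* Gate SoC-specific setup on the device family instead of
             * listing every Rockchip variant at each call site. */
            if (is_rockchip(plat_data->dev_type)) {
                    /* Rockchip-specific PHY configuration ... */
            }
    }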
index 84f1a8eefbdbc8ae89925aff3dafaaccdf99b4b4..d3778652e4629ad4765de4aa892a0ff950647b34 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
+#include <linux/fence.h>
 
 #include <asm/mman.h>
 #include <asm/pgalloc.h>
@@ -66,6 +67,7 @@
 
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
 #include <drm/drm_global.h>
 #include <drm/drm_hashtab.h>
 #include <drm/drm_mem_util.h>
@@ -83,6 +85,8 @@ struct drm_local_map;
 struct drm_device_dma;
 struct drm_dma_handle;
 struct drm_gem_object;
+struct drm_master;
+struct drm_vblank_crtc;
 
 struct device_node;
 struct videomode;
@@ -281,13 +285,14 @@ struct drm_ioctl_desc {
 
 /* Event queued up for userspace to read */
 struct drm_pending_event {
+       struct completion *completion;
        struct drm_event *event;
+       struct fence *fence;
        struct list_head link;
        struct list_head pending_link;
        struct drm_file *file_priv;
        pid_t pid; /* pid of requester, no guarantee it's valid by the time
                      we deliver the event, for tracing only */
-       void (*destroy)(struct drm_pending_event *event);
 };
 
 /* initial implementation using a linked list - todo hashtab */
@@ -299,8 +304,6 @@ struct drm_prime_file_private {
 /** File private data */
 struct drm_file {
        unsigned authenticated :1;
-       /* Whether we're master for a minor. Protected by master_mutex */
-       unsigned is_master :1;
        /* true when the client has asked us to expose stereo 3D mode flags */
        unsigned stereo_allowed :1;
        /*
@@ -311,10 +314,10 @@ struct drm_file {
        /* true if client understands atomic properties */
        unsigned atomic:1;
        /*
-        * This client is allowed to gain master privileges for @master.
+        * This client is the creator of @master.
         * Protected by struct drm_device::master_mutex.
         */
-       unsigned allowed_master:1;
+       unsigned is_master:1;
 
        struct pid *pid;
        kuid_t uid;
@@ -332,7 +335,7 @@ struct drm_file {
        void *driver_priv;
 
        struct drm_master *master; /* master this node is currently associated with
-                                     N.B. not always minor->master */
+                                     N.B. not always dev->master */
        /**
         * fbs - List of framebuffers associated with this file.
         *
@@ -371,32 +374,6 @@ struct drm_lock_data {
        int idle_has_lock;
 };
 
-/**
- * struct drm_master - drm master structure
- *
- * @refcount: Refcount for this master object.
- * @minor: Link back to minor char device we are master for. Immutable.
- * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
- * @unique_len: Length of unique field. Protected by drm_global_mutex.
- * @magic_map: Map of used authentication tokens. Protected by struct_mutex.
- * @lock: DRI lock information.
- * @driver_priv: Pointer to driver-private information.
- */
-struct drm_master {
-       struct kref refcount;
-       struct drm_minor *minor;
-       char *unique;
-       int unique_len;
-       struct idr magic_map;
-       struct drm_lock_data lock;
-       void *driver_priv;
-};
-
-/* Size of ringbuffer for vblank timestamps. Just double-buffer
- * in initial implementation.
- */
-#define DRM_VBLANKTIME_RBSIZE 2
-
 /* Flags and return codes for get_vblank_timestamp() driver function. */
 #define DRM_CALLED_FROM_VBLIRQ 1
 #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
@@ -420,8 +397,6 @@ struct drm_driver {
        void (*postclose) (struct drm_device *, struct drm_file *);
        void (*lastclose) (struct drm_device *);
        int (*unload) (struct drm_device *);
-       int (*suspend) (struct drm_device *, pm_message_t state);
-       int (*resume) (struct drm_device *);
        int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
        int (*dma_quiescent) (struct drm_device *);
        int (*context_dtor) (struct drm_device *dev, int context);
@@ -434,7 +409,7 @@ struct drm_driver {
         *
         * Driver callback for fetching a raw hardware vblank counter for @crtc.
         * If a device doesn't have a hardware counter, the driver can simply
-        * return the value of drm_vblank_count. The DRM core will account for
+        * use the drm_vblank_no_hw_counter() function. The DRM core will account for
         * missed vblank events while interrupts were disabled based on system
         * timestamps.
         *
@@ -452,8 +427,8 @@ struct drm_driver {
         * @pipe: which irq to enable
         *
         * Enable vblank interrupts for @crtc.  If the device doesn't have
-        * a hardware vblank counter, this routine should be a no-op, since
-        * interrupts will have to stay on to keep the count accurate.
+        * a hardware vblank counter, the driver should use the
+        * drm_vblank_no_hw_counter() function that keeps a virtual counter.
         *
         * RETURNS
         * Zero on success, appropriate errno if the given @crtc's vblank
@@ -467,8 +442,8 @@ struct drm_driver {
         * @pipe: which irq to enable
         *
         * Disable vblank interrupts for @crtc.  If the device doesn't have
-        * a hardware vblank counter, this routine should be a no-op, since
-        * interrupts will have to stay on to keep the count accurate.
+        * a hardware vblank counter, the driver should use the
+        * drm_vblank_no_hw_counter() function that keeps a virtual counter.
         */
        void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
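
The recommended wiring for hardware without a vblank counter then looks
roughly like this (foo_* names are hypothetical)::

    static struct drm_driver foo_driver = {
            /* Let the DRM core keep a virtual counter. */
            .get_vblank_counter = drm_vblank_no_hw_counter,
            .enable_vblank = foo_enable_vblank,
            .disable_vblank = foo_disable_vblank,
            /* ... */
    };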
 
@@ -573,8 +548,7 @@ struct drm_driver {
 
        int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
                          bool from_open);
-       void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
-                           bool from_release);
+       void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
 
        int (*debugfs_init)(struct drm_minor *minor);
        void (*debugfs_cleanup)(struct drm_minor *minor);
@@ -708,38 +682,6 @@ struct drm_minor {
 
        struct list_head debugfs_list;
        struct mutex debugfs_lock; /* Protects debugfs_list. */
-
-       /* currently active master for this node. Protected by master_mutex */
-       struct drm_master *master;
-};
-
-
-struct drm_pending_vblank_event {
-       struct drm_pending_event base;
-       unsigned int pipe;
-       struct drm_event_vblank event;
-};
-
-struct drm_vblank_crtc {
-       struct drm_device *dev;         /* pointer to the drm_device */
-       wait_queue_head_t queue;        /**< VBLANK wait queue */
-       struct timer_list disable_timer;                /* delayed disable timer */
-
-       /* vblank counter, protected by dev->vblank_time_lock for writes */
-       u32 count;
-       /* vblank timestamps, protected by dev->vblank_time_lock for writes */
-       struct timeval time[DRM_VBLANKTIME_RBSIZE];
-
-       atomic_t refcount;              /* number of users of vblank interruptsper crtc */
-       u32 last;                       /* protected by dev->vbl_lock, used */
-                                       /* for wraparound handling */
-       u32 last_wait;                  /* Last vblank seqno waited per CRTC */
-       unsigned int inmodeset;         /* Display driver is setting mode */
-       unsigned int pipe;              /* crtc index */
-       int framedur_ns;                /* frame/field duration in ns */
-       int linedur_ns;                 /* line duration in ns */
-       bool enabled;                   /* so we don't call enable more than
-                                          once per disable */
 };
 
 /**
@@ -759,6 +701,10 @@ struct drm_device {
        struct drm_minor *control;              /**< Control node */
        struct drm_minor *primary;              /**< Primary node */
        struct drm_minor *render;               /**< Render node */
+
+       /* currently active master for this device. Protected by master_mutex */
+       struct drm_master *master;
+
        atomic_t unplugged;                     /**< Flag whether dev is dead */
        struct inode *anon_inode;               /**< inode for private address-space */
        char *unique;                           /**< unique name of the device */
@@ -872,6 +818,8 @@ struct drm_device {
        int switch_power_state;
 };
 
+#include <drm/drm_irq.h>
+
 #define DRM_SWITCH_POWER_ON 0
 #define DRM_SWITCH_POWER_OFF 1
 #define DRM_SWITCH_POWER_CHANGING 2
@@ -928,7 +876,6 @@ int drm_open(struct inode *inode, struct file *filp);
 ssize_t drm_read(struct file *filp, char __user *buffer,
                 size_t count, loff_t *offset);
 int drm_release(struct inode *inode, struct file *filp);
-int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
 unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
 int drm_event_reserve_init_locked(struct drm_device *dev,
                                  struct drm_file *file_priv,
@@ -959,75 +906,14 @@ void drm_clflush_virt_range(void *addr, unsigned long length);
  * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
  */
 
-                               /* IRQ support (drm_irq.h) */
-extern int drm_irq_install(struct drm_device *dev, int irq);
-extern int drm_irq_uninstall(struct drm_device *dev);
-
-extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
-extern int drm_wait_vblank(struct drm_device *dev, void *data,
-                          struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
-extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
-                                    struct timeval *vblanktime);
-extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
-                                         struct timeval *vblanktime);
-extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
-                                 struct drm_pending_vblank_event *e);
-extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
-                                      struct drm_pending_vblank_event *e);
-extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
-                                struct drm_pending_vblank_event *e);
-extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
-                                     struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
-extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
-extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
-extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
-extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
-extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
-extern void drm_vblank_cleanup(struct drm_device *dev);
-extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
-
-extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
-                                                unsigned int pipe, int *max_error,
-                                                struct timeval *vblank_time,
-                                                unsigned flags,
-                                                const struct drm_display_mode *mode);
-extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
-                                           const struct drm_display_mode *mode);
-
-/**
- * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
- * @crtc: which CRTC's vblank waitqueue to retrieve
- *
- * This function returns a pointer to the vblank waitqueue for the CRTC.
- * Drivers can use this to implement vblank waits using wait_event() & co.
- */
-static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
-{
-       return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
-}
-
 /* Modesetting support */
 extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
 extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
 
-                               /* Stub support (drm_stub.h) */
-extern struct drm_master *drm_master_get(struct drm_master *master);
-extern void drm_master_put(struct drm_master **master);
-
-extern void drm_put_dev(struct drm_device *dev);
-extern void drm_unplug_dev(struct drm_device *dev);
+/* drm_drv.c */
+void drm_put_dev(struct drm_device *dev);
+void drm_unplug_dev(struct drm_device *dev);
 extern unsigned int drm_debug;
-extern bool drm_atomic;
 
                                /* Debugfs support */
 #if defined(CONFIG_DEBUG_FS)
@@ -1078,11 +964,13 @@ extern void drm_sysfs_hotplug_event(struct drm_device *dev);
 
 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                                 struct device *parent);
+int drm_dev_init(struct drm_device *dev,
+                struct drm_driver *driver,
+                struct device *parent);
 void drm_dev_ref(struct drm_device *dev);
 void drm_dev_unref(struct drm_device *dev);
 int drm_dev_register(struct drm_device *dev, unsigned long flags);
 void drm_dev_unregister(struct drm_device *dev);
-int drm_dev_set_unique(struct drm_device *dev, const char *name);
 
 struct drm_minor *drm_minor_acquire(unsigned int minor_id);
 void drm_minor_release(struct drm_minor *minor);
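
drm_dev_init() enables embedding the drm_device in a larger driver
structure instead of allocating it with drm_dev_alloc(); a rough sketch
(foo_device, foo_driver and foo_probe are hypothetical)::

    struct foo_device {
            struct drm_device drm;
            /* driver-private state ... */
    };

    static int foo_probe(struct platform_device *pdev)
    {
            struct foo_device *foo;
            int ret;

            foo = kzalloc(sizeof(*foo), GFP_KERNEL);
            if (!foo)
                    return -ENOMEM;

            ret = drm_dev_init(&foo->drm, &foo_driver, &pdev->dev);
            if (ret) {
                    kfree(foo);
                    return ret;
            }

            return drm_dev_register(&foo->drm, 0);
    }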
@@ -1135,7 +1023,6 @@ extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
 
 /* platform section */
 extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
-extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m);
 
 /* returns true if currently okay to sleep */
 static __inline__ bool drm_can_sleep(void)
index 92c84e9ab09a70db602c2bae7d4dcfeb6867600c..856a9c85a8383b5c382c6537375a4e33f5e6e7c0 100644 (file)
 
 #include <drm/drm_crtc.h>
 
+void drm_crtc_commit_put(struct drm_crtc_commit *commit);
+static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
+{
+       kref_get(&commit->ref);
+}
+
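+
+The pair follows the usual kref pattern; a sketch of pinning a commit
+across a blocking wait (foo_wait_for_flip is made up, flip_done is the
+completion defined on struct drm_crtc_commit below)::
+
+    static void foo_wait_for_flip(struct drm_crtc_commit *commit)
+    {
+            drm_crtc_commit_get(commit);    /* keep it alive while sleeping */
+            wait_for_completion(&commit->flip_done);
+            drm_crtc_commit_put(commit);    /* may free the commit */
+    }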
 struct drm_atomic_state * __must_check
 drm_atomic_state_alloc(struct drm_device *dev);
 void drm_atomic_state_clear(struct drm_atomic_state *state);
@@ -71,7 +77,7 @@ static inline struct drm_crtc_state *
 drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
                                   struct drm_crtc *crtc)
 {
-       return state->crtc_states[drm_crtc_index(crtc)];
+       return state->crtcs[drm_crtc_index(crtc)].state;
 }
 
 /**
@@ -86,7 +92,7 @@ static inline struct drm_plane_state *
 drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
                                    struct drm_plane *plane)
 {
-       return state->plane_states[drm_plane_index(plane)];
+       return state->planes[drm_plane_index(plane)].state;
 }
 
 /**
@@ -106,7 +112,43 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
        if (index >= state->num_connector)
                return NULL;
 
-       return state->connector_states[index];
+       return state->connectors[index].state;
+}
+
+/**
+ * __drm_atomic_get_current_plane_state - get current plane state
+ * @state: global atomic state object
+ * @plane: plane to grab
+ *
+ * This function returns the plane state for the given plane, either from
+ * @state, or if the plane isn't part of the atomic state update, from @plane.
+ * This is useful in atomic check callbacks, when drivers need to peek at, but
+ * not change, state of other planes, since it avoids threading an error code
+ * back up the call chain.
+ *
+ * WARNING:
+ *
+ * Note that this function is in general unsafe since it doesn't check for the
+ * required locking to access the state structures. Drivers must ensure that it is
+ * safe to access the returned state structure through other means. One common
+ * example is when planes are fixed to a single CRTC, and the driver knows that
+ * the CRTC lock is held already. In that case holding the CRTC lock gives a
+ * read-lock on all planes connected to that CRTC. But if planes can be
+ * reassigned things get more tricky. In that case it's better to use
+ * drm_atomic_get_plane_state and wire up full error handling.
+ *
+ * Returns:
+ *
+ * Read-only pointer to the current plane state.
+ */
+static inline const struct drm_plane_state *
+__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
+                                    struct drm_plane *plane)
+{
+       if (state->planes[drm_plane_index(plane)].state)
+               return state->planes[drm_plane_index(plane)].state;
+
+       return plane->state;
 }
 
 int __must_check
@@ -139,29 +181,39 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
 int __must_check drm_atomic_commit(struct drm_atomic_state *state);
 int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
 
-#define for_each_connector_in_state(state, connector, connector_state, __i) \
+#define for_each_connector_in_state(__state, connector, connector_state, __i) \
        for ((__i) = 0;                                                 \
-            (__i) < (state)->num_connector &&                          \
-            ((connector) = (state)->connectors[__i],                   \
-            (connector_state) = (state)->connector_states[__i], 1);    \
+            (__i) < (__state)->num_connector &&                                \
+            ((connector) = (__state)->connectors[__i].ptr,                     \
+            (connector_state) = (__state)->connectors[__i].state, 1);  \
             (__i)++)                                                   \
                for_each_if (connector)
 
-#define for_each_crtc_in_state(state, crtc, crtc_state, __i)   \
+#define for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
        for ((__i) = 0;                                         \
-            (__i) < (state)->dev->mode_config.num_crtc &&      \
-            ((crtc) = (state)->crtcs[__i],                     \
-            (crtc_state) = (state)->crtc_states[__i], 1);      \
+            (__i) < (__state)->dev->mode_config.num_crtc &&    \
+            ((crtc) = (__state)->crtcs[__i].ptr,                       \
+            (crtc_state) = (__state)->crtcs[__i].state, 1);    \
             (__i)++)                                           \
                for_each_if (crtc_state)
 
-#define for_each_plane_in_state(state, plane, plane_state, __i)                \
+#define for_each_plane_in_state(__state, plane, plane_state, __i)              \
        for ((__i) = 0;                                                 \
-            (__i) < (state)->dev->mode_config.num_total_plane &&       \
-            ((plane) = (state)->planes[__i],                           \
-            (plane_state) = (state)->plane_states[__i], 1);            \
+            (__i) < (__state)->dev->mode_config.num_total_plane &&     \
+            ((plane) = (__state)->planes[__i].ptr,                             \
+            (plane_state) = (__state)->planes[__i].state, 1);          \
             (__i)++)                                                   \
                for_each_if (plane_state)
+
+/**
+ * drm_atomic_crtc_needs_modeset - compute combined modeset need
+ * @state: &drm_crtc_state for the CRTC
+ *
+ * To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
+ * whether the CRTC's state changed enough to need a full modeset cycle:
+ * connectors_changed, mode_changed and active_changed. This helper simply
+ * combines these three to compute the overall need for a modeset for @state.
+ */
 static inline bool
 drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
 {
index d473dcc91f540878687d9c0c5fdf25bf56afe5a4..d86ae5dcd7b495fb203ec096068c09d749ab97d1 100644 (file)
@@ -38,6 +38,7 @@ int drm_atomic_helper_check_planes(struct drm_device *dev,
                               struct drm_atomic_state *state);
 int drm_atomic_helper_check(struct drm_device *dev,
                            struct drm_atomic_state *state);
+void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
 int drm_atomic_helper_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool nonblock);
@@ -71,8 +72,15 @@ void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_sta
 void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc,
                                              bool atomic);
 
-void drm_atomic_helper_swap_state(struct drm_device *dev,
-                                 struct drm_atomic_state *state);
+void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
+                                 bool stall);
+
+/* nonblocking commit helpers */
+int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
+                                  bool nonblock);
+void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state);
+void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state);
+void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state);
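
Taken together these helpers let drivers assemble their own commit tail; a
sketch following the default ordering that drm_atomic_helper_commit_tail()
implements (foo_commit_tail is illustrative)::

    static void foo_commit_tail(struct drm_atomic_state *state)
    {
            struct drm_device *dev = state->dev;

            drm_atomic_helper_commit_modeset_disables(dev, state);
            drm_atomic_helper_commit_planes(dev, state, false);
            drm_atomic_helper_commit_modeset_enables(dev, state);

            /* All register writes for this commit are done. */
            drm_atomic_helper_commit_hw_done(state);

            drm_atomic_helper_wait_for_vblanks(dev, state);
            drm_atomic_helper_cleanup_planes(dev, state);
    }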
 
 /* implementations for legacy interfaces */
 int drm_atomic_helper_update_plane(struct drm_plane *plane,
@@ -147,9 +155,9 @@ void
 __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
 void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
                                          struct drm_connector_state *state);
-void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
-                                       u16 *red, u16 *green, u16 *blue,
-                                       uint32_t start, uint32_t size);
+int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+                                      u16 *red, u16 *green, u16 *blue,
+                                      uint32_t size);
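
Atomic drivers can plug the helper straight into the updated ->gamma_set
hook, which now reports errors (sketch; foo_crtc_funcs is made up)::

    static const struct drm_crtc_funcs foo_crtc_funcs = {
            /* ->gamma_set now returns int and takes the ramp size only. */
            .gamma_set = drm_atomic_helper_legacy_gamma_set,
            /* ... */
    };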
 
 /**
  * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
@@ -159,7 +167,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
  * This iterates over the current state, useful (for example) when applying
  * atomic state after it has been checked and swapped.  To iterate over the
  * planes which *will* be attached (for ->atomic_check()) see
- * drm_crtc_for_each_pending_plane()
+ * drm_atomic_crtc_state_for_each_plane().
  */
 #define drm_atomic_crtc_for_each_plane(plane, crtc) \
        drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
@@ -171,11 +179,31 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
  *
  * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
  * attached if the specified state is applied.  Useful during (for example)
- * ->atomic_check() operations, to validate the incoming state
+ * ->atomic_check() operations, to validate the incoming state.
  */
 #define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
        drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
 
+/**
+ * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @plane_state: loop cursor for the plane's state, must be const
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
+ * attached if the specified state is applied.  Useful during (for example)
+ * ->atomic_check() operations, to validate the incoming state.
+ *
+ * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
+ * const plane_state. This is useful when a driver just wants to peek at other
+ * active planes on this crtc, but does not need to change it.
+ */
+#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \
+       drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \
+               for_each_if ((plane_state = \
+                             __drm_atomic_get_current_plane_state((crtc_state)->state, \
+                                                                  plane)))
+
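+
+A sketch of the intended use from an ->atomic_check() hook (foo_* names and
+MAX_HW_WIDTH are illustrative)::
+
+    static int foo_crtc_atomic_check(struct drm_crtc *crtc,
+                                     struct drm_crtc_state *crtc_state)
+    {
+            struct drm_plane *plane;
+            const struct drm_plane_state *plane_state;
+
+            /* Peek at every plane on this CRTC without mutating state. */
+            drm_atomic_crtc_state_for_each_plane_state(plane, plane_state,
+                                                       crtc_state) {
+                    if (plane_state->fb && plane_state->crtc_w > MAX_HW_WIDTH)
+                            return -EINVAL;
+            }
+
+            return 0;
+    }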
 /*
  * drm_atomic_plane_disabling - check whether a plane is being disabled
  * @plane: plane object
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
new file mode 100644 (file)
index 0000000..610223b
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Internal Header for the Direct Rendering Manager
+ *
+ * Copyright 2016 Intel Corporation
+ *
+ * Author: Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_AUTH_H_
+#define _DRM_AUTH_H_
+
+/**
+ * struct drm_master - drm master structure
+ *
+ * @refcount: Refcount for this master object.
+ * @dev: Link back to the DRM device
+ * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
+ * @unique_len: Length of unique field. Protected by drm_global_mutex.
+ * @magic_map: Map of used authentication tokens. Protected by struct_mutex.
+ * @lock: DRI lock information.
+ * @driver_priv: Pointer to driver-private information.
+ *
+ * Note that master structures are only relevant for the legacy/primary device
+ * nodes, hence there can only be one per device, not one per drm_minor.
+ */
+struct drm_master {
+       struct kref refcount;
+       struct drm_device *dev;
+       char *unique;
+       int unique_len;
+       struct idr magic_map;
+       struct drm_lock_data lock;
+       void *driver_priv;
+};
+
+struct drm_master *drm_master_get(struct drm_master *master);
+void drm_master_put(struct drm_master **master);
+bool drm_is_current_master(struct drm_file *fpriv);
+
+#endif
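
A sketch of the new helper guarding a privileged driver path
(foo_priv_ioctl is made up)::

    static int foo_priv_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
    {
            /* Replaces open-coded checks of the old is_master flag. */
            if (!drm_is_current_master(file_priv))
                    return -EACCES;

            /* ... privileged operation ... */
            return 0;
    }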
index d1559cd04e3de85849eee8df7189f4ce2e09cad4..3edeaf88ebc030cafcf3314c35a18bffbd4df3bc 100644 (file)
@@ -44,6 +44,7 @@ struct drm_file;
 struct drm_clip_rect;
 struct device_node;
 struct fence;
+struct edid;
 
 struct drm_mode_object {
        uint32_t id;
@@ -253,6 +254,8 @@ struct drm_framebuffer {
        int bits_per_pixel;
        int flags;
        uint32_t pixel_format; /* fourcc format */
+       int hot_x;
+       int hot_y;
        struct list_head filp_head;
 };
 
@@ -314,6 +317,7 @@ struct drm_plane_helper_funcs;
  *     update to ensure framebuffer cleanup isn't done too early
  * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
  * @mode: current mode timings
+ * @mode_blob: &drm_property_blob for @mode
  * @degamma_lut: Lookup table for converting framebuffer pixel data
  *     before applying the conversion matrix
  * @ctm: Transformation matrix
@@ -478,8 +482,8 @@ struct drm_crtc_funcs {
         * going on, which should eventually be unified to just one set of
         * hooks.
         */
-       void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
-                         uint32_t start, uint32_t size);
+       int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+                        uint32_t size);
 
        /**
         * @destroy:
@@ -701,6 +705,32 @@ struct drm_crtc_funcs {
                                   const struct drm_crtc_state *state,
                                   struct drm_property *property,
                                   uint64_t *val);
+
+       /**
+        * @late_register:
+        *
+        * This optional hook can be used to register additional userspace
+        * interfaces attached to the crtc like debugfs interfaces.
+        * It is called late in the driver load sequence from drm_dev_register().
+        * Everything added from this callback should be unregistered in
+        * the early_unregister callback.
+        *
+        * Returns:
+        *
+        * 0 on success, or a negative error code on failure.
+        */
+       int (*late_register)(struct drm_crtc *crtc);
+
+       /**
+        * @early_unregister:
+        *
+        * This optional hook should be used to unregister the additional
+        * userspace interfaces attached to the crtc from
+        * @late_register(). It is called from drm_dev_unregister(),
+        * early in the driver unload sequence to disable userspace access
+        * before data structures are torn down.
+        */
+       void (*early_unregister)(struct drm_crtc *crtc);
 };
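
A sketch of how the two hooks are meant to pair up (the foo_* debugfs
helpers are hypothetical)::

    static int foo_crtc_late_register(struct drm_crtc *crtc)
    {
            /* Expose extra userspace interfaces only once the device is
             * registered; undone in ->early_unregister below. */
            return foo_crtc_debugfs_init(crtc);
    }

    static void foo_crtc_early_unregister(struct drm_crtc *crtc)
    {
            foo_crtc_debugfs_cleanup(crtc);
    }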
 
 /**
@@ -708,6 +738,7 @@ struct drm_crtc_funcs {
  * @dev: parent DRM device
  * @port: OF node used by drm_of_find_possible_crtcs()
  * @head: list management
+ * @name: human readable name, can be overwritten by the driver
  * @mutex: per-CRTC locking
  * @base: base KMS object for ID tracking etc.
  * @primary: primary plane for this CRTC
@@ -724,9 +755,6 @@ struct drm_crtc_funcs {
  * @gamma_store: gamma ramp values
  * @helper_private: mid-layer private data
  * @properties: property tracking for this CRTC
- * @state: current atomic state for this CRTC
- * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
- *     legacy IOCTLs
  *
  * Each CRTC may have one or more connectors associated with it.  This structure
  * allows the CRTC to be controlled.
@@ -738,12 +766,13 @@ struct drm_crtc {
 
        char *name;
 
-       /*
-        * crtc mutex
+       /**
+        * @mutex:
         *
         * This provides a read lock for the overall crtc state (mode, dpms
         * state, ...) and a write lock for everything which can be updated
-        * without a full modeset (fb, cursor data, ...)
+        * without a full modeset (fb, cursor data, crtc properties ...). Full
+        * modesets also need to grab dev->mode_config.connection_mutex.
         */
        struct drm_modeset_lock mutex;
 
@@ -753,6 +782,12 @@ struct drm_crtc {
        struct drm_plane *primary;
        struct drm_plane *cursor;
 
+       /**
+        * @index: Position inside the mode_config.list, can be used as an array
+        * index. It is invariant over the lifetime of the CRTC.
+        */
+       unsigned index;
+
        /* position of cursor plane on crtc */
        int cursor_x;
        int cursor_y;
@@ -779,11 +814,37 @@ struct drm_crtc {
 
        struct drm_object_properties properties;
 
+       /**
+        * @state:
+        *
+        * Current atomic state for this CRTC.
+        */
        struct drm_crtc_state *state;
 
-       /*
-        * For legacy crtc IOCTLs so that atomic drivers can get at the locking
-        * acquire context.
+       /**
+        * @commit_list:
+        *
+        * List of &drm_crtc_commit structures tracking pending commits.
+        * Protected by @commit_lock. This list doesn't hold its own full
+        * reference, but borrows it from the ongoing commit. Commit entries
+        * must be removed from this list once the commit is fully completed,
+        * but before its corresponding &drm_atomic_state gets destroyed.
+        */
+       struct list_head commit_list;
+
+       /**
+        * @commit_lock:
+        *
+        * Spinlock to protect @commit_list.
+        */
+       spinlock_t commit_lock;
+
+       /**
+        * @acquire_ctx:
+        *
+        * Per-CRTC implicit acquire context used by atomic drivers for legacy
+        * IOCTLs, so that atomic drivers can get at the locking acquire
+        * context.
         */
        struct drm_modeset_acquire_ctx *acquire_ctx;
 };
@@ -925,6 +986,33 @@ struct drm_connector_funcs {
        int (*set_property)(struct drm_connector *connector, struct drm_property *property,
                             uint64_t val);
 
+       /**
+        * @late_register:
+        *
+        * This optional hook can be used to register additional userspace
+        * interfaces attached to the connector, like backlight control, i2c,
+        * DP aux or similar interfaces. It is called late in the driver load
+        * sequence from drm_connector_register() when registering all the
+        * core drm connector interfaces. Everything added from this callback
+        * should be unregistered in the early_unregister callback.
+        *
+        * Returns:
+        *
+        * 0 on success, or a negative error code on failure.
+        */
+       int (*late_register)(struct drm_connector *connector);
+
+       /**
+        * @early_unregister:
+        *
+        * This optional hook should be used to unregister the additional
+        * userspace interfaces attached to the connector from
+        * @late_register(). It is called from drm_connector_unregister(),
+        * early in the driver unload sequence to disable userspace access
+        * before data structures are torn down.
+        */
+       void (*early_unregister)(struct drm_connector *connector);
+
        /**
         * @destroy:
         *
@@ -1069,6 +1157,32 @@ struct drm_encoder_funcs {
         * hotplugged in DRM.
         */
        void (*destroy)(struct drm_encoder *encoder);
+
+       /**
+        * @late_register:
+        *
+        * This optional hook can be used to register additional userspace
+        * interfaces attached to the encoder like debugfs interfaces.
+        * It is called late in the driver load sequence from drm_dev_register().
+        * Everything added from this callback should be unregistered in
+        * the early_unregister callback.
+        *
+        * Returns:
+        *
+        * 0 on success, or a negative error code on failure.
+        */
+       int (*late_register)(struct drm_encoder *encoder);
+
+       /**
+        * @early_unregister:
+        *
+        * This optional hook should be used to unregister the additional
+        * userspace interfaces attached to the encoder from
+        * @late_register(). It is called from drm_dev_unregister(),
+        * early in the driver unload sequence to disable userspace access
+        * before data structures are torn down.
+        */
+       void (*early_unregister)(struct drm_encoder *encoder);
 };
 
 #define DRM_CONNECTOR_MAX_ENCODER 3
@@ -1078,7 +1192,7 @@ struct drm_encoder_funcs {
  * @dev: parent DRM device
  * @head: list management
  * @base: base KMS object
- * @name: encoder name
+ * @name: human readable name, can be overwritten by the driver
  * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
  * @possible_crtcs: bitmask of potential CRTC bindings
  * @possible_clones: bitmask of potential sibling encoders for cloning
@@ -1097,6 +1211,13 @@ struct drm_encoder {
        struct drm_mode_object base;
        char *name;
        int encoder_type;
+
+       /**
+        * @index: Position inside the mode_config.list, can be used as an array
+        * index. It is invariant over the lifetime of the encoder.
+        */
+       unsigned index;
+
        uint32_t possible_crtcs;
        uint32_t possible_clones;
 
@@ -1124,12 +1245,13 @@ struct drm_encoder {
  * @attr: sysfs attributes
  * @head: list management
  * @base: base KMS object
- * @name: connector name
+ * @name: human readable name, can be overwritten by the driver
  * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
  * @connector_type_id: index into connector type enum
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
  * @stereo_allowed: can this connector handle stereo modes?
+ * @registered: is this connector exposed (registered) with userspace?
  * @modes: modes available on this connector (from fill_modes() + user)
  * @status: one of the drm_connector_status enums (connected, not, or unknown)
  * @probed_modes: list of modes derived directly from the display
@@ -1137,7 +1259,6 @@ struct drm_encoder {
  * @funcs: connector control functions
  * @edid_blob_ptr: DRM property containing EDID if present
  * @properties: property tracking for this connector
- * @path_blob_ptr: DRM blob property data for the DP MST path property
  * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
  * @dpms: current dpms state
  * @helper_private: mid-layer private data
@@ -1181,12 +1302,21 @@ struct drm_connector {
        struct drm_mode_object base;
 
        char *name;
-       int connector_id;
+
+       /**
+        * @index: Compacted connector index, which matches the position inside
+        * the mode_config.list for drivers not supporting hot-add/removing. Can
+        * be used as an array index. It is invariant over the lifetime of the
+        * connector.
+        */
+       unsigned index;
+
        int connector_type;
        int connector_type_id;
        bool interlace_allowed;
        bool doublescan_allowed;
        bool stereo_allowed;
+       bool registered;
        struct list_head modes; /* list of modes on this connector */
 
        enum drm_connector_status status;
@@ -1200,8 +1330,23 @@ struct drm_connector {
        struct drm_property_blob *edid_blob_ptr;
        struct drm_object_properties properties;
 
+       /**
+        * @path_blob_ptr:
+        *
+        * DRM blob property data for the DP MST path property.
+        */
        struct drm_property_blob *path_blob_ptr;
 
+       /**
+        * @tile_blob_ptr:
+        *
+        * DRM blob property data for the tile property (used mostly by DP MST).
+        * This is meant for screens which are driven through separate display
+        * pipelines represented by &drm_crtc, which might not be running with
+        * genlocked clocks. For tiled panels which are genlocked, like
+        * dual-link LVDS or dual-link DSI, the driver should try not to expose
+        * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+        */
        struct drm_property_blob *tile_blob_ptr;
 
        uint8_t polled; /* DRM_CONNECTOR_POLL_* */
@@ -1263,6 +1408,7 @@ struct drm_connector {
  *     plane (in 16.16)
  * @src_w: width of visible portion of plane (in 16.16)
  * @src_h: height of visible portion of plane (in 16.16)
+ * @rotation: rotation of the plane
  * @state: backpointer to global drm_atomic_state
  */
 struct drm_plane_state {
@@ -1490,6 +1636,31 @@ struct drm_plane_funcs {
                                   const struct drm_plane_state *state,
                                   struct drm_property *property,
                                   uint64_t *val);
+       /**
+        * @late_register:
+        *
+        * This optional hook can be used to register additional userspace
+        * interfaces attached to the plane like debugfs interfaces.
+        * It is called late in the driver load sequence from drm_dev_register().
+        * Everything added from this callback should be unregistered in
+        * the early_unregister callback.
+        *
+        * Returns:
+        *
+        * 0 on success, or a negative error code on failure.
+        */
+       int (*late_register)(struct drm_plane *plane);
+
+       /**
+        * @early_unregister:
+        *
+        * This optional hook should be used to unregister the additional
+        * userspace interfaces attached to the plane from
+        * @late_register(). It is called from drm_dev_unregister(),
+        * early in the driver unload sequence to disable userspace access
+        * before data structures are torn down.
+        */
+       void (*early_unregister)(struct drm_plane *plane);
 };
 
 enum drm_plane_type {
@@ -1503,6 +1674,7 @@ enum drm_plane_type {
  * struct drm_plane - central DRM plane control structure
  * @dev: DRM device this plane belongs to
  * @head: for list management
+ * @name: human readable name, can be overwritten by the driver
  * @base: base mode object
  * @possible_crtcs: pipes this plane can be bound to
  * @format_types: array of formats supported by this plane
@@ -1516,6 +1688,7 @@ enum drm_plane_type {
  * @properties: property tracking for this plane
  * @type: type of plane (overlay, primary, cursor)
  * @state: current atomic state for this plane
+ * @helper_private: mid-layer private data
  */
 struct drm_plane {
        struct drm_device *dev;
@@ -1523,6 +1696,13 @@ struct drm_plane {
 
        char *name;
 
+       /**
+        * @mutex:
+        *
+        * Protects modeset plane state, together with the mutex of &drm_crtc
+        * this plane is linked to (when active, getting activated or getting
+        * disabled).
+        */
        struct drm_modeset_lock mutex;
 
        struct drm_mode_object base;
@@ -1543,6 +1723,12 @@ struct drm_plane {
 
        enum drm_plane_type type;
 
+       /**
+        * @index: Position inside the mode_config.list, can be used as an array
+        * index. It is invariant over the lifetime of the plane.
+        */
+       unsigned index;
+
        const struct drm_plane_helper_funcs *helper_private;
 
        struct drm_plane_state *state;
@@ -1693,19 +1879,137 @@ struct drm_bridge {
        void *driver_private;
 };
 
+/**
+ * struct drm_crtc_commit - track modeset commits on a CRTC
+ *
+ * This structure is used to track pending modeset changes and atomic commit on
+ * a per-CRTC basis. Since updating the list should never block, this structure
+ * is reference counted to allow waiters to safely wait on an event to complete,
+ * without holding any locks.
+ *
+ * It has 3 different events in total to allow a fine-grained synchronization
+ * between outstanding updates::
+ *
+ *     atomic commit thread                    hardware
+ *
+ *     write new state into hardware   ---->   ...
+ *     signal hw_done
+ *                                             switch to new state on next
+ *     ...                                     v/hblank
+ *
+ *     wait for buffers to show up             ...
+ *
+ *     ...                                     send completion irq
+ *                                             irq handler signals flip_done
+ *     cleanup old buffers
+ *
+ *     signal cleanup_done
+ *
+ *     wait for flip_done              <----
+ *     clean up atomic state
+ *
+ * The important bit to know is that cleanup_done is the terminal event, but the
+ * ordering between flip_done and hw_done is entirely up to the specific driver
+ * and modeset state change.
+ *
+ * For an implementation of how to use this look at
+ * drm_atomic_helper_setup_commit() from the atomic helper library.
+ */
+struct drm_crtc_commit {
+       /**
+        * @crtc:
+        *
+        * DRM CRTC for this commit.
+        */
+       struct drm_crtc *crtc;
+
+       /**
+        * @ref:
+        *
+        * Reference count for this structure. Needed to allow blocking on
+        * completions without the risk of the completion disappearing
+        * meanwhile.
+        */
+       struct kref ref;
+
+       /**
+        * @flip_done:
+        *
+        * Will be signaled when the hardware has flipped to the new set of
+        * buffers. Signals at the same time as when the drm event for this
+        * commit is sent to userspace, or when an out-fence is signalled. Note
+        * that for most hardware, in most cases this happens after @hw_done is
+        * signalled.
+        */
+       struct completion flip_done;
+
+       /**
+        * @hw_done:
+        *
+        * Will be signalled when all hw register changes for this commit have
+        * been written out. Especially when disabling a pipe this can be much
+        * later than @flip_done, since that can signal already when the
+        * screen goes black, whereas to fully shut down a pipe more register
+        * I/O is required.
+        *
+        * Note that this does not need to include separately reference-counted
+        * resources like backing storage buffer pinning, or runtime pm
+        * management.
+        */
+       struct completion hw_done;
+
+       /**
+        * @cleanup_done:
+        *
+        * Will be signalled after old buffers have been cleaned up by calling
+        * drm_atomic_helper_cleanup_planes(). Since this can only happen after
+        * a vblank wait has completed, it might be a bit later. This completion is
+        * useful to throttle updates and avoid hardware updates getting ahead
+        * of the buffer cleanup too much.
+        */
+       struct completion cleanup_done;
+
+       /**
+        * @commit_entry:
+        *
+        * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock.
+        */
+       struct list_head commit_entry;
+
+       /**
+        * @event:
+        *
+        * &drm_pending_vblank_event pointer to clean up private events.
+        */
+       struct drm_pending_vblank_event *event;
+};
+
+struct __drm_planes_state {
+       struct drm_plane *ptr;
+       struct drm_plane_state *state;
+};
+
+struct __drm_crtcs_state {
+       struct drm_crtc *ptr;
+       struct drm_crtc_state *state;
+       struct drm_crtc_commit *commit;
+};
+
+struct __drm_connnectors_state {
+       struct drm_connector *ptr;
+       struct drm_connector_state *state;
+};
+
 /**
  * struct drm_atomic_state - the global state object for atomic updates
  * @dev: parent DRM device
  * @allow_modeset: allow full modeset
  * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
  * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
- * @planes: pointer to array of plane pointers
- * @plane_states: pointer to array of plane states pointers
+ * @planes: pointer to array of structures with per-plane data
  * @crtcs: pointer to array of structures with per-CRTC data
- * @crtc_states: pointer to array of CRTC states pointers
  * @num_connector: size of the @connectors and @connector_states arrays
- * @connectors: pointer to array of connector pointers
- * @connector_states: pointer to array of connector states pointers
+ * @connectors: pointer to array of structures with per-connector data
  * @acquire_ctx: acquire context for this atomic modeset state update
  */
 struct drm_atomic_state {
@@ -1713,15 +2017,20 @@ struct drm_atomic_state {
        bool allow_modeset : 1;
        bool legacy_cursor_update : 1;
        bool legacy_set_config : 1;
-       struct drm_plane **planes;
-       struct drm_plane_state **plane_states;
-       struct drm_crtc **crtcs;
-       struct drm_crtc_state **crtc_states;
+       struct __drm_planes_state *planes;
+       struct __drm_crtcs_state *crtcs;
        int num_connector;
-       struct drm_connector **connectors;
-       struct drm_connector_state **connector_states;
+       struct __drm_connnectors_state *connectors;
 
        struct drm_modeset_acquire_ctx *acquire_ctx;
+
+       /**
+        * @commit_work:
+        *
+        * Work item which can be used by the driver or helpers to execute the
+        * commit without blocking.
+        */
+       struct work_struct commit_work;
 };
 
 
@@ -2022,13 +2331,9 @@ struct drm_mode_config_funcs {
  * @connection_mutex: ww mutex protecting connector state and routing
  * @acquire_ctx: global implicit acquire context used by atomic drivers for
  *     legacy IOCTLs
- * @idr_mutex: mutex for KMS ID allocation and management
- * @crtc_idr: main KMS ID tracking object
  * @fb_lock: mutex to protect fb state and lists
  * @num_fb: number of fbs available
  * @fb_list: list of framebuffers available
- * @num_connector: number of connectors on this device
- * @connector_list: list of connector objects
  * @num_encoder: number of encoders on this device
  * @encoder_list: list of encoder objects
  * @num_overlay_plane: number of overlay planes on this device
@@ -2045,24 +2350,16 @@ struct drm_mode_config_funcs {
  * @fb_base: base address of the framebuffer
  * @poll_enabled: track polling support for this device
  * @poll_running: track polling status for this device
+ * @delayed_event: track delayed poll uevent delivery for this device
  * @output_poll_work: delayed work for polling in process context
  * @property_blob_list: list of all the blob property objects
  * @blob_lock: mutex for blob property allocation and management
  * @*_property: core property tracking
- * @degamma_lut_property: LUT used to convert the framebuffer's colors to linear
- *     gamma
- * @degamma_lut_size_property: size of the degamma LUT as supported by the
- *     driver (read-only)
- * @ctm_property: Matrix used to convert colors after the lookup in the
- *     degamma LUT
- * @gamma_lut_property: LUT used to convert the colors, after the CSC matrix, to
- *     the gamma space of the connected screen (read-only)
- * @gamma_lut_size_property: size of the gamma LUT as supported by the driver
  * @preferred_depth: preferred RGB pixel depth, used by fb helpers
  * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
- * @async_page_flip: does this device support async flips on the primary plane?
  * @cursor_width: hint to userspace for max cursor width
  * @cursor_height: hint to userspace for max cursor height
+ * @helper_private: mid-layer private data
  *
  * Core mode resource tracking structure.  All CRTC, encoders, and connectors
  * enumerated by the driver are added here, as are global properties.  Some
@@ -2072,17 +2369,46 @@ struct drm_mode_config {
        struct mutex mutex; /* protects configuration (mode lists etc.) */
        struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
        struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
-       struct mutex idr_mutex; /* for IDR management */
-       struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
-       struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
-       /* this is limited to one for now */
+
+       /**
+        * @idr_mutex:
+        *
+        * Mutex for KMS ID allocation and management. Protects both @crtc_idr
+        * and @tile_idr.
+        */
+       struct mutex idr_mutex;
+
+       /**
+        * @crtc_idr:
+        *
+        * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
+        * connector, modes - just makes life easier to have only one.
+        */
+       struct idr crtc_idr;
+
+       /**
+        * @tile_idr:
+        *
+        * Use this idr for allocating new IDs for tiled sinks, like those
+        * used in some high-res DP MST screens.
+        */
+       struct idr tile_idr;
 
        struct mutex fb_lock; /* protects global and per-file fb lists */
        int num_fb;
        struct list_head fb_list;
 
+       /**
+        * @num_connector: Number of connectors on this device.
+        */
        int num_connector;
+       /**
+        * @connector_ida: ID allocator for connector indices.
+        */
        struct ida connector_ida;
+       /**
+        * @connector_list: List of connector objects.
+        */
        struct list_head connector_list;
        int num_encoder;
        struct list_head encoder_list;
@@ -2117,71 +2443,251 @@ struct drm_mode_config {
 
        /* pointers to standard properties */
        struct list_head property_blob_list;
+       /**
+        * @edid_property: Default connector property to hold the EDID of the
+        * currently connected sink, if any.
+        */
        struct drm_property *edid_property;
+       /**
+        * @dpms_property: Default connector property to control the
+        * connector's DPMS state.
+        */
        struct drm_property *dpms_property;
+       /**
+        * @path_property: Default connector property to hold the DP MST path
+        * for the port.
+        */
        struct drm_property *path_property;
+       /**
+        * @tile_property: Default connector property to store the tile
+        * position of a tiled screen, for sinks which need to be driven with
+        * multiple CRTCs.
+        */
        struct drm_property *tile_property;
+       /**
+        * @plane_type_property: Default plane property to differentiate
+        * CURSOR, PRIMARY and OVERLAY legacy uses of planes.
+        */
        struct drm_property *plane_type_property;
+       /**
+        * @rotation_property: Optional property for planes or CRTCs to specify
+        * rotation.
+        */
        struct drm_property *rotation_property;
+       /**
+        * @prop_src_x: Default atomic plane property for the plane source
+        * position in the connected &drm_framebuffer.
+        */
        struct drm_property *prop_src_x;
+       /**
+        * @prop_src_y: Default atomic plane property for the plane source
+        * position in the connected &drm_framebuffer.
+        */
        struct drm_property *prop_src_y;
+       /**
+        * @prop_src_w: Default atomic plane property for the plane source
+        * position in the connected &drm_framebuffer.
+        */
        struct drm_property *prop_src_w;
+       /**
+        * @prop_src_h: Default atomic plane property for the plane source
+        * position in the connected &drm_framebuffer.
+        */
        struct drm_property *prop_src_h;
+       /**
+        * @prop_crtc_x: Default atomic plane property for the plane destination
+        * position in the &drm_crtc it is being shown on.
+        */
        struct drm_property *prop_crtc_x;
+       /**
+        * @prop_crtc_y: Default atomic plane property for the plane destination
+        * position in the &drm_crtc it is being shown on.
+        */
        struct drm_property *prop_crtc_y;
+       /**
+        * @prop_crtc_w: Default atomic plane property for the plane destination
+        * position in the &drm_crtc it is being shown on.
+        */
        struct drm_property *prop_crtc_w;
+       /**
+        * @prop_crtc_h: Default atomic plane property for the plane destination
+        * position in the &drm_crtc it is being shown on.
+        */
        struct drm_property *prop_crtc_h;
+       /**
+        * @prop_fb_id: Default atomic plane property to specify the
+        * &drm_framebuffer.
+        */
        struct drm_property *prop_fb_id;
+       /**
+        * @prop_crtc_id: Default atomic plane property to specify the
+        * &drm_crtc.
+        */
        struct drm_property *prop_crtc_id;
+       /**
+        * @prop_active: Default atomic CRTC property to control the active
+        * state, which is the simplified implementation for DPMS in atomic
+        * drivers.
+        */
        struct drm_property *prop_active;
+       /**
+        * @prop_mode_id: Default atomic CRTC property to set the mode for a
+        * CRTC. A 0 mode implies that the CRTC is entirely disabled - all
+        * connectors must be off and @prop_active must be set to disabled, too.
+        */
        struct drm_property *prop_mode_id;
 
-       /* DVI-I properties */
+       /**
+        * @dvi_i_subconnector_property: Optional DVI-I property to
+        * differentiate between analog and digital mode.
+        */
        struct drm_property *dvi_i_subconnector_property;
+       /**
+        * @dvi_i_select_subconnector_property: Optional DVI-I property to
+        * select between analog and digital mode.
+        */
        struct drm_property *dvi_i_select_subconnector_property;
 
-       /* TV properties */
+       /**
+        * @tv_subconnector_property: Optional TV property to differentiate
+        * between different TV connector types.
+        */
        struct drm_property *tv_subconnector_property;
+       /**
+        * @tv_select_subconnector_property: Optional TV property to select
+        * between different TV connector types.
+        */
        struct drm_property *tv_select_subconnector_property;
+       /**
+        * @tv_mode_property: Optional TV property to select
+        * the output TV mode.
+        */
        struct drm_property *tv_mode_property;
+       /**
+        * @tv_left_margin_property: Optional TV property to set the left
+        * margin.
+        */
        struct drm_property *tv_left_margin_property;
+       /**
+        * @tv_right_margin_property: Optional TV property to set the right
+        * margin.
+        */
        struct drm_property *tv_right_margin_property;
+       /**
+        * @tv_top_margin_property: Optional TV property to set the top
+        * margin.
+        */
        struct drm_property *tv_top_margin_property;
+       /**
+        * @tv_bottom_margin_property: Optional TV property to set the bottom
+        * margin.
+        */
        struct drm_property *tv_bottom_margin_property;
+       /**
+        * @tv_brightness_property: Optional TV property to set the
+        * brightness.
+        */
        struct drm_property *tv_brightness_property;
+       /**
+        * @tv_contrast_property: Optional TV property to set the
+        * contrast.
+        */
        struct drm_property *tv_contrast_property;
+       /**
+        * @tv_flicker_reduction_property: Optional TV property to control the
+        * flicker reduction mode.
+        */
        struct drm_property *tv_flicker_reduction_property;
+       /**
+        * @tv_overscan_property: Optional TV property to control the overscan
+        * setting.
+        */
        struct drm_property *tv_overscan_property;
+       /**
+        * @tv_saturation_property: Optional TV property to set the
+        * saturation.
+        */
        struct drm_property *tv_saturation_property;
+       /**
+        * @tv_hue_property: Optional TV property to set the hue.
+        */
        struct drm_property *tv_hue_property;
 
-       /* Optional properties */
+       /**
+        * @scaling_mode_property: Optional connector property to control the
+        * upscaling, mostly used for built-in panels.
+        */
        struct drm_property *scaling_mode_property;
+       /**
+        * @aspect_ratio_property: Optional connector property to control the
+        * HDMI infoframe aspect ratio setting.
+        */
        struct drm_property *aspect_ratio_property;
+       /**
+        * @dirty_info_property: Optional connector property to give userspace a
+        * hint that the DIRTY_FB ioctl should be used.
+        */
        struct drm_property *dirty_info_property;
 
-       /* Optional color correction properties */
+       /**
+        * @degamma_lut_property: Optional CRTC property to set the LUT used to
+        * convert the framebuffer's colors to linear gamma.
+        */
        struct drm_property *degamma_lut_property;
+       /**
+        * @degamma_lut_size_property: Optional CRTC property for the size of
+        * the degamma LUT as supported by the driver (read-only).
+        */
        struct drm_property *degamma_lut_size_property;
+       /**
+        * @ctm_property: Optional CRTC property to set the
+        * matrix used to convert colors after the lookup in the
+        * degamma LUT.
+        */
        struct drm_property *ctm_property;
+       /**
+        * @gamma_lut_property: Optional CRTC property to set the LUT used to
+        * convert the colors, after the CTM matrix, to the gamma space of the
+        * connected screen.
+        */
        struct drm_property *gamma_lut_property;
+       /**
+        * @gamma_lut_size_property: Optional CRTC property for the size of the
+        * gamma LUT as supported by the driver (read-only).
+        */
        struct drm_property *gamma_lut_size_property;
 
-       /* properties for virtual machine layout */
+       /**
+        * @suggested_x_property: Optional connector property with a hint for
+        * the position of the output on the host's screen.
+        */
        struct drm_property *suggested_x_property;
+       /**
+        * @suggested_y_property: Optional connector property with a hint for
+        * the position of the output on the host's screen.
+        */
        struct drm_property *suggested_y_property;
 
        /* dumb ioctl parameters */
        uint32_t preferred_depth, prefer_shadow;
 
-       /* whether async page flip is supported or not */
+       /**
+        * @async_page_flip: Does this device support async flips on the primary
+        * plane?
+        */
        bool async_page_flip;
 
-       /* whether the driver supports fb modifiers */
+       /**
+        * @allow_fb_modifiers:
+        *
+        * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+        */
        bool allow_fb_modifiers;
 
        /* cursor size */
        uint32_t cursor_width, cursor_height;
+
+       struct drm_mode_config_helper_funcs *helper_private;
 };
 
 /**
@@ -2230,7 +2736,18 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
                              const struct drm_crtc_funcs *funcs,
                              const char *name, ...);
 extern void drm_crtc_cleanup(struct drm_crtc *crtc);
-extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
+
+/**
+ * drm_crtc_index - find the index of a registered CRTC
+ * @crtc: CRTC to find index for
+ *
+ * Given a registered CRTC, return the index of that CRTC within a DRM
+ * device's list of CRTCs.
+ */
+static inline unsigned int drm_crtc_index(struct drm_crtc *crtc)
+{
+       return crtc->index;
+}
 
 /**
  * drm_crtc_mask - find the mask of a registered CRTC
@@ -2244,47 +2761,36 @@ static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
        return 1 << drm_crtc_index(crtc);
 }
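
As a hedged usage sketch (not part of this patch): drm_crtc_mask() is what drivers typically use to fill the possible_crtcs bitmask of encoders and planes. The example_ function name below is hypothetical; the helpers are the ones declared above.

static void example_set_possible_crtcs(struct drm_encoder *encoder,
				       struct drm_crtc *crtc0,
				       struct drm_crtc *crtc1)
{
	/* Each registered CRTC contributes the bit at its index. */
	encoder->possible_crtcs = drm_crtc_mask(crtc0) | drm_crtc_mask(crtc1);
}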
 
-extern void drm_connector_ida_init(void);
-extern void drm_connector_ida_destroy(void);
-extern int drm_connector_init(struct drm_device *dev,
-                             struct drm_connector *connector,
-                             const struct drm_connector_funcs *funcs,
-                             int connector_type);
+int drm_connector_init(struct drm_device *dev,
+                      struct drm_connector *connector,
+                      const struct drm_connector_funcs *funcs,
+                      int connector_type);
 int drm_connector_register(struct drm_connector *connector);
 void drm_connector_unregister(struct drm_connector *connector);
 
 extern void drm_connector_cleanup(struct drm_connector *connector);
 static inline unsigned drm_connector_index(struct drm_connector *connector)
 {
-       return connector->connector_id;
+       return connector->index;
 }
 
-/* helpers to {un}register all connectors from sysfs for device */
-extern int drm_connector_register_all(struct drm_device *dev);
-extern void drm_connector_unregister_all(struct drm_device *dev);
-
-extern int drm_bridge_add(struct drm_bridge *bridge);
-extern void drm_bridge_remove(struct drm_bridge *bridge);
-extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
-extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
-
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
-                       const struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode);
-void drm_bridge_disable(struct drm_bridge *bridge);
-void drm_bridge_post_disable(struct drm_bridge *bridge);
-void drm_bridge_mode_set(struct drm_bridge *bridge,
-                       struct drm_display_mode *mode,
-                       struct drm_display_mode *adjusted_mode);
-void drm_bridge_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_enable(struct drm_bridge *bridge);
-
 extern __printf(5, 6)
 int drm_encoder_init(struct drm_device *dev,
                     struct drm_encoder *encoder,
                     const struct drm_encoder_funcs *funcs,
                     int encoder_type, const char *name, ...);
-extern unsigned int drm_encoder_index(struct drm_encoder *encoder);
+
+/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+       return encoder->index;
+}
 
 /**
  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -2315,17 +2821,24 @@ extern int drm_plane_init(struct drm_device *dev,
                          const uint32_t *formats, unsigned int format_count,
                          bool is_primary);
 extern void drm_plane_cleanup(struct drm_plane *plane);
-extern unsigned int drm_plane_index(struct drm_plane *plane);
+
+/**
+ * drm_plane_index - find the index of a registered plane
+ * @plane: plane to find index for
+ *
+ * Given a registered plane, return the index of that plane within a DRM
+ * device's list of planes.
+ */
+static inline unsigned int drm_plane_index(struct drm_plane *plane)
+{
+       return plane->index;
+}
 extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
 extern void drm_plane_force_disable(struct drm_plane *plane);
-extern int drm_plane_check_pixel_format(const struct drm_plane *plane,
-                                       u32 format);
 extern void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
                                   int *hdisplay, int *vdisplay);
-extern int drm_crtc_check_viewport(const struct drm_crtc *crtc,
-                                  int x, int y,
-                                  const struct drm_display_mode *mode,
-                                  const struct drm_framebuffer *fb);
+extern int drm_crtc_force_disable(struct drm_crtc *crtc);
+extern int drm_crtc_force_disable_all(struct drm_device *dev);
 
 extern void drm_encoder_cleanup(struct drm_encoder *encoder);
 
@@ -2336,16 +2849,6 @@ extern const char *drm_get_dvi_i_subconnector_name(int val);
 extern const char *drm_get_dvi_i_select_name(int val);
 extern const char *drm_get_tv_subconnector_name(int val);
 extern const char *drm_get_tv_select_name(int val);
-extern void drm_fb_release(struct drm_file *file_priv);
-extern void drm_property_destroy_user_blobs(struct drm_device *dev,
-                                            struct drm_file *file_priv);
-extern bool drm_probe_ddc(struct i2c_adapter *adapter);
-extern struct edid *drm_get_edid(struct drm_connector *connector,
-                                struct i2c_adapter *adapter);
-extern struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
-                                           struct i2c_adapter *adapter);
-extern struct edid *drm_edid_duplicate(const struct edid *edid);
-extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_config_init(struct drm_device *dev);
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
@@ -2369,13 +2872,6 @@ static inline bool drm_property_type_is(struct drm_property *property,
        return property->flags & type;
 }
 
-static inline bool drm_property_type_valid(struct drm_property *property)
-{
-       if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
-               return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
-       return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
-}
-
 extern int drm_object_property_set_value(struct drm_mode_object *obj,
                                         struct drm_property *property,
                                         uint64_t val);
@@ -2433,86 +2929,15 @@ extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
 extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
 extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
 extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
-extern bool drm_property_change_valid_get(struct drm_property *property,
-                                        uint64_t value, struct drm_mode_object **ref);
-extern void drm_property_change_valid_put(struct drm_property *property,
-               struct drm_mode_object *ref);
 
 extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
                                             struct drm_encoder *encoder);
 extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
                                         int gamma_size);
-extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
-               uint32_t id, uint32_t type);
-void drm_mode_object_reference(struct drm_mode_object *obj);
-void drm_mode_object_unreference(struct drm_mode_object *obj);
 
-/* IOCTLs */
-extern int drm_mode_getresources(struct drm_device *dev,
-                                void *data, struct drm_file *file_priv);
-extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
-                                  struct drm_file *file_priv);
-extern int drm_mode_getcrtc(struct drm_device *dev,
-                           void *data, struct drm_file *file_priv);
-extern int drm_mode_getconnector(struct drm_device *dev,
-                             void *data, struct drm_file *file_priv);
 extern int drm_mode_set_config_internal(struct drm_mode_set *set);
-extern int drm_mode_setcrtc(struct drm_device *dev,
-                           void *data, struct drm_file *file_priv);
-extern int drm_mode_getplane(struct drm_device *dev,
-                              void *data, struct drm_file *file_priv);
-extern int drm_mode_setplane(struct drm_device *dev,
-                              void *data, struct drm_file *file_priv);
-extern int drm_mode_cursor_ioctl(struct drm_device *dev,
-                               void *data, struct drm_file *file_priv);
-extern int drm_mode_cursor2_ioctl(struct drm_device *dev,
-                               void *data, struct drm_file *file_priv);
-extern int drm_mode_addfb(struct drm_device *dev,
-                         void *data, struct drm_file *file_priv);
-extern int drm_mode_addfb2(struct drm_device *dev,
-                          void *data, struct drm_file *file_priv);
+
 extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
-extern int drm_mode_rmfb(struct drm_device *dev,
-                        void *data, struct drm_file *file_priv);
-extern int drm_mode_getfb(struct drm_device *dev,
-                         void *data, struct drm_file *file_priv);
-extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
-                                 void *data, struct drm_file *file_priv);
-
-extern int drm_mode_getproperty_ioctl(struct drm_device *dev,
-                                     void *data, struct drm_file *file_priv);
-extern int drm_mode_getblob_ioctl(struct drm_device *dev,
-                                 void *data, struct drm_file *file_priv);
-extern int drm_mode_createblob_ioctl(struct drm_device *dev,
-                                    void *data, struct drm_file *file_priv);
-extern int drm_mode_destroyblob_ioctl(struct drm_device *dev,
-                                     void *data, struct drm_file *file_priv);
-extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
-                                             void *data, struct drm_file *file_priv);
-extern int drm_mode_getencoder(struct drm_device *dev,
-                              void *data, struct drm_file *file_priv);
-extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
-                                   void *data, struct drm_file *file_priv);
-extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
-                                   void *data, struct drm_file *file_priv);
-extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-extern enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
-extern bool drm_detect_hdmi_monitor(struct edid *edid);
-extern bool drm_detect_monitor_audio(struct edid *edid);
-extern bool drm_rgb_quant_range_selectable(struct edid *edid);
-extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
-                                   void *data, struct drm_file *file_priv);
-extern int drm_add_modes_noedid(struct drm_connector *connector,
-                               int hdisplay, int vdisplay);
-extern void drm_set_preferred_mode(struct drm_connector *connector,
-                                  int hpref, int vpref);
-
-extern int drm_edid_header_is_valid(const u8 *raw_edid);
-extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
-                                bool *edid_corrupt);
-extern bool drm_edid_is_valid(struct edid *edid);
-extern void drm_edid_get_monitor_name(struct edid *edid, char *name,
-                                     int buflen);
 
 extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
                                                         char topology[8]);
@@ -2520,41 +2945,24 @@ extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
                                               char topology[8]);
 extern void drm_mode_put_tile_group(struct drm_device *dev,
                                   struct drm_tile_group *tg);
-struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
-                                          int hsize, int vsize, int fresh,
-                                          bool rb);
 
-extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
-                                     void *data, struct drm_file *file_priv);
-extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
-                                   void *data, struct drm_file *file_priv);
-extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
-                                     void *data, struct drm_file *file_priv);
-extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
-                                            struct drm_file *file_priv);
-extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
-                                          struct drm_file *file_priv);
 extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
                                       struct drm_property *property,
                                       uint64_t value);
-extern int drm_mode_atomic_ioctl(struct drm_device *dev,
-                                void *data, struct drm_file *file_priv);
-
-extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
-                                int *bpp);
-extern int drm_format_num_planes(uint32_t format);
-extern int drm_format_plane_cpp(uint32_t format, int plane);
-extern int drm_format_horz_chroma_subsampling(uint32_t format);
-extern int drm_format_vert_chroma_subsampling(uint32_t format);
-extern int drm_format_plane_width(int width, uint32_t format, int plane);
-extern int drm_format_plane_height(int height, uint32_t format, int plane);
-extern const char *drm_get_format_name(uint32_t format);
+
 extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
                                                              unsigned int supported_rotations);
 extern unsigned int drm_rotation_simplify(unsigned int rotation,
                                          unsigned int supported_rotations);
-
+extern void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+                                      uint degamma_lut_size,
+                                      bool has_ctm,
+                                      uint gamma_lut_size);
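
A hedged sketch of how a driver might call the new drm_crtc_enable_color_mgmt() during CRTC setup; the LUT sizes here are illustrative example values, not requirements.

static void example_enable_color_mgmt(struct drm_crtc *crtc)
{
	/* Advertise a 256-entry degamma LUT, a CTM and a 1024-entry gamma LUT. */
	drm_crtc_enable_color_mgmt(crtc, 256, true, 1024);
}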
 /* Helpers */
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+                                            uint32_t id, uint32_t type);
+void drm_mode_object_reference(struct drm_mode_object *obj);
+void drm_mode_object_unreference(struct drm_mode_object *obj);
 
 static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
                uint32_t id)
@@ -2720,4 +3128,50 @@ assert_drm_connector_list_read_locked(struct drm_mode_config *mode_config)
             &fb->head != (&(dev)->mode_config.fb_list);                        \
             fb = list_next_entry(fb, head))
 
+/* drm_edid.c */
+bool drm_probe_ddc(struct i2c_adapter *adapter);
+struct edid *drm_get_edid(struct drm_connector *connector,
+                         struct i2c_adapter *adapter);
+struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+                                    struct i2c_adapter *adapter);
+struct edid *drm_edid_duplicate(const struct edid *edid);
+int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
+enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
+bool drm_detect_hdmi_monitor(struct edid *edid);
+bool drm_detect_monitor_audio(struct edid *edid);
+bool drm_rgb_quant_range_selectable(struct edid *edid);
+int drm_add_modes_noedid(struct drm_connector *connector,
+                        int hdisplay, int vdisplay);
+void drm_set_preferred_mode(struct drm_connector *connector,
+                           int hpref, int vpref);
+
+int drm_edid_header_is_valid(const u8 *raw_edid);
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
+                         bool *edid_corrupt);
+bool drm_edid_is_valid(struct edid *edid);
+void drm_edid_get_monitor_name(struct edid *edid, char *name,
+                              int buflen);
+struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+                                          int hsize, int vsize, int fresh,
+                                          bool rb);
+
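A minimal, hedged sketch of the usual ->get_modes() pattern built from the EDID helpers above; the i2c adapter is assumed to come from driver state, and drm_mode_connector_update_edid_property() is the existing connector property helper.

static int example_connector_get_modes(struct drm_connector *connector,
				       struct i2c_adapter *adapter)
{
	struct edid *edid;
	int count;

	edid = drm_get_edid(connector, adapter);
	if (!edid)
		return 0;

	/* Publish the EDID blob, then add the modes it contains. */
	drm_mode_connector_update_edid_property(connector, edid);
	count = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return count;
}
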
+/* drm_bridge.c */
+extern int drm_bridge_add(struct drm_bridge *bridge);
+extern void drm_bridge_remove(struct drm_bridge *bridge);
+extern struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+extern int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge);
+
+bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);
+void drm_bridge_disable(struct drm_bridge *bridge);
+void drm_bridge_post_disable(struct drm_bridge *bridge);
+void drm_bridge_mode_set(struct drm_bridge *bridge,
+                       struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode);
+void drm_bridge_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_enable(struct drm_bridge *bridge);
+
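A hedged sketch of the lookup-and-attach pattern these bridge functions support; error handling is trimmed, and the device tree node np is assumed to come from the encoder's port bindings.

static int example_attach_bridge(struct drm_device *dev,
				 struct drm_encoder *encoder,
				 struct device_node *np)
{
	struct drm_bridge *bridge = of_drm_find_bridge(np);

	if (!bridge)
		return -EPROBE_DEFER;

	/* Link the bridge into the encoder's chain, then attach it. */
	bridge->encoder = encoder;
	encoder->bridge = bridge;
	return drm_bridge_attach(dev, bridge);
}
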
 #endif /* __DRM_CRTC_H__ */
index 97fa894d4ee27757f60b91721023b8b4348bc5e3..4b37afa2b73b4a106d7beef61f7b5be0198fe983 100644 (file)
@@ -48,9 +48,6 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
                                     struct drm_display_mode *mode,
                                     int x, int y,
                                     struct drm_framebuffer *old_fb);
-extern void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
-                                             int degamma_lut_size,
-                                             int gamma_lut_size);
 extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
 extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
index 9d03f167007bc1bbcc6689fe76e9d5355ab02fef..72dee1213268100402fdd91313c353b709507ef8 100644 (file)
@@ -622,6 +622,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
 #define DP_BRANCH_OUI_HEADER_SIZE      0xc
 #define DP_RECEIVER_CAP_SIZE           0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE      2
+#define EDP_DISPLAY_CTL_CAP_SIZE       3
 
 void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
@@ -746,7 +747,14 @@ struct drm_dp_aux {
        struct mutex hw_mutex;
        ssize_t (*transfer)(struct drm_dp_aux *aux,
                            struct drm_dp_aux_msg *msg);
-       unsigned i2c_nack_count, i2c_defer_count;
+       /**
+        * @i2c_nack_count: Counts I2C NACKs, used for DP validation.
+        */
+       unsigned i2c_nack_count;
+       /**
+        * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
+        */
+       unsigned i2c_defer_count;
 };
 
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
@@ -804,6 +812,7 @@ int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
 int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
 
+void drm_dp_aux_init(struct drm_dp_aux *aux);
 int drm_dp_aux_register(struct drm_dp_aux *aux);
 void drm_dp_aux_unregister(struct drm_dp_aux *aux);
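
The new drm_dp_aux_init() lets a driver initialize the AUX channel before the connector it hangs off is registered, deferring the sysfs side to drm_dp_aux_register(). A hedged sketch, where struct example_device and example_aux_transfer() are assumed driver code, not part of this patch:

static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
				    struct drm_dp_aux_msg *msg);

struct example_device {
	struct device *dev;
	struct drm_dp_aux aux;
};

static void example_setup_aux(struct example_device *edev)
{
	edev->aux.name = "example DP AUX";
	edev->aux.dev = edev->dev;
	edev->aux.transfer = example_aux_transfer;

	/* No sysfs registration yet; drm_dp_aux_register() comes later. */
	drm_dp_aux_init(&edev->aux);
}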
 
index fdb47051d5492a4db126cc525667bd181343d2cd..003207670597095a7a5e7b3b06b177992fbd6e40 100644 (file)
@@ -87,7 +87,15 @@ struct drm_dp_mst_port {
        struct drm_connector *connector;
        struct drm_dp_mst_topology_mgr *mgr;
 
-       struct edid *cached_edid; /* for DP logical ports - make tiling work */
+       /**
+        * @cached_edid: for DP logical ports - make tiling work by ensuring
+        * that the EDID for all connectors is read immediately.
+        */
+       struct edid *cached_edid;
+       /**
+        * @has_audio: Tracks whether the sink connected to this port is
+        * audio-capable.
+        */
        bool has_audio;
 };
 
@@ -397,71 +405,154 @@ struct drm_dp_payload {
 
 /**
  * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
- * @dev: device pointer for adding i2c devices etc.
- * @cbs: callbacks for connector addition and destruction.
- * @max_dpcd_transaction_bytes - maximum number of bytes to read/write in one go.
- * @aux: aux channel for the DP connector.
- * @max_payloads: maximum number of payloads the GPU can generate.
- * @conn_base_id: DRM connector ID this mgr is connected to.
- * @down_rep_recv: msg receiver state for down replies.
- * @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, dpcd.
- * @mst_state: if this manager is enabled for an MST capable port.
- * @mst_primary: pointer to the primary branch device.
- * @dpcd: cache of DPCD for primary port.
- * @pbn_div: PBN to slots divisor.
  *
  * This struct represents the toplevel displayport MST topology manager.
  * There should be one instance of this for every MST capable DP connector
  * on the GPU.
  */
 struct drm_dp_mst_topology_mgr {
-
+       /**
+        * @dev: device pointer for adding i2c devices etc.
+        */
        struct device *dev;
+       /**
+        * @cbs: callbacks for connector addition and destruction.
+        */
        const struct drm_dp_mst_topology_cbs *cbs;
+       /**
+        * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
+        * in one go.
+        */
        int max_dpcd_transaction_bytes;
-       struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
+       /**
+        * @aux: AUX channel for the DP MST connector this topology mgr is
+        * controlling.
+        */
+       struct drm_dp_aux *aux;
+       /**
+        * @max_payloads: maximum number of payloads the GPU can generate.
+        */
        int max_payloads;
+       /**
+        * @conn_base_id: DRM connector ID this mgr is connected to. Only used
+        * to build the MST connector path value.
+        */
        int conn_base_id;
 
-       /* only ever accessed from the workqueue - which should be serialised */
+       /**
+        * @down_rep_recv: Message receiver state for down replies. This and
+        * @up_req_recv are only ever accessed from the work item, which is
+        * serialised.
+        */
        struct drm_dp_sideband_msg_rx down_rep_recv;
+       /**
+        * @up_req_recv: Message receiver state for up requests. This and
+        * @down_rep_recv are only ever accessed from the work item, which is
+        * serialised.
+        */
        struct drm_dp_sideband_msg_rx up_req_recv;
 
-       /* pointer to info about the initial MST device */
-       struct mutex lock; /* protects mst_state + primary + dpcd */
+       /**
+        * @lock: protects mst state, primary, dpcd.
+        */
+       struct mutex lock;
 
+       /**
+        * @mst_state: If this manager is enabled for an MST capable port. False
+        * if no MST sink/branch device is connected.
+        */
        bool mst_state;
+       /**
+        * @mst_primary: Pointer to the primary/first branch device.
+        */
        struct drm_dp_mst_branch *mst_primary;
 
+       /**
+        * @dpcd: Cache of DPCD for primary port.
+        */
        u8 dpcd[DP_RECEIVER_CAP_SIZE];
+       /**
+        * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
+        */
        u8 sink_count;
+       /**
+        * @pbn_div: PBN to slots divisor.
+        */
        int pbn_div;
+       /**
+        * @total_slots: Total slots that can be allocated.
+        */
        int total_slots;
+       /**
+        * @avail_slots: Still available slots that can be allocated.
+        */
        int avail_slots;
+       /**
+        * @total_pbn: Total PBN count.
+        */
        int total_pbn;
 
-       /* messages to be transmitted */
-       /* qlock protects the upq/downq and in_progress,
-          the mstb tx_slots and txmsg->state once they are queued */
+       /**
+        * @qlock: protects @tx_msg_downq, the tx_slots in struct
+        * &drm_dp_mst_branch and txmsg->state once they are queued
+        * &drm_dp_mst_branch and txmsg->state once they are queued.
        struct mutex qlock;
+       /**
+        * @tx_msg_downq: List of pending down replies.
+        */
        struct list_head tx_msg_downq;
-       bool tx_down_in_progress;
 
-       /* payload info + lock for it */
+       /**
+        * @payload_lock: Protects payload information.
+        */
        struct mutex payload_lock;
+       /**
+        * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
+        * VCPI structure itself is embedded into the corresponding
+        * &drm_dp_mst_port structure.
+        */
        struct drm_dp_vcpi **proposed_vcpis;
+       /**
+        * @payloads: Array of payloads.
+        */
        struct drm_dp_payload *payloads;
+       /**
+        * @payload_mask: Elements of @payloads actually in use. Since
+        * reallocation of active outputs isn't possible, gaps can be created by
+        * disabling outputs out of order compared to how they've been enabled.
+        */
        unsigned long payload_mask;
+       /**
+        * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
+        */
        unsigned long vcpi_mask;
 
+       /**
+        * @tx_waitq: Wait queue used to stall the tx worker.
+        */
        wait_queue_head_t tx_waitq;
+       /**
+        * @work: Probe work.
+        */
        struct work_struct work;
-
+       /**
+        * @tx_work: Sideband transmit worker. This can nest within the main
+        * @work worker for each transaction @work launches.
+        */
        struct work_struct tx_work;
 
+       /**
+        * @destroy_connector_list: List of connectors to be destroyed.
+        */
        struct list_head destroy_connector_list;
+       /**
+        * @destroy_connector_lock: Protects @destroy_connector_list.
+        */
        struct mutex destroy_connector_lock;
+       /**
+        * @destroy_connector_work: Work item to destroy connectors. Needed to
+        * avoid locking inversion.
+        */
        struct work_struct destroy_connector_work;
 };
 
index fd0dde9f0a6d9f6bcbb3c69d14d7934d340f474d..f313211f8ed52f1e1ecf0a7224a8dd3c26715367 100644 (file)
@@ -23,6 +23,7 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
 
 void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
 void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
+void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state);
 int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
        struct drm_fb_helper_surface_size *sizes,
        const struct drm_framebuffer_funcs *funcs);
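
The new drm_fbdev_cma_set_suspend() above is meant to be wired into a driver's system suspend/resume path. A hedged sketch, with struct example_device and its fbdev_cma pointer as assumed driver state:

static int example_drm_suspend(struct example_device *edev)
{
	/* Stop fbdev I/O while the display pipe is down. */
	drm_fbdev_cma_set_suspend(edev->fbdev_cma, 1);
	return 0;
}

static int example_drm_resume(struct example_device *edev)
{
	drm_fbdev_cma_set_suspend(edev->fbdev_cma, 0);
	return 0;
}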
index 5b4aa35026a3d8dc336f1e9c2c882dea72c3c597..db8d4780eaa27607baf7857111c72b50bbae5fd4 100644 (file)
@@ -212,17 +212,6 @@ struct drm_fb_helper {
         * needs to be reprobe when fbdev is in control again.
         */
        bool delayed_hotplug;
-
-       /**
-        * @atomic:
-        *
-        * Use atomic updates for restore_fbdev_mode(), etc.  This defaults to
-        * true if driver has DRIVER_ATOMIC feature flag, but drivers can
-        * override it to true after drm_fb_helper_init() if they support atomic
-        * modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper
-        * does not require ASYNC commits).
-        */
-       bool atomic;
 };
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
new file mode 100644 (file)
index 0000000..7f90a39
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+#ifndef __DRM_FOURCC_H__
+#define __DRM_FOURCC_H__
+
+#include <linux/types.h>
+#include <uapi/drm/drm_fourcc.h>
+
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
+int drm_format_num_planes(uint32_t format);
+int drm_format_plane_cpp(uint32_t format, int plane);
+int drm_format_horz_chroma_subsampling(uint32_t format);
+int drm_format_vert_chroma_subsampling(uint32_t format);
+int drm_format_plane_width(int width, uint32_t format, int plane);
+int drm_format_plane_height(int height, uint32_t format, int plane);
+const char *drm_get_format_name(uint32_t format);
+
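A hedged example of combining these helpers: computing the byte stride of one plane of a buffer from its pixel format and width. The function name is hypothetical.

static inline int example_plane_stride(uint32_t format, int plane, int width)
{
	/* Plane width in pixels times bytes per pixel of that plane. */
	return drm_format_plane_width(width, format, plane) *
	       drm_format_plane_cpp(format, plane);
}
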
+#endif /* __DRM_FOURCC_H__ */
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
new file mode 100644 (file)
index 0000000..2401b14
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2016 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_IRQ_H_
+#define _DRM_IRQ_H_
+
+#include <linux/seqlock.h>
+
+/**
+ * struct drm_pending_vblank_event - pending vblank event tracking
+ */
+struct drm_pending_vblank_event {
+       /**
+        * @base: Base structure for tracking pending DRM events.
+        */
+       struct drm_pending_event base;
+       /**
+        * @pipe: drm_crtc_index() of the &drm_crtc this event is for.
+        */
+       unsigned int pipe;
+       /**
+        * @event: Actual event which will be sent to userspace.
+        */
+       struct drm_event_vblank event;
+};
+
+/**
+ * struct drm_vblank_crtc - vblank tracking for a CRTC
+ *
+ * This structure tracks the vblank state for one CRTC.
+ *
+ * Note that for historical reasons - the vblank handling code is still shared
+ * with legacy/non-kms drivers - this is a free-standing structure not directly
+ * connected to struct &drm_crtc. But all public interface functions are taking
+ * a struct &drm_crtc to hide this implementation detail.
+ */
+struct drm_vblank_crtc {
+       /**
+        * @dev: Pointer to the &drm_device.
+        */
+       struct drm_device *dev;
+       /**
+        * @queue: Wait queue for vblank waiters.
+        */
+       wait_queue_head_t queue;
+       /**
+        * @disable_timer: Disable timer for the delayed vblank disabling
+        * hysteresis logic. Vblank disabling is controlled through the
+        * drm_vblank_offdelay module option and the setting of the
+        * max_vblank_count value in the &drm_device structure.
+        */
+       struct timer_list disable_timer;
+
+       /**
+        * @seqlock: Protect vblank count and time.
+        */
+       seqlock_t seqlock;
+
+       /**
+        * @count: Current software vblank counter.
+        */
+       u32 count;
+       /**
+        * @time: Vblank timestamp corresponding to @count.
+        */
+       struct timeval time;
+
+       /**
+        * @refcount: Number of users/waiters of the vblank interrupt. Only when
+        * this refcount reaches 0 can the hardware interrupt be disabled using
+        * @disable_timer.
+        */
+       atomic_t refcount;
+       /**
+        * @last: Protected by dev->vbl_lock, used for wraparound handling.
+        */
+       u32 last;
+       /**
+        * @inmodeset: Tracks whether the vblank is disabled due to a modeset.
+        * For legacy drivers, bit 2 additionally tracks whether an additional
+        * temporary vblank reference has been acquired to paper over the
+        * hardware counter resetting/jumping. KMS drivers should instead just
+        * call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
+        * save and restore the vblank count.
+        */
+       unsigned int inmodeset;
+       /**
+        * @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
+        * structure.
+        */
+       unsigned int pipe;
+       /**
+        * @framedur_ns: Frame/Field duration in ns, used by
+        * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+        * drm_calc_timestamping_constants().
+        */
+       int framedur_ns;
+       /**
+        * @linedur_ns: Line duration in ns, used by
+        * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+        * drm_calc_timestamping_constants().
+        */
+       int linedur_ns;
+       /**
+        * @enabled: Tracks the enabling state of the corresponding &drm_crtc to
+        * avoid double-disabling and hence corrupting saved state. Needed by
+        * drivers not using atomic KMS, since those might go through their CRTC
+        * disabling functions multiple times.
+        */
+       bool enabled;
+};
+
+extern int drm_irq_install(struct drm_device *dev, int irq);
+extern int drm_irq_uninstall(struct drm_device *dev);
+
+extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+                          struct drm_file *filp);
+extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
+extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
+                                         struct timeval *vblanktime);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+                                      struct drm_pending_vblank_event *e);
+extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
+                                     struct drm_pending_vblank_event *e);
+extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
+extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
+extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
+extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
+extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
+extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
+
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+                                                unsigned int pipe, int *max_error,
+                                                struct timeval *vblank_time,
+                                                unsigned flags,
+                                                const struct drm_display_mode *mode);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
+                                           const struct drm_display_mode *mode);
+
+/**
+ * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
+ * @crtc: which CRTC's vblank waitqueue to retrieve
+ *
+ * This function returns a pointer to the vblank waitqueue for the CRTC.
+ * Drivers can use this to implement vblank waits using wait_event() and related
+ * functions.
+ */
+static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
+{
+       return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
+}
+
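A hedged sketch of such a wait, as suggested in the kerneldoc above; the caller is assumed to already hold a vblank reference via drm_crtc_vblank_get(), and the 100 ms timeout is an arbitrary example value.

static inline int example_wait_next_vblank(struct drm_crtc *crtc)
{
	u32 last = drm_crtc_vblank_count(crtc);

	/* Sleep until the software vblank counter advances. */
	return wait_event_timeout(*drm_crtc_vblank_waitqueue(crtc),
				  drm_crtc_vblank_count(crtc) != last,
				  msecs_to_jiffies(100));
}
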
+#endif
index a5ef2c7e40f8e281e2419ff071b4d391846127f8..cf0e7d89bcdfa07f1c62022ee2f3c985c4f2455d 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef __DRM_DRM_LEGACY_H__
 #define __DRM_DRM_LEGACY_H__
 
+#include <drm/drm_auth.h>
+
 /*
  * Legacy driver interfaces for the Direct Rendering Manager
  *
index 7a9840f8b38e03a5efc71a216cfae7b6f2e99bb3..47ac92584d766d8dddb2b0526ea255911d7361c3 100644 (file)
@@ -180,6 +180,8 @@ struct mipi_dsi_device {
        unsigned long mode_flags;
 };
 
+#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
+
 static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
 {
        return container_of(dev, struct mipi_dsi_device, dev);
@@ -263,6 +265,7 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
                                    u16 end);
 int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
                                  u16 end);
+int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline);
 int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
 int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
                             enum mipi_dsi_dcs_tear_mode mode);
index 625966a906f2b0cf50eaa754bcb62e5e8e237354..ff481770d76bbccd6b34a6b9791d60f22ec3e5d7 100644 (file)
@@ -169,6 +169,8 @@ enum drm_mode_status {
  *
  * The horizontal and vertical timings are defined per the following diagram.
  *
+ * ::
+ *
  *
  *               Active                 Front           Sync           Back
  *              Region                 Porch                          Porch
index d4619dc2eecb4d6c9f3cf3bf40899fb5e7171da3..b55f21857a986611f937a152a7caaa7103a3207f 100644 (file)
@@ -736,6 +736,11 @@ struct drm_connector_helper_funcs {
         * inspect dynamic configuration state should instead use
         * @atomic_best_encoder.
         *
+        * You can leave this function set to NULL if the connector is only
+        * attached to a single encoder and you are using the atomic helpers.
+        * In this case, the core will call drm_atomic_helper_best_encoder()
+        * for you.
+        *
         * RETURNS:
         *
         * Encoder that should be used for the given connector and connector
@@ -752,8 +757,9 @@ struct drm_connector_helper_funcs {
         * need to select the best encoder depending upon the desired
         * configuration and can't select it statically.
         *
-        * This function is used by drm_atomic_helper_check_modeset() and either
-        * this or @best_encoder is required.
+        * This function is used by drm_atomic_helper_check_modeset().
+        * If it is not implemented, the core will fall back to @best_encoder
+        * (or drm_atomic_helper_best_encoder() if @best_encoder is NULL).
         *
         * NOTE:
         *
@@ -925,4 +931,43 @@ static inline void drm_plane_helper_add(struct drm_plane *plane,
        plane->helper_private = funcs;
 }
 
+/**
+ * struct drm_mode_config_helper_funcs - global modeset helper operations
+ *
+ * These helper functions are used by the atomic helpers.
+ */
+struct drm_mode_config_helper_funcs {
+       /**
+        * @atomic_commit_tail:
+        *
+        * This hook is used by the default atomic_commit() hook implemented in
+        * drm_atomic_helper_commit() together with the nonblocking commit
+        * helpers (see drm_atomic_helper_setup_commit() for a starting point)
+        * to implement blocking and nonblocking commits easily. It is not used
+        * by the atomic helpers.
+        *
+        * This hook should first commit the given atomic state to the hardware.
+        * But drivers can add more waiting calls at the start of their
+        * implementation, e.g. to wait for driver-internal requests for implicit
+        * syncing, before starting to commit the update to the hardware.
+        *
+        * After the atomic update is committed to the hardware this hook needs
+        * to call drm_atomic_helper_commit_hw_done(). Then wait for the update
+        * to be executed by the hardware, for example using
+        * drm_atomic_helper_wait_for_vblanks(), and then clean up the old
+        * framebuffers using drm_atomic_helper_cleanup_planes().
+        *
+        * When disabling a CRTC this hook _must_ stall for the commit to
+        * complete. Vblank waits don't work on a disabled CRTC, hence the core
+        * can't take care of this. And it also can't rely on the vblank event,
+        * since that can be signalled already when the screen shows black,
+        * which can happen much earlier than the last hardware access needed to
+        * shut off the display pipeline completely.
+        *
+        * This hook is optional, the default implementation is
+        * drm_atomic_helper_commit_tail().
+        */
+       void (*atomic_commit_tail)(struct drm_atomic_state *state);
+};
+
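As a hedged illustration of the protocol described in the @atomic_commit_tail documentation, a driver implementation could mirror the default helper like this. The function name is hypothetical; the helper calls are the documented ones, with the era's drm_atomic_helper_commit_planes() bool argument.

static void example_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	/* Commit the new state to the hardware first. */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, false);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Signal completion of the hardware commit phase. */
	drm_atomic_helper_commit_hw_done(state);

	/* Wait for the update to scan out, then release old framebuffers. */
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}
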
 #endif
index 4421f3f4ca8d9f44772d7e9a2a2163bb245e1326..0e0c3573cce0552ea8a9b1b7d82c33c8079d5f9e 100644 (file)
@@ -46,6 +46,7 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
                                  struct drm_rect *src,
                                  struct drm_rect *dest,
                                  const struct drm_rect *clip,
+                                 unsigned int rotation,
                                  int min_scale,
                                  int max_scale,
                                  bool can_position,
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
new file mode 100644 (file)
index 0000000..2690397
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H
+#define __LINUX_DRM_SIMPLE_KMS_HELPER_H
+
+struct drm_simple_display_pipe;
+
+/**
+ * struct drm_simple_display_pipe_funcs - helper operations for a simple
+ *                                        display pipeline
+ */
+struct drm_simple_display_pipe_funcs {
+       /**
+        * @enable:
+        *
+        * This function should be used to enable the pipeline.
+        * It is called when the underlying crtc is enabled.
+        * This hook is optional.
+        */
+       void (*enable)(struct drm_simple_display_pipe *pipe,
+                      struct drm_crtc_state *crtc_state);
+       /**
+        * @disable:
+        *
+        * This function should be used to disable the pipeline.
+        * It is called when the underlying crtc is disabled.
+        * This hook is optional.
+        */
+       void (*disable)(struct drm_simple_display_pipe *pipe);
+
+       /**
+        * @check:
+        *
+        * This function is called in the check phase of an atomic update,
+        * specifically when the underlying plane is checked.
+        * The simple display pipeline helpers already check that the plane is
+        * not scaled, that it fills the entire visible area and that it is
+        * always enabled when the crtc is also enabled.
+        * This hook is optional.
+        *
+        * RETURNS:
+        *
+        * 0 on success, -EINVAL if the state or the transition can't be
+        * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
+        * attempt to obtain another state object ran into a &drm_modeset_lock
+        * deadlock.
+        */
+       int (*check)(struct drm_simple_display_pipe *pipe,
+                    struct drm_plane_state *plane_state,
+                    struct drm_crtc_state *crtc_state);
+       /**
+        * @update:
+        *
+        * This function is called when the underlying plane state is updated.
+        * This hook is optional.
+        */
+       void (*update)(struct drm_simple_display_pipe *pipe,
+                      struct drm_plane_state *plane_state);
+};
+
+/**
+ * struct drm_simple_display_pipe - simple display pipeline
+ * @crtc: CRTC control structure
+ * @plane: Plane control structure
+ * @encoder: Encoder control structure
+ * @connector: Connector control structure
+ * @funcs: Pipeline control functions (optional)
+ *
+ * Simple display pipeline with plane, crtc and encoder collapsed into one
+ * entity. It should be initialized by calling drm_simple_display_pipe_init().
+ */
+struct drm_simple_display_pipe {
+       struct drm_crtc crtc;
+       struct drm_plane plane;
+       struct drm_encoder encoder;
+       struct drm_connector *connector;
+
+       const struct drm_simple_display_pipe_funcs *funcs;
+};
+
+int drm_simple_display_pipe_init(struct drm_device *dev,
+                       struct drm_simple_display_pipe *pipe,
+                       const struct drm_simple_display_pipe_funcs *funcs,
+                       const uint32_t *formats, unsigned int format_count,
+                       struct drm_connector *connector);
+
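A hedged usage sketch: a driver embeds the pipe in its own device structure and initializes it with a single format. The example_ names and the connector are assumed to exist in driver code.

static int example_pipe_bind(struct drm_device *drm,
			     struct drm_simple_display_pipe *pipe,
			     struct drm_connector *connector)
{
	static const uint32_t example_formats[] = { DRM_FORMAT_XRGB8888 };
	static const struct drm_simple_display_pipe_funcs example_funcs = {
		/* All hooks are optional; fill in .enable etc. as needed. */
	};

	return drm_simple_display_pipe_init(drm, pipe, &example_funcs,
					    example_formats,
					    ARRAY_SIZE(example_formats),
					    connector);
}
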
+#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
index 595f85c392ac443cd6cf23e3536c32ee58ecda15..b1755f8db36b602e0883e4d5622db703edd76f2b 100644 (file)
@@ -92,4 +92,7 @@ extern bool i915_gpu_turbo_disable(void);
 #define    I845_TSEG_SIZE_512K (2 << 1)
 #define    I845_TSEG_SIZE_1M   (3 << 1)
 
+#define INTEL_BSM 0x5c
+#define   INTEL_BSM_MASK (0xFFFF << 20)
+
 #endif                         /* _I915_DRM_H_ */
index 9094599a11509c190bfc5ae4df853531f5a442da..33466bfc6440363d4f36d1ca2f09d44098098e8c 100644 (file)
        INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
        INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
        INTEL_VGA_DEVICE(0x5902, info), /* DT  GT1 */ \
+       INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
        INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
        INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
 
        INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
 
 #define INTEL_KBL_GT3_IDS(info) \
+       INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
-       INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
-       INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
+       INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
 
 #define INTEL_KBL_GT4_IDS(info) \
-       INTEL_VGA_DEVICE(0x5932, info), /* DT  GT4 */ \
-       INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
-       INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
-       INTEL_VGA_DEVICE(0x593D, info)  /* WKS GT4 */
+       INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
 
 #define INTEL_KBL_IDS(info) \
        INTEL_KBL_GT1_IDS(info), \
index 9e9bddaa58a50792ddf0df90979aa5689a300d4e..f49edecd66a37926308397145d1dccf7403caeb8 100644 (file)
@@ -13,6 +13,9 @@ void intel_gmch_remove(void);
 bool intel_enable_gtt(void);
 
 void intel_gtt_chipset_flush(void);
+void intel_gtt_insert_page(dma_addr_t addr,
+                          unsigned int pg,
+                          unsigned int flags);
 void intel_gtt_insert_sg_entries(struct sg_table *st,
                                 unsigned int pg_start,
                                 unsigned int flags);
index c801d9028e37388bc30a8254be8c4ae7df6acb22..6f2c59887ba6d920527475416650976247a2794a 100644 (file)
@@ -173,7 +173,7 @@ struct ttm_tt;
  * @lru: List head for the lru list.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
- * @priv_flags: Flags describing buffer object internal state.
+ * @moving: Fence set when BO is moving
  * @vma_node: Address space manager node.
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
@@ -239,7 +239,7 @@ struct ttm_buffer_object {
         * Members protected by a bo reservation.
         */
 
-       unsigned long priv_flags;
+       struct fence *moving;
 
        struct drm_vma_offset_node vma_node;
 
@@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
  */
 extern int ttm_bo_wait(struct ttm_buffer_object *bo,
                       bool interruptible, bool no_wait);
+
+/**
+ * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
+ *
+ * @placement: Proposed placement for the buffer object
+ * @mem:  The struct ttm_mem_reg indicating the region where the bo resides
+ * @new_flags: Describes compatible placement found
+ *
+ * Returns true if the placement is compatible
+ */
+extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
+                             struct ttm_mem_reg *mem,
+                             uint32_t *new_flags);
+
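As a rough illustration (a sketch under assumed surrounding code, mirroring how validation paths tend to use it): check compatibility first and only move the buffer when the current placement will not do:

	uint32_t new_flags;

	if (ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
		/* Already in a compatible region: just refresh the
		 * placement flags instead of moving the buffer. */
		ttm_flag_masked(&bo->mem.placement, new_flags,
				~TTM_PL_MASK_MEMTYPE);
	} else {
		ret = ttm_bo_validate(bo, placement, interruptible,
				      no_wait_gpu);
	}
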
 /**
  * ttm_bo_validate
  *
index 513f7f96b80a90e530a4c74179f943a0dff8c40f..e2ebe6666e2beb7fc28d234341413140f67fcfba 100644 (file)
@@ -258,8 +258,10 @@ struct ttm_mem_type_manager_func {
  * reserved by the TTM vm system.
  * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
+ * @move_lock: lock protecting the move fence
  * @lru: The lru list for this memory type.
+ * @move: The fence of the last pipelined move operation.
  *
  * This structure is used to identify and manage memory types for a device.
  * It's set up by the ttm_bo_driver::init_mem_type method.
@@ -286,6 +288,7 @@ struct ttm_mem_type_manager {
        struct mutex io_reserve_mutex;
        bool use_io_reserve_lru;
        bool io_reserve_fastpath;
+       spinlock_t move_lock;
 
        /*
         * Protected by @io_reserve_mutex:
@@ -298,6 +301,11 @@ struct ttm_mem_type_manager {
         */
 
        struct list_head lru;
+
+       /*
+        * Protected by @move_lock.
+        */
+       struct fence *move;
 };
 
 /**
@@ -503,9 +511,6 @@ struct ttm_bo_global {
 
 #define TTM_NUM_MEM_TYPES 8
 
-#define TTM_BO_PRIV_FLAG_MOVING  0     /* Buffer object is moving and needs
-                                          idling before CPU mapping */
-#define TTM_BO_PRIV_FLAG_MAX 1
 /**
  * struct ttm_bo_device - Buffer object driver device-specific data.
  *
@@ -622,15 +627,6 @@ extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
  */
 extern void ttm_tt_destroy(struct ttm_tt *ttm);
 
-/**
- * ttm_ttm_unbind:
- *
- * @ttm: The struct ttm_tt.
- *
- * Unbind a struct ttm_tt.
- */
-extern void ttm_tt_unbind(struct ttm_tt *ttm);
-
 /**
  * ttm_tt_swapin:
  *
@@ -979,6 +975,7 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -993,7 +990,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-                             bool evict, bool no_wait_gpu,
+                             bool evict, bool interruptible,
+                             bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem);
 
 /**
@@ -1011,7 +1009,6 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  * @bo: A pointer to a struct ttm_buffer_object.
  * @fence: A fence object that signals when moving is complete.
  * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
  * Accelerated move function to be called when an accelerated move
@@ -1023,9 +1020,24 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
  */
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-                                    struct fence *fence,
-                                    bool evict, bool no_wait_gpu,
+                                    struct fence *fence, bool evict,
                                     struct ttm_mem_reg *new_mem);
+
+/**
+ * ttm_bo_pipeline_move.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @fence: A fence object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @new_mem: struct ttm_mem_reg indicating where to move.
+ *
+ * Function for pipelining accelerated moves. Either free the memory
+ * immediately or hang it on a temporary buffer object.
+ */
+int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
+                        struct fence *fence, bool evict,
+                        struct ttm_mem_reg *new_mem);
+
 /**
  * ttm_io_prot
  *
index fe389ac3148915416c4fe7dba63a6b5159a3bc3d..92e7e97ca8ff0f95ef9dba95b3e7078dd8c73049 100644 (file)
 #ifndef __ASM_ARM_KVM_PMU_H
 #define __ASM_ARM_KVM_PMU_H
 
-#ifdef CONFIG_KVM_ARM_PMU
-
 #include <linux/perf_event.h>
 #include <asm/perf_event.h>
 
 #define ARMV8_PMU_CYCLE_IDX            (ARMV8_PMU_MAX_COUNTERS - 1)
 
+#ifdef CONFIG_KVM_ARM_PMU
+
 struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
        struct perf_event *perf_event;
index 961a417d641e5221264e75b4d987fc3015607d41..e38e3fc13ea8764a66d4c84a6dea1bee3f4e630b 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/sched.h>
 #include <linux/ptrace.h>
 #include <uapi/linux/audit.h>
-#include <linux/tty.h>
 
 #define AUDIT_INO_UNSET ((unsigned long)-1)
 #define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -348,23 +347,6 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
        return tsk->sessionid;
 }
 
-static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
-{
-       struct tty_struct *tty = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&tsk->sighand->siglock, flags);
-       if (tsk->signal)
-               tty = tty_kref_get(tsk->signal->tty);
-       spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
-       return tty;
-}
-
-static inline void audit_put_tty(struct tty_struct *tty)
-{
-       tty_kref_put(tty);
-}
-
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
 extern void __audit_bprm(struct linux_binprm *bprm);
@@ -522,12 +504,6 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
 {
        return -1;
 }
-static inline struct tty_struct *audit_get_tty(struct task_struct *tsk)
-{
-       return NULL;
-}
-static inline void audit_put_tty(struct tty_struct *tty)
-{ }
 static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
 { }
 static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
index e6b41f42602ba7cc32e2945e15309d06f3028f95..3db25df396cbb88d2135874818f4fb38a71b2eee 100644 (file)
@@ -159,6 +159,7 @@ struct bcma_host_ops {
 #define BCMA_CORE_DEFAULT              0xFFF
 
 #define BCMA_MAX_NR_CORES              16
+#define BCMA_CORE_SIZE                 0x1000
 
 /* Chip IDs of PCIe devices */
 #define BCMA_CHIP_ID_BCM4313   0x4313
index 576e4639ca609e8bfb8686474406073937e912e9..314b3caa701cc20a6c3fcd628de3c48b7c0c7d21 100644 (file)
@@ -65,6 +65,7 @@ struct coredump_params {
        unsigned long limit;
        unsigned long mm_flags;
        loff_t written;
+       loff_t pos;
 };
 
 /*
index 8ee27b8afe81c5d45b66f7b629c976ed8138d26a..0de4de6dd43e09aae40a72500b0e5a2fb9e89c1e 100644 (file)
@@ -111,6 +111,31 @@ enum bpf_access_type {
        BPF_WRITE = 2
 };
 
+/* types of values stored in eBPF registers */
+enum bpf_reg_type {
+       NOT_INIT = 0,            /* nothing was written into register */
+       UNKNOWN_VALUE,           /* reg doesn't contain a valid pointer */
+       PTR_TO_CTX,              /* reg points to bpf_context */
+       CONST_PTR_TO_MAP,        /* reg points to struct bpf_map */
+       PTR_TO_MAP_VALUE,        /* reg points to map element value */
+       PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+       FRAME_PTR,               /* reg == frame_pointer */
+       PTR_TO_STACK,            /* reg == frame_pointer + imm */
+       CONST_IMM,               /* constant integer value */
+
+       /* PTR_TO_PACKET represents:
+        * skb->data
+        * skb->data + imm
+        * skb->data + (u16) var
+        * skb->data + (u16) var + imm
+        * if (range > 0) then [ptr, ptr + range - off) is safe to access
+        * if (id > 0) means that some 'var' was added
+        * if (off > 0) means that 'imm' was added
+        */
+       PTR_TO_PACKET,
+       PTR_TO_PACKET_END,       /* skb->data + headlen */
+};
+
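For context on how a program satisfies the verifier's PTR_TO_PACKET range tracking, here is the usual bounds-check pattern (restricted C as compiled to eBPF; skb is the program's struct __sk_buff context and the return value is illustrative):

	void *data     = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct ethhdr *eth = data;

	/* This comparison is what gives 'eth' a verified safe range;
	 * without it the verifier rejects the load below. */
	if (data + sizeof(*eth) > data_end)
		return 0;

	if (eth->h_proto == htons(ETH_P_IP)) {
		/* parse further, re-checking bounds for each header */
	}
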
 struct bpf_prog;
 
 struct bpf_verifier_ops {
@@ -120,7 +145,8 @@ struct bpf_verifier_ops {
        /* return true if 'size' wide access at offset 'off' within bpf_context
         * with 'type' (read or write) is allowed
         */
-       bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+       bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
+                               enum bpf_reg_type *reg_type);
 
        u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
                                  int src_reg, int ctx_off,
@@ -238,6 +264,10 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 static inline void bpf_prog_put(struct bpf_prog *prog)
 {
 }
+
+static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
+{
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 /* verifier prototypes for helper functions called from eBPF programs */
index 19b14862d3e0d9afdc2a524e940f62944428ca89..1b3b6e15539238f8d88696762548de9d786c4349 100644 (file)
@@ -279,6 +279,11 @@ struct ceph_osd_client {
        struct workqueue_struct *notify_wq;
 };
 
+static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag)
+{
+       return osdc->osdmap->flags & flag;
+}
+
 extern int ceph_osdc_setup(void);
 extern void ceph_osdc_cleanup(void);
 
index ddc426b22d8159811a9514f01e09d6758a1cf04c..9ccf4dbe55f81722c6507fdcd76ab92417525438 100644 (file)
@@ -189,11 +189,6 @@ static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
        return !ceph_osd_is_up(map, osd);
 }
 
-static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
-{
-       return map && (map->flags & flag);
-}
-
 extern char *ceph_osdmap_state_str(char *str, int len, int state);
 extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
 
index 0c72204c75fc9428f08c9e63667bf75dee0bc153..fb39d5add173f94a3f9be0e77455e0f5b54764d8 100644 (file)
@@ -25,7 +25,7 @@
 #define CLK_SET_PARENT_GATE    BIT(1) /* must be gated across re-parent */
 #define CLK_SET_RATE_PARENT    BIT(2) /* propagate rate change up one level */
 #define CLK_IGNORE_UNUSED      BIT(3) /* do not gate even if unused */
-#define CLK_IS_ROOT            BIT(4) /* Deprecated: Don't use */
+                               /* unused */
 #define CLK_IS_BASIC           BIT(5) /* Basic clk, can't do a to_clk_foo() */
 #define CLK_GET_RATE_NOCACHE   BIT(6) /* do not use the cached clk rate */
 #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
index 786ad32631a672695d88b47d67c1019df624acab..07b83d32f66c857aeabe1eae476db0c1a9e5fbda 100644 (file)
@@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
+static inline struct cpuidle_device *cpuidle_get_device(void)
+{return __this_cpu_read(cpuidle_devices); }
 #else
 static inline void disable_cpuidle(void) { }
 static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
@@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
        struct cpuidle_device *dev) {return NULL; }
+static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
 #endif
 
 #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
index 484c8792da825ed27bc17485fa3b4807e60d745f..f53fa055021ab16bc182b739dcb665d1a70ce4a6 100644 (file)
@@ -212,6 +212,7 @@ struct dentry_operations {
 #define DCACHE_OP_REAL                 0x08000000
 
 #define DCACHE_PAR_LOOKUP              0x10000000 /* being looked up (with parent locked shared) */
+#define DCACHE_DENTRY_CURSOR           0x20000000
 
 extern seqlock_t rename_lock;
 
@@ -575,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
        return inode;
 }
 
+/**
+ * d_real_inode - Return the real inode
+ * @dentry: The dentry to query
+ *
+ * If the dentry is on a union/overlay, return the underlying, real inode.
+ * Otherwise return d_inode().
+ */
+static inline struct inode *d_real_inode(struct dentry *dentry)
+{
+       return d_backing_inode(d_real(dentry));
+}
+
 
 #endif /* __LINUX_DCACHE_H */
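
A sketch of a typical caller (the function name is hypothetical): code that must reach the backing filesystem's inode even when the file sits on an overlayfs mount:

	static int my_check_file(struct file *file)
	{
		struct inode *inode = d_real_inode(file->f_path.dentry);

		/* 'inode' is the real underlying inode, not the overlay's */
		return S_ISREG(inode->i_mode) ? 0 : -EINVAL;
	}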
index 5871f292b596fb58349c1f4c6e0202aede55b7f8..277ab9af9ac29a95773ce2201809f7c7613391a1 100644 (file)
 
 #include <linux/errno.h>
 
-struct pts_fs_info;
-
 #ifdef CONFIG_UNIX98_PTYS
 
-/* Look up a pts fs info and get a ref to it */
-struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
-void devpts_put_ref(struct pts_fs_info *);
+struct pts_fs_info;
+
+struct pts_fs_info *devpts_acquire(struct file *);
+void devpts_release(struct pts_fs_info *);
 
 int devpts_new_index(struct pts_fs_info *);
 void devpts_kill_index(struct pts_fs_info *, int);
index 3fe90d494edb3da67f5f3cdef86d3cda9353fea8..e0b0741ae67199d48045d97a0adbffdab1c894ca 100644 (file)
@@ -112,19 +112,24 @@ struct dma_buf_ops {
  * @file: file pointer used for sharing buffers across, and for refcounting.
  * @attachments: list of dma_buf_attachment that denotes all devices attached.
  * @ops: dma_buf_ops associated with this buffer object.
+ * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap
+ * @vmapping_counter: used internally to refcnt the vmaps
+ * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
  * @exp_name: name of the exporter; useful for debugging.
  * @owner: pointer to exporter module; used for refcounting when exporter is a
  *         kernel module.
  * @list_node: node for dma_buf accounting and debugging.
  * @priv: exporter specific private data for this buffer object.
  * @resv: reservation object linked to this dma-buf
+ * @poll: for userspace poll support
+ * @cb_excl: for userspace poll support
+ * @cb_shared: for userspace poll support
  */
 struct dma_buf {
        size_t size;
        struct file *file;
        struct list_head attachments;
        const struct dma_buf_ops *ops;
-       /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
        struct mutex lock;
        unsigned vmapping_counter;
        void *vmap_ptr;
@@ -188,9 +193,11 @@ struct dma_buf_export_info {
 
 /**
  * helper macro for exporters; zeros and fills in most common values
+ *
+ * @name: export-info name
  */
-#define DEFINE_DMA_BUF_EXPORT_INFO(a)  \
-       struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \
+#define DEFINE_DMA_BUF_EXPORT_INFO(name)       \
+       struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
                                         .owner = THIS_MODULE }
 
 /**
@@ -235,6 +242,4 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
                 unsigned long);
 void *dma_buf_vmap(struct dma_buf *);
 void dma_buf_vunmap(struct dma_buf *, void *vaddr);
-int dma_buf_debugfs_create_file(const char *name,
-                               int (*write)(struct seq_file *));
 #endif /* __DMA_BUF_H__ */
index c2db3ca22217c85891f06b53496bb6f644d2399e..f196dd0b0f2f6c241715807f63bdccaa30973ce8 100644 (file)
@@ -1005,7 +1005,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
 /* Iterate through an efi_memory_map */
 #define for_each_efi_memory_desc_in_map(m, md)                            \
        for ((md) = (m)->map;                                              \
-            (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
+            ((void *)(md) + (m)->desc_size) <= (m)->map_end;              \
             (md) = (void *)(md) + (m)->desc_size)
 
 /**
diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h
new file mode 100644 (file)
index 0000000..86baaa4
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * fence-array: aggregate fences to be waited on together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ *     Gustavo Padovan <gustavo@padovan.org>
+ *     Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_FENCE_ARRAY_H
+#define __LINUX_FENCE_ARRAY_H
+
+#include <linux/fence.h>
+
+/**
+ * struct fence_array_cb - callback helper for fence array
+ * @cb: fence callback structure for signaling
+ * @array: reference to the parent fence array object
+ */
+struct fence_array_cb {
+       struct fence_cb cb;
+       struct fence_array *array;
+};
+
+/**
+ * struct fence_array - fence to represent an array of fences
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @num_fences: number of fences in the array
+ * @num_pending: fences in the array still pending
+ * @fences: array of the fences
+ */
+struct fence_array {
+       struct fence base;
+
+       spinlock_t lock;
+       unsigned num_fences;
+       atomic_t num_pending;
+       struct fence **fences;
+};
+
+extern const struct fence_ops fence_array_ops;
+
+/**
+ * to_fence_array - cast a fence to a fence_array
+ * @fence: fence to cast to a fence_array
+ *
+ * Returns NULL if the fence is not a fence_array,
+ * or the fence_array otherwise.
+ */
+static inline struct fence_array *to_fence_array(struct fence *fence)
+{
+       if (fence->ops != &fence_array_ops)
+               return NULL;
+
+       return container_of(fence, struct fence_array, base);
+}
+
+struct fence_array *fence_array_create(int num_fences, struct fence **fences,
+                                      u64 context, unsigned seqno,
+                                      bool signal_on_any);
+
+#endif /* __LINUX_FENCE_ARRAY_H */
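
A sketch of aggregating several fences and waiting on all of them (num_fences and fences are assumed to exist; on success the array takes over the kmalloc'd fences pointer array and its references):

	struct fence_array *array;

	/* signal_on_any == false: the array signals once every fence does */
	array = fence_array_create(num_fences, fences,
				   fence_context_alloc(1), 1, false);
	if (!array)
		return -ENOMEM;

	fence_wait(&array->base, false);	/* uninterruptible wait */
	fence_put(&array->base);		/* drop our reference */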
index 2b17698b60b844a6c08b490f506ec87033d89fc5..44d945e964730a56820eeea163a4af2c80b0a4c2 100644 (file)
@@ -49,6 +49,8 @@ struct fence_cb;
  * @timestamp: Timestamp when the fence was signaled.
  * @status: Optional, only valid if < 0, must be set before calling
  * fence_signal, indicates that the fence has completed with an error.
+ * @child_list: list of child fences
+ * @active_list: list of active fences
  *
  * the flags member must be manipulated and read using the appropriate
  * atomic ops (bit_*), so taking the spinlock will not be needed most
@@ -75,7 +77,8 @@ struct fence {
        struct rcu_head rcu;
        struct list_head cb_list;
        spinlock_t *lock;
-       unsigned context, seqno;
+       u64 context;
+       unsigned seqno;
        unsigned long flags;
        ktime_t timestamp;
        int status;
@@ -178,7 +181,7 @@ struct fence_ops {
 };
 
 void fence_init(struct fence *fence, const struct fence_ops *ops,
-               spinlock_t *lock, unsigned context, unsigned seqno);
+               spinlock_t *lock, u64 context, unsigned seqno);
 
 void fence_release(struct kref *kref);
 void fence_free(struct fence *fence);
@@ -352,27 +355,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr)
        return ret < 0 ? ret : 0;
 }
 
-unsigned fence_context_alloc(unsigned num);
+u64 fence_context_alloc(unsigned num);
 
 #define FENCE_TRACE(f, fmt, args...) \
        do {                                                            \
                struct fence *__ff = (f);                               \
                if (config_enabled(CONFIG_FENCE_TRACE))                 \
-                       pr_info("f %u#%u: " fmt,                        \
+                       pr_info("f %llu#%u: " fmt,                      \
                                __ff->context, __ff->seqno, ##args);    \
        } while (0)
 
 #define FENCE_WARN(f, fmt, args...) \
        do {                                                            \
                struct fence *__ff = (f);                               \
-               pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno,    \
+               pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno,  \
                         ##args);                                       \
        } while (0)
 
 #define FENCE_ERR(f, fmt, args...) \
        do {                                                            \
                struct fence *__ff = (f);                               \
-               pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno,     \
+               pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno,   \
                        ##args);                                        \
        } while (0)
 
index 6fc31ef1da2d8e9efe5d91b8e4349ed4fe0dd809..8f74f3d61894a4ad58b5cc3457f68f318a84c970 100644 (file)
@@ -467,7 +467,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 }
 #endif /* CONFIG_DEBUG_SET_MODULE_RONX */
 
-int sk_filter(struct sock *sk, struct sk_buff *skb);
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+{
+       return sk_filter_trim_cap(sk, skb, 1);
+}
 
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
index 604e1526cd00a23e27d6a426c8baa6a71bc309c2..13ba552e6c094e82ee8b952b26d9f0bf5eeebc39 100644 (file)
@@ -241,7 +241,7 @@ struct fscache_cache_ops {
 
        /* check the consistency between the backing cache and the FS-Cache
         * cookie */
-       bool (*check_consistency)(struct fscache_operation *op);
+       int (*check_consistency)(struct fscache_operation *op);
 
        /* store the updated auxiliary data on an object */
        void (*update_object)(struct fscache_object *object);
index 419fb9e03447aff8aef55934e89bbd844a28d7e7..f0a7a0320300bae6eca05e27a2083f80ed54282e 100644 (file)
@@ -94,7 +94,7 @@ static inline int split_huge_page(struct page *page)
 void deferred_split_huge_page(struct page *page);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long address, bool freeze);
+               unsigned long address, bool freeze, struct page *page);
 
 #define split_huge_pmd(__vma, __pmd, __address)                                \
        do {                                                            \
@@ -102,7 +102,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                if (pmd_trans_huge(*____pmd)                            \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
-                                               false);                 \
+                                               false, NULL);           \
        }  while (0)
 
 
index d029ffac0d691ab51065cd960ab53a926e9bbf39..99403b19092f2da0ecadaffd21e9fc8065539434 100644 (file)
@@ -223,6 +223,8 @@ struct st_sensor_settings {
  * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
  * @tf: Transfer function structure used by I/O operations.
  * @tb: Transfer buffers and mutex used by I/O operations.
+ * @hw_irq_trigger: whether the sensor's hardware interrupt trigger is in use.
+ * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
  */
 struct st_sensor_data {
        struct device *dev;
@@ -247,6 +249,9 @@ struct st_sensor_data {
 
        const struct st_sensor_transfer_function *tf;
        struct st_sensor_transfer_buffer tb;
+
+       bool hw_irq_trigger;
+       s64 hw_timestamp;
 };
 
 #ifdef CONFIG_IIO_BUFFER
@@ -260,7 +265,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                                const struct iio_trigger_ops *trigger_ops);
 
 void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
-
+int st_sensors_validate_device(struct iio_trigger *trig,
+                              struct iio_dev *indio_dev);
 #else
 static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                                const struct iio_trigger_ops *trigger_ops)
@@ -271,6 +277,7 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
 {
        return;
 }
+#define st_sensors_validate_device NULL
 #endif
 
 int st_sensors_init_sensor(struct iio_dev *indio_dev,
index 7c27fa1030e873d1841f3e291e9fc806992967e3..feb04ea20f11cb6e1a39fc93f7ab9d054907de99 100644 (file)
@@ -52,6 +52,12 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
 
 int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
+void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+
+int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+                            struct inet_diag_msg *r, int ext,
+                            struct user_namespace *user_ns);
+
 extern int  inet_diag_register(const struct inet_diag_handler *handler);
 extern void inet_diag_unregister(const struct inet_diag_handler *handler);
 #endif /* _INET_DIAG_H_ */
index f2cb8d45513d182bef20020c775c617f8215af3a..f8834f820ec2475f0c4dab07df153034ccd7d26c 100644 (file)
@@ -190,7 +190,7 @@ extern struct task_group root_task_group;
 #define INIT_TASK(tsk) \
 {                                                                      \
        .state          = 0,                                            \
-       .stack          = &init_thread_info,                            \
+       .stack          = init_stack,                                   \
        .usage          = ATOMIC_INIT(2),                               \
        .flags          = PF_KTHREAD,                                   \
        .prio           = MAX_PRIO-20,                                  \
index e399029b68c5bbdc5ae613c681217c9c554c0f9f..645ad06b5d52134b877322f86184c2729f487517 100644 (file)
@@ -100,14 +100,16 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
 }
 
 static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_wc(struct io_mapping *mapping,
+                 unsigned long offset,
+                 unsigned long size)
 {
        resource_size_t phys_addr;
 
        BUG_ON(offset >= mapping->size);
        phys_addr = mapping->base + offset;
 
-       return ioremap_wc(phys_addr, PAGE_SIZE);
+       return ioremap_wc(phys_addr, size);
 }
 
 static inline void
@@ -155,7 +157,9 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
 
 /* Non-atomic map/unmap */
 static inline void __iomem *
-io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+io_mapping_map_wc(struct io_mapping *mapping,
+                 unsigned long offset,
+                 unsigned long size)
 {
        return ((char __force __iomem *) mapping) + offset;
 }
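
Call sites now pass an explicit length where PAGE_SIZE used to be implied; a sketch of the updated pattern (mapping, offset and size are assumed locals):

	void __iomem *vaddr;

	/* map 'size' bytes write-combined, not just one page */
	vaddr = io_mapping_map_wc(mapping, offset, size);
	if (!vaddr)
		return -ENOMEM;
	/* ... MMIO accesses ... */
	io_mapping_unmap(vaddr);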
index bfbd707de390df9ea6734406d7ac7fb94c26e335..dc493e0f0ff7399857b63cb515508bd48e76ed0c 100644 (file)
 #define ICC_SGI1R_AFFINITY_1_SHIFT     16
 #define ICC_SGI1R_AFFINITY_1_MASK      (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
 #define ICC_SGI1R_SGI_ID_SHIFT         24
-#define ICC_SGI1R_SGI_ID_MASK          (0xff << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_SGI_ID_MASK          (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
 #define ICC_SGI1R_AFFINITY_2_SHIFT     32
-#define ICC_SGI1R_AFFINITY_2_MASK      (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_MASK      (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
 #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
 #define ICC_SGI1R_AFFINITY_3_SHIFT     48
-#define ICC_SGI1R_AFFINITY_3_MASK      (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_AFFINITY_3_MASK      (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
 
 #include <asm/arch_gicv3.h>
 
index 5ab85281230b9dd688a3cad78217a42e932ab746..f2d0258414cfef9a98384f3c59d691f387ef5862 100644 (file)
@@ -6,6 +6,7 @@
 #define __LINUX_ISA_H
 
 #include <linux/device.h>
+#include <linux/errno.h>
 #include <linux/kernel.h>
 
 struct isa_driver {
@@ -22,13 +23,13 @@ struct isa_driver {
 
 #define to_isa_driver(x) container_of((x), struct isa_driver, driver)
 
-#ifdef CONFIG_ISA
+#ifdef CONFIG_ISA_BUS_API
 int isa_register_driver(struct isa_driver *, unsigned int);
 void isa_unregister_driver(struct isa_driver *);
 #else
 static inline int isa_register_driver(struct isa_driver *d, unsigned int i)
 {
-       return 0;
+       return -ENODEV;
 }
 
 static inline void isa_unregister_driver(struct isa_driver *d)
index 0536524bb9eb6467013a51a70ed28b576494e649..68904469fba1b7492efda1b846edab53ed1f3b4e 100644 (file)
@@ -117,13 +117,18 @@ struct module;
 
 #include <linux/atomic.h>
 
+#ifdef HAVE_JUMP_LABEL
+
 static inline int static_key_count(struct static_key *key)
 {
-       return atomic_read(&key->enabled);
+       /*
+        * -1 means the first static_key_slow_inc() is in progress.
+        *  static_key_enabled() must return true, so return 1 here.
+        */
+       int n = atomic_read(&key->enabled);
+       return n >= 0 ? n : 1;
 }
 
-#ifdef HAVE_JUMP_LABEL
-
 #define JUMP_TYPE_FALSE        0UL
 #define JUMP_TYPE_TRUE 1UL
 #define JUMP_TYPE_MASK 1UL
@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
 
 #else  /* !HAVE_JUMP_LABEL */
 
+static inline int static_key_count(struct static_key *key)
+{
+       return atomic_read(&key->enabled);
+}
+
 static __always_inline void jump_label_init(void)
 {
        static_key_initialized = true;
index 611927f5870d203e3b987d9cc462f8ce74a59b91..ac4b3c46a84d1493d59e22e241c2cc7b9045ce27 100644 (file)
@@ -59,14 +59,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 
 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(const void *ptr);
-void kasan_kfree(void *ptr);
+void kasan_poison_kfree(void *ptr);
 void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
                  gfp_t flags);
 void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
 
 void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
 bool kasan_slab_free(struct kmem_cache *s, void *object);
-void kasan_poison_slab_free(struct kmem_cache *s, void *object);
 
 struct kasan_cache {
        int alloc_meta_offset;
@@ -76,6 +75,9 @@ struct kasan_cache {
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);
 
+size_t ksize(const void *);
+static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+
 #else /* CONFIG_KASAN */
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -102,7 +104,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
 
 static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_poison_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
                                size_t size, gfp_t flags) {}
 static inline void kasan_krealloc(const void *object, size_t new_size,
@@ -114,11 +116,12 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
        return false;
 }
-static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
+static inline void kasan_unpoison_slab(const void *ptr) { }
+
 #endif /* CONFIG_KASAN */
 
 #endif /* LINUX_KASAN_H */
index d2b13066e78155ccacc072afcd75a2ae2bcb23fa..e5e7f2e80a54dd2f15737a3ef478287458641f6e 100644 (file)
@@ -42,15 +42,16 @@ struct led_classdev {
 #define LED_UNREGISTERING      (1 << 1)
        /* Upper 16 bits reflect control information */
 #define LED_CORE_SUSPENDRESUME (1 << 16)
-#define LED_BLINK_ONESHOT      (1 << 17)
-#define LED_BLINK_ONESHOT_STOP (1 << 18)
-#define LED_BLINK_INVERT       (1 << 19)
-#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20)
-#define LED_BLINK_DISABLE      (1 << 21)
-#define LED_SYSFS_DISABLE      (1 << 22)
-#define LED_DEV_CAP_FLASH      (1 << 23)
-#define LED_HW_PLUGGABLE       (1 << 24)
-#define LED_PANIC_INDICATOR    (1 << 25)
+#define LED_BLINK_SW           (1 << 17)
+#define LED_BLINK_ONESHOT      (1 << 18)
+#define LED_BLINK_ONESHOT_STOP (1 << 19)
+#define LED_BLINK_INVERT       (1 << 20)
+#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21)
+#define LED_BLINK_DISABLE      (1 << 22)
+#define LED_SYSFS_DISABLE      (1 << 23)
+#define LED_DEV_CAP_FLASH      (1 << 24)
+#define LED_HW_PLUGGABLE       (1 << 25)
+#define LED_PANIC_INDICATOR    (1 << 26)
 
        /* Set LED brightness level
         * Must not sleep. Use brightness_set_blocking for drivers
@@ -72,8 +73,8 @@ struct led_classdev {
         * and if both are zero then a sensible default should be chosen.
         * The call should adjust the timings in that case and if it can't
         * match the values specified exactly.
-        * Deactivate blinking again when the brightness is set to a fixed
-        * value via the brightness_set() callback.
+        * Deactivate blinking again when the brightness is set to LED_OFF
+        * via the brightness_set() callback.
         */
        int             (*blink_set)(struct led_classdev *led_cdev,
                                     unsigned long *delay_on,
index a805474df4abd8c70c83bdbd3b383ad9c02eca1c..56e6069d245271539f14bf34c204122665e28ab6 100644 (file)
@@ -97,6 +97,11 @@ enum mem_cgroup_events_target {
 #define MEM_CGROUP_ID_SHIFT    16
 #define MEM_CGROUP_ID_MAX      USHRT_MAX
 
+struct mem_cgroup_id {
+       int id;
+       atomic_t ref;
+};
+
 struct mem_cgroup_stat_cpu {
        long count[MEMCG_NR_STAT];
        unsigned long events[MEMCG_NR_EVENTS];
@@ -172,6 +177,9 @@ enum memcg_kmem_state {
 struct mem_cgroup {
        struct cgroup_subsys_state css;
 
+       /* Private memcg ID. Used to ID objects that outlive the cgroup */
+       struct mem_cgroup_id id;
+
        /* Accounted resources */
        struct page_counter memory;
        struct page_counter swap;
@@ -330,22 +338,9 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
        if (mem_cgroup_disabled())
                return 0;
 
-       return memcg->css.id;
-}
-
-/**
- * mem_cgroup_from_id - look up a memcg from an id
- * @id: the id to look up
- *
- * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
- */
-static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
-{
-       struct cgroup_subsys_state *css;
-
-       css = css_from_id(id, &memory_cgrp_subsys);
-       return mem_cgroup_from_css(css);
+       return memcg->id.id;
 }
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
 
 /**
  * parent_mem_cgroup - find the accounting parent of a memcg
index c18a4c19d6fcedcd66adebdb45c383af09930408..ce9230af09c20c0498e1111fdf148b8f6406b30e 100644 (file)
@@ -171,7 +171,7 @@ static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
 static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
                                      unsigned reg_cnt, unsigned char *val)
 {
-       int ret;
+       int ret = 0;
        int i;
 
        for (i = 0; i < reg_cnt; i++) {
index 80dec87a94f8431024d88a6303f1f9189b2114e6..d46a0e7f144df536bfba29325904a2a0bf8c7cf7 100644 (file)
@@ -466,6 +466,7 @@ enum {
 enum {
        MLX4_INTERFACE_STATE_UP         = 1 << 0,
        MLX4_INTERFACE_STATE_DELETION   = 1 << 1,
+       MLX4_INTERFACE_STATE_SHUTDOWN   = 1 << 2,
 };
 
 #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
index 035abdf62cfe953e5b15822aa847868032989574..73a48479892dd60ffc60a52e508764a4f897abde 100644 (file)
@@ -1240,8 +1240,6 @@ struct mlx5_destroy_psv_out {
        u8                      rsvd[8];
 };
 
-#define MLX5_CMD_OP_MAX 0x920
-
 enum {
        VPORT_STATE_DOWN                = 0x0,
        VPORT_STATE_UP                  = 0x1,
@@ -1369,6 +1367,12 @@ enum mlx5_cap_type {
 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
        MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
 
+#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
+       MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
+       MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+
 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
        MLX5_GET(flow_table_eswitch_cap, \
                 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
index 80776d0c52dc9c48b7a02842caacbf9699e73507..fd72ecf0ce9fe55e3e0b1be1bc1ea6231c4378f2 100644 (file)
@@ -629,6 +629,7 @@ struct mlx5_cmd_work_ent {
        void                   *uout;
        int                     uout_size;
        mlx5_cmd_cbk_t          callback;
+       struct delayed_work     cb_timeout_work;
        void                   *context;
        int                     idx;
        struct completion       done;
index 9a05cd7e5890b43400c65f6c903d5d815daffa28..e955a285900991c5623fca30010a960f68b7c7e8 100644 (file)
@@ -205,7 +205,8 @@ enum {
        MLX5_CMD_OP_ALLOC_FLOW_COUNTER            = 0x939,
        MLX5_CMD_OP_DEALLOC_FLOW_COUNTER          = 0x93a,
        MLX5_CMD_OP_QUERY_FLOW_COUNTER            = 0x93b,
-       MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c
+       MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c,
+       MLX5_CMD_OP_MAX
 };
 
 struct mlx5_ifc_flow_table_fields_supported_bits {
@@ -500,7 +501,9 @@ struct mlx5_ifc_e_switch_cap_bits {
        u8         vport_svlan_insert[0x1];
        u8         vport_cvlan_insert_if_not_exist[0x1];
        u8         vport_cvlan_insert_overwrite[0x1];
-       u8         reserved_at_5[0x1b];
+       u8         reserved_at_5[0x19];
+       u8         nic_vport_node_guid_modify[0x1];
+       u8         nic_vport_port_guid_modify[0x1];
 
        u8         reserved_at_20[0x7e0];
 };
@@ -4583,7 +4586,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
 };
 
 struct mlx5_ifc_modify_nic_vport_field_select_bits {
-       u8         reserved_at_0[0x19];
+       u8         reserved_at_0[0x16];
+       u8         node_guid[0x1];
+       u8         port_guid[0x1];
+       u8         reserved_at_18[0x1];
        u8         mtu[0x1];
        u8         change_event[0x1];
        u8         promisc[0x1];
index 64221027bf1f0d8bdaa2a1506e18da338e0f0c1e..ab310819ac3605a426c35ae273d4a66926794c48 100644 (file)
@@ -172,6 +172,7 @@ enum {
 enum {
        MLX5_FENCE_MODE_NONE                    = 0 << 5,
        MLX5_FENCE_MODE_INITIATOR_SMALL         = 1 << 5,
+       MLX5_FENCE_MODE_FENCE                   = 2 << 5,
        MLX5_FENCE_MODE_STRONG_ORDERING         = 3 << 5,
        MLX5_FENCE_MODE_SMALL_AND_FENCE         = 4 << 5,
 };
@@ -460,10 +461,9 @@ struct mlx5_core_qp {
 };
 
 struct mlx5_qp_path {
-       u8                      fl;
+       u8                      fl_free_ar;
        u8                      rsvd3;
-       u8                      free_ar;
-       u8                      pkey_index;
+       __be16                  pkey_index;
        u8                      rsvd0;
        u8                      grh_mlid;
        __be16                  rlid;
@@ -560,6 +560,7 @@ struct mlx5_modify_qp_mbox_in {
        __be32                  optparam;
        u8                      rsvd0[4];
        struct mlx5_qp_context  ctx;
+       u8                      rsvd2[16];
 };
 
 struct mlx5_modify_qp_mbox_out {
index 301da4a5e6bfa340363799310a21251c685935f0..6c16c198f680c26df40d1322ca1381e6bbb90510 100644 (file)
@@ -50,6 +50,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+                                   u32 vport, u64 node_guid);
 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
                                        u16 *qkey_viol_cntr);
 int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
index 5df5feb49575cd66c17d574b7431ed5ee60e7ec3..ece042dfe23cbf46c196b868c8efaff71d7cc828 100644 (file)
@@ -602,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }
 
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-               struct page *page, pte_t *pte, bool write, bool anon, bool old);
+               struct page *page, pte_t *pte, bool write, bool anon);
 #endif
 
 /*
index ec5ec2818a288d52158098e829fb57d68e4ec87b..d3d0398f2a1b4728f3db324eeeb86feab02a98a5 100644 (file)
@@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 #define LOOKUP_ROOT            0x2000
 #define LOOKUP_EMPTY           0x4000
 
+extern int path_pts(struct path *path);
+
 extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
 
 static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
index 9aa49a05fe389214c46b6ea48577c813d525a874..25aa03b51c4e1e2f6fa1a9d257493dc62098f982 100644 (file)
@@ -251,7 +251,8 @@ do {                                                                        \
        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
        if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&        \
            net_ratelimit())                                            \
-               __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);    \
+               __dynamic_pr_debug(&descriptor, pr_fmt(fmt),            \
+                                  ##__VA_ARGS__);                      \
 } while (0)
 #elif defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...)                          \
index f45929ce815725d868261e9a2585ac53d0c8f128..da4b33bea9828857a28ab9060cbfe2d491cd3058 100644 (file)
@@ -4145,6 +4145,13 @@ static inline void netif_keep_dst(struct net_device *dev)
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
 }
 
+/* return true if dev can't cope with mtu frames that need vlan tag insertion */
+static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
+{
+       /* TODO: reserve and use an additional IFF bit, if we get more users */
+       return dev->priv_flags & IFF_MACSEC;
+}
+
 extern struct pernet_operations __net_initdata loopback_net_ops;
 
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
index c7292e8ea080118b9d09542a875b95ebbddc31b3..74eb28cadbef032017ae12fe4d11ab4f70996d63 100644 (file)
@@ -614,7 +614,7 @@ static inline struct device_node *of_parse_phandle(const struct device_node *np,
        return NULL;
 }
 
-static inline int of_parse_phandle_with_args(struct device_node *np,
+static inline int of_parse_phandle_with_args(const struct device_node *np,
                                             const char *list_name,
                                             const char *cells_name,
                                             int index,
index f6e9e85164e8bd8dc3c110d01fdca23a504201ba..b969e944396223defb902c7e7f2ac1238b410585 100644 (file)
@@ -8,7 +8,7 @@ struct pci_dev;
 struct of_phandle_args;
 struct device_node;
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_PCI
 int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
 struct device_node *of_pci_find_child_device(struct device_node *parent,
                                             unsigned int devfn);
index 1779cda99ef7c782b87e57baa3ab6dc979e4171b..f8e1992d6423ca837e731ddb32c83c6ff9a6fe98 100644 (file)
@@ -34,6 +34,13 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
                                       struct device_node *np, int idx);
 void of_reserved_mem_device_release(struct device *dev);
 
+int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+                                            phys_addr_t align,
+                                            phys_addr_t start,
+                                            phys_addr_t end,
+                                            bool nomap,
+                                            phys_addr_t *res_base);
+
 void fdt_init_reserved_mem(void);
 void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
                               phys_addr_t base, phys_addr_t size);
index bf268fa92c5b34a1bffce97a8cd38acf2b161059..fec40271339f8044aa1e3f1743e62017f146f9cc 100644 (file)
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
 
 static inline bool page_is_young(struct page *page)
 {
-       return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return false;
+
+       return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline void set_page_young(struct page *page)
 {
-       set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return;
+
+       set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool test_and_clear_page_young(struct page *page)
 {
-       return test_and_clear_bit(PAGE_EXT_YOUNG,
-                                 &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return false;
+
+       return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool page_is_idle(struct page *page)
 {
-       return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return false;
+
+       return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void set_page_idle(struct page *page)
 {
-       set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return;
+
+       set_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void clear_page_idle(struct page *page)
 {
-       clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       if (unlikely(!page_ext))
+               return;
+
+       clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 #endif /* CONFIG_64BIT */
 
index d921afd5f10907c8d8058c0136463f6fcf54f345..12343caa114ef3ec27a5ab5a3caaf5320cbe6508 100644 (file)
@@ -175,6 +175,8 @@ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
 int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
                struct device_node *np_config, struct pinctrl_map **map,
                unsigned *num_maps, enum pinctrl_map_type type);
+void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
+               struct pinctrl_map *map, unsigned num_maps);
 
 static inline int pinconf_generic_dt_node_to_map_group(
                struct pinctrl_dev *pctldev, struct device_node *np_config,
diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h
new file mode 100644 (file)
index 0000000..6791779
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2016 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __OMAPDSS_PDATA_H
+#define __OMAPDSS_PDATA_H
+
+enum omapdss_version {
+       OMAPDSS_VER_UNKNOWN = 0,
+       OMAPDSS_VER_OMAP24xx,
+       OMAPDSS_VER_OMAP34xx_ES1,       /* OMAP3430 ES1.0, 2.0 */
+       OMAPDSS_VER_OMAP34xx_ES3,       /* OMAP3430 ES3.0+ */
+       OMAPDSS_VER_OMAP3630,
+       OMAPDSS_VER_AM35xx,
+       OMAPDSS_VER_OMAP4430_ES1,       /* OMAP4430 ES1.0 */
+       OMAPDSS_VER_OMAP4430_ES2,       /* OMAP4430 ES2.0, 2.1, 2.2 */
+       OMAPDSS_VER_OMAP4,              /* All other OMAP4s */
+       OMAPDSS_VER_OMAP5,
+       OMAPDSS_VER_AM43xx,
+       OMAPDSS_VER_DRA7xx,
+};
+
+/* Board specific data */
+struct omap_dss_board_info {
+       const char *default_display_name;
+       int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask);
+       void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask);
+       int (*set_min_bus_tput)(struct device *dev, unsigned long r);
+       enum omapdss_version version;
+};
+
+#endif /* __OMAPDSS_PDATA_H */
index 5b5a80cc59265882813cddba568b0875797e5e6b..c818772d9f9d13538309226a89894b03a78c1aa4 100644 (file)
@@ -43,10 +43,8 @@ struct posix_acl_entry {
 };
 
 struct posix_acl {
-       union {
-               atomic_t                a_refcount;
-               struct rcu_head         a_rcu;
-       };
+       atomic_t                a_refcount;
+       struct rcu_head         a_rcu;
        unsigned int            a_count;
        struct posix_acl_entry  a_entries[0];
 };
index 17018f3c066ed5a44c76ca77ce63b6203725ef34..c038ae36b10e7580645520d443d5faa8a792e645 100644 (file)
@@ -235,6 +235,9 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
        if (!pwm)
                return -EINVAL;
 
+       if (duty_ns < 0 || period_ns < 0)
+               return -EINVAL;
+
        pwm_get_state(pwm, &state);
        if (state.duty_cycle == duty_ns && state.period == period_ns)
                return 0;
@@ -461,6 +464,8 @@ static inline bool pwm_can_sleep(struct pwm_device *pwm)
 
 static inline void pwm_apply_args(struct pwm_device *pwm)
 {
+       struct pwm_state state = { };
+
        /*
         * PWM users calling pwm_apply_args() expect to have a fresh config
         * where the polarity and period are set according to pwm_args info.
@@ -473,18 +478,20 @@ static inline void pwm_apply_args(struct pwm_device *pwm)
         * at startup (even if they are actually enabled), thus authorizing
         * polarity setting.
         *
-        * Instead of setting ->enabled to false, we call pwm_disable()
-        * before pwm_set_polarity() to ensure that everything is configured
-        * as expected, and the PWM is really disabled when the user request
-        * it.
+        * To fulfill this requirement, we apply a new state which disables
+        * the PWM device and sets the reference period and polarity config.
         *
         * Note that PWM users requiring a smooth handover between the
         * bootloader and the kernel (like critical regulators controlled by
         * PWM devices) will have to switch to the atomic API and avoid calling
         * pwm_apply_args().
         */
-       pwm_disable(pwm);
-       pwm_set_polarity(pwm, pwm->args.polarity);
+
+       state.enabled = false;
+       state.polarity = pwm->args.polarity;
+       state.period = pwm->args.period;
+
+       pwm_apply_state(pwm, &state);
 }
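
The atomic pwm_apply_state() API that the comment above refers to lets a consumer program every parameter in one call; a sketch, with illustrative values:

	struct pwm_state state;

	pwm_get_state(pwm, &state);	/* start from the current state */
	state.period = 1000000;		/* ns, illustrative */
	state.duty_cycle = 500000;	/* ns, illustrative */
	state.enabled = true;
	pwm_apply_state(pwm, &state);	/* one atomic update, no glitch */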
 
 struct pwm_lookup {
index 6ae8cb4a61d39969283a382076c8cf261a77bfa6..6c876a63558d71c4ff3a957fba45cfc3b2bbbc3a 100644 (file)
@@ -49,6 +49,7 @@ struct qed_start_vport_params {
        bool drop_ttl0;
        u8 vport_id;
        u16 mtu;
+       bool clear_stats;
 };
 
 struct qed_stop_rxq_params {
index cb4b7e8cee81a40cbfedf0a781c7f701ed9e6ee0..eca6f626c16e7d513489341327588846e1239ffb 100644 (file)
@@ -407,6 +407,7 @@ static inline __must_check
 void **radix_tree_iter_retry(struct radix_tree_iter *iter)
 {
        iter->next_index = iter->index;
+       iter->tags = 0;
        return NULL;
 }
 
index 49d057655d62b0614afbeea04b2d2b47bc97e7ea..b0f305e77b7f05e30bcd6ce4e7e566f640cccb80 100644 (file)
@@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class;
 extern struct lock_class_key reservation_seqcount_class;
 extern const char reservation_seqcount_string[];
 
+/**
+ * struct reservation_object_list - a list of shared fences
+ * @rcu: for internal use
+ * @shared_count: number of fences in the shared table
+ * @shared_max: allocated size of the shared table, for growing it
+ * @shared: shared fence table
+ */
 struct reservation_object_list {
        struct rcu_head rcu;
        u32 shared_count, shared_max;
        struct fence __rcu *shared[];
 };
 
+/**
+ * struct reservation_object - a reservation object manages fences for a buffer
+ * @lock: update side lock
+ * @seq: sequence count for managing RCU read-side synchronization
+ * @fence_excl: the exclusive fence, if there is one currently
+ * @fence: list of current shared fences
+ * @staged: staged copy of shared fences for RCU updates
+ */
 struct reservation_object {
        struct ww_mutex lock;
        seqcount_t seq;
@@ -68,6 +83,10 @@ struct reservation_object {
 #define reservation_object_assert_held(obj) \
        lockdep_assert_held(&(obj)->lock.base)
 
+/**
+ * reservation_object_init - initialize a reservation object
+ * @obj: the reservation object
+ */
 static inline void
 reservation_object_init(struct reservation_object *obj)
 {
@@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj)
        obj->staged = NULL;
 }
 
+/**
+ * reservation_object_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
 static inline void
 reservation_object_fini(struct reservation_object *obj)
 {
@@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj)
        ww_mutex_destroy(&obj->lock);
 }
 
+/**
+ * reservation_object_get_list - get the reservation object's
+ * shared fence list, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list.  Does NOT take references to
+ * the fences.  The obj->lock must be held.
+ */
 static inline struct reservation_object_list *
 reservation_object_get_list(struct reservation_object *obj)
 {
@@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj)
                                         reservation_object_held(obj));
 }
 
+/**
+ * reservation_object_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any).  Does NOT take a
+ * reference.  The obj->lock must be held.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
 static inline struct fence *
 reservation_object_get_excl(struct reservation_object *obj)
 {
@@ -120,6 +162,17 @@ reservation_object_get_excl(struct reservation_object *obj)
                                         reservation_object_held(obj));
 }
 
+/**
+ * reservation_object_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments its
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
 static inline struct fence *
 reservation_object_get_excl_rcu(struct reservation_object *obj)
 {
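
A sketch of the intended lockless-reader usage; the returned fence carries a reference the caller must drop:

	struct fence *excl;

	excl = reservation_object_get_excl_rcu(obj);
	if (excl) {
		fence_wait(excl, false);	/* e.g. idle before CPU access */
		fence_put(excl);		/* drop the reference we took */
	}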
index ec0306ce7b92ab6f0e4bb420347ca9301b1355e3..45a4abeb6acba0193fffd134176a956950b53a4a 100644 (file)
@@ -84,8 +84,8 @@ static inline struct reset_control *__devm_reset_control_get(
 #endif /* CONFIG_RESET_CONTROLLER */
 
 /**
- * reset_control_get - Lookup and obtain an exclusive reference to a
- *                     reset controller.
+ * reset_control_get_exclusive - Lookup and obtain an exclusive reference
+ *                               to a reset controller.
  * @dev: device to be reset by the controller
  * @id: reset line name
  *
@@ -98,8 +98,8 @@ static inline struct reset_control *__devm_reset_control_get(
  *
  * Use of id names is optional.
  */
-static inline struct reset_control *__must_check reset_control_get(
-                                       struct device *dev, const char *id)
+static inline struct reset_control *
+__must_check reset_control_get_exclusive(struct device *dev, const char *id)
 {
 #ifndef CONFIG_RESET_CONTROLLER
        WARN_ON(1);
@@ -107,12 +107,6 @@ static inline struct reset_control *__must_check reset_control_get(
        return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
 }
 
-static inline struct reset_control *reset_control_get_optional(
-                                       struct device *dev, const char *id)
-{
-       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
-}
-
 /**
  * reset_control_get_shared - Lookup and obtain a shared reference to a
  *                            reset controller.
@@ -141,9 +135,21 @@ static inline struct reset_control *reset_control_get_shared(
        return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
 }
 
+static inline struct reset_control *reset_control_get_optional_exclusive(
+                                       struct device *dev, const char *id)
+{
+       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 0);
+}
+
+static inline struct reset_control *reset_control_get_optional_shared(
+                                       struct device *dev, const char *id)
+{
+       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, 1);
+}
+
 /**
- * of_reset_control_get - Lookup and obtain an exclusive reference to a
- *                        reset controller.
+ * of_reset_control_get_exclusive - Lookup and obtain an exclusive reference
+ *                                  to a reset controller.
  * @node: device to be reset by the controller
  * @id: reset line name
  *
@@ -151,15 +157,41 @@ static inline struct reset_control *reset_control_get_shared(
  *
  * Use of id names is optional.
  */
-static inline struct reset_control *of_reset_control_get(
+static inline struct reset_control *of_reset_control_get_exclusive(
                                struct device_node *node, const char *id)
 {
        return __of_reset_control_get(node, id, 0, 0);
 }
 
 /**
- * of_reset_control_get_by_index - Lookup and obtain an exclusive reference to
- *                                 a reset controller by index.
+ * of_reset_control_get_shared - Lookup and obtain a shared reference
+ *                               to a reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed; the reset core keeps track of a deassert_count and
+ * only (re-)asserts the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *of_reset_control_get_shared(
+                               struct device_node *node, const char *id)
+{
+       return __of_reset_control_get(node, id, 0, 1);
+}
+
+/**
+ * of_reset_control_get_exclusive_by_index - Lookup and obtain an exclusive
+ *                                           reference to a reset controller
+ *                                           by index.
  * @node: device to be reset by the controller
  * @index: index of the reset controller
  *
@@ -167,49 +199,60 @@ static inline struct reset_control *of_reset_control_get(
  * in whatever order. Returns a struct reset_control or IS_ERR() condition
  * containing errno.
  */
-static inline struct reset_control *of_reset_control_get_by_index(
+static inline struct reset_control *of_reset_control_get_exclusive_by_index(
                                        struct device_node *node, int index)
 {
        return __of_reset_control_get(node, NULL, index, 0);
 }
 
 /**
- * devm_reset_control_get - resource managed reset_control_get()
- * @dev: device to be reset by the controller
- * @id: reset line name
+ * of_reset_control_get_shared_by_index - Lookup and obtain a shared
+ *                                        reference to a reset controller
+ *                                        by index.
+ * @node: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * When a reset-control is shared, the behavior of reset_control_assert /
+ * deassert is changed; the reset core keeps track of a deassert_count and
+ * only (re-)asserts the reset after reset_control_assert has been called
+ * as many times as reset_control_deassert was called. Also see the remark
+ * about shared reset-controls in the reset_control_assert docs.
+ *
+ * Calling reset_control_assert without first calling reset_control_deassert
+ * is not allowed on a shared reset control. Calling reset_control_reset is
+ * also not allowed on a shared reset control.
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
  *
- * Managed reset_control_get(). For reset controllers returned from this
- * function, reset_control_put() is called automatically on driver detach.
- * See reset_control_get() for more information.
+ * This is to be used to perform a list of resets for a device or power domain
+ * in whatever order.
  */
-static inline struct reset_control *__must_check devm_reset_control_get(
-                                       struct device *dev, const char *id)
-{
-#ifndef CONFIG_RESET_CONTROLLER
-       WARN_ON(1);
-#endif
-       return __devm_reset_control_get(dev, id, 0, 0);
-}
-
-static inline struct reset_control *devm_reset_control_get_optional(
-                                       struct device *dev, const char *id)
+static inline struct reset_control *of_reset_control_get_shared_by_index(
+                                       struct device_node *node, int index)
 {
-       return __devm_reset_control_get(dev, id, 0, 0);
+       return __of_reset_control_get(node, NULL, index, 1);
 }
 
 /**
- * devm_reset_control_get_by_index - resource managed reset_control_get
+ * devm_reset_control_get_exclusive - resource managed
+ *                                    reset_control_get_exclusive()
  * @dev: device to be reset by the controller
- * @index: index of the reset controller
+ * @id: reset line name
  *
- * Managed reset_control_get(). For reset controllers returned from this
- * function, reset_control_put() is called automatically on driver detach.
- * See reset_control_get() for more information.
+ * Managed reset_control_get_exclusive(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_get_exclusive() for more information.
  */
-static inline struct reset_control *devm_reset_control_get_by_index(
-                                       struct device *dev, int index)
+static inline struct reset_control *
+__must_check devm_reset_control_get_exclusive(struct device *dev,
+                                             const char *id)
 {
-       return __devm_reset_control_get(dev, NULL, index, 0);
+#ifndef CONFIG_RESET_CONTROLLER
+       WARN_ON(1);
+#endif
+       return __devm_reset_control_get(dev, id, 0, 0);
 }
 
 /**
@@ -227,6 +270,36 @@ static inline struct reset_control *devm_reset_control_get_shared(
        return __devm_reset_control_get(dev, id, 0, 1);
 }
 
+static inline struct reset_control *devm_reset_control_get_optional_exclusive(
+                                       struct device *dev, const char *id)
+{
+       return __devm_reset_control_get(dev, id, 0, 0);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional_shared(
+                                       struct device *dev, const char *id)
+{
+       return __devm_reset_control_get(dev, id, 0, 1);
+}
+
+/**
+ * devm_reset_control_get_exclusive_by_index - resource managed
+ *                                             reset_control_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @index: index of the reset controller
+ *
+ * Managed reset_control_get_exclusive(). For reset controllers returned from
+ * this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_get_exclusive() for more information.
+ */
+static inline struct reset_control *
+devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
+{
+       return __devm_reset_control_get(dev, NULL, index, 0);
+}
+
 /**
  * devm_reset_control_get_shared_by_index - resource managed
  * reset_control_get_shared
@@ -237,10 +310,60 @@ static inline struct reset_control *devm_reset_control_get_shared(
  * this function, reset_control_put() is called automatically on driver detach.
  * See reset_control_get_shared() for more information.
  */
-static inline struct reset_control *devm_reset_control_get_shared_by_index(
-                                       struct device *dev, int index)
+static inline struct reset_control *
+devm_reset_control_get_shared_by_index(struct device *dev, int index)
 {
        return __devm_reset_control_get(dev, NULL, index, 1);
 }
 
+/*
+ * TEMPORARY calls to use during transition:
+ *
+ *   reset_control_get()        => reset_control_get_exclusive()
+ *   of_reset_control_get()     => of_reset_control_get_exclusive()
+ *   devm_reset_control_get()   => devm_reset_control_get_exclusive()
+ *   (likewise for the _optional and _by_index variants below)
+ *
+ * These inline function calls will be removed once all consumers
+ * have been moved over to the new explicit API.
+ */
+static inline struct reset_control *reset_control_get(
+                               struct device *dev, const char *id)
+{
+       return reset_control_get_exclusive(dev, id);
+}
+
+static inline struct reset_control *reset_control_get_optional(
+                                       struct device *dev, const char *id)
+{
+       return reset_control_get_optional_exclusive(dev, id);
+}
+
+static inline struct reset_control *of_reset_control_get(
+                               struct device_node *node, const char *id)
+{
+       return of_reset_control_get_exclusive(node, id);
+}
+
+static inline struct reset_control *of_reset_control_get_by_index(
+                               struct device_node *node, int index)
+{
+       return of_reset_control_get_exclusive_by_index(node, index);
+}
+
+static inline struct reset_control *devm_reset_control_get(
+                               struct device *dev, const char *id)
+{
+       return devm_reset_control_get_exclusive(dev, id);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional(
+                               struct device *dev, const char *id)
+{
+       return devm_reset_control_get_optional_exclusive(dev, id);
+}
+
+static inline struct reset_control *devm_reset_control_get_by_index(
+                               struct device *dev, int index)
+{
+       return devm_reset_control_get_exclusive_by_index(dev, index);
+}
 #endif
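
A minimal consumer-side sketch of the exclusive/shared split introduced
above, assuming a device whose "bus" reset line is shared with other
peripherals; example_probe is hypothetical:

#include <linux/err.h>
#include <linux/reset.h>

static int example_probe(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_shared(dev, "bus");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/*
	 * Shared semantics: the line is deasserted for the first caller
	 * only, and reasserted once every user has called
	 * reset_control_assert() again; reset_control_reset() is not
	 * allowed on this handle.
	 */
	return reset_control_deassert(rst);
}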
index 49eb4f8ebac9636a394dbe097532c70a08e056d7..2b0fad83683f793959fbe330045abb7c684edfcf 100644 (file)
@@ -158,7 +158,7 @@ struct anon_vma *page_get_anon_vma(struct page *page);
 /*
  * rmap interfaces called when adding or removing pte of page
  */
-void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_move_anon_rmap(struct page *, struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
index 6e42ada26345d507ffdec856107382a9fb67656a..253538f29ade890dcd48d1708fcd15ae362653be 100644 (file)
@@ -3007,7 +3007,7 @@ static inline int object_is_on_stack(void *obj)
        return (obj >= stack) && (obj < (stack + THREAD_SIZE));
 }
 
-extern void thread_info_cache_init(void);
+extern void thread_stack_cache_init(void);
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
 static inline unsigned long stack_not_used(struct task_struct *p)
index dacb5e71199435a1062ea4c74151649f1d64f1e8..de1f64318fc4ec02aa5f2b449bbbd152bc1a0144 100644 (file)
@@ -765,6 +765,8 @@ struct sctp_info {
        __u8    sctpi_s_disable_fragments;
        __u8    sctpi_s_v4mapped;
        __u8    sctpi_s_frag_interleave;
+       __u32   sctpi_s_type;
+       __u32   __reserved3;
 };
 
 struct sctp_infox {
index 7973a821ac58877a56ff5214793786948170ae8e..ead97654c4e9afa7239e068cb37f6981d77ce6a3 100644 (file)
@@ -277,7 +277,10 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
 
 static inline int raw_read_seqcount_latch(seqcount_t *s)
 {
-       return lockless_dereference(s)->sequence;
+       int seq = READ_ONCE(s->sequence);
+       /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
+       smp_read_barrier_depends();
+       return seq;
 }
 
 /**
@@ -331,7 +334,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
  *     unsigned seq, idx;
  *
  *     do {
- *             seq = lockless_dereference(latch)->seq;
+ *             seq = raw_read_seqcount_latch(&latch->seq);
  *
  *             idx = seq & 0x01;
  *             entry = data_query(latch->data[idx], ...);
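
A minimal reader-side sketch of the latch pattern this hunk updates,
assuming a two-copy structure like the one in the surrounding example;
struct example_latch and example_latch_read are hypothetical:

#include <linux/seqlock.h>

struct example_latch {
	seqcount_t	seq;
	int		data[2];	/* two copies, selected by seq & 1 */
};

static int example_latch_read(struct example_latch *latch)
{
	int seq, val;

	do {
		seq = raw_read_seqcount_latch(&latch->seq);
		val = latch->data[seq & 1];
		smp_rmb();	/* order the data read before the re-check */
	} while (seq != READ_ONCE(latch->seq.sequence));

	return val;
}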
index ee38a41274759f279be1c0752a7fab63fac517c8..f39b37180c414deb6d71c0ab5d674f89958630c0 100644 (file)
@@ -1062,6 +1062,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
 }
 
 void __skb_get_hash(struct sk_buff *skb);
+u32 __skb_get_hash_symmetric(struct sk_buff *skb);
 u32 skb_get_poff(const struct sk_buff *skb);
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
                   const struct flow_keys *keys, int hlen);
@@ -2869,6 +2870,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
                skb->csum = csum_partial(start, len, skb->csum);
 }
 
+/**
+ *     skb_push_rcsum - push skb and update receive checksum
+ *     @skb: buffer to update
+ *     @len: length of data pushed
+ *
+ *     This function performs an skb_push on the packet and updates
+ *     the CHECKSUM_COMPLETE checksum.  It should be used on
+ *     receive path processing instead of skb_push unless you know
+ *     that the checksum difference is zero (e.g., a valid IP header)
+ *     or you are setting ip_summed to CHECKSUM_NONE.
+ */
+static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
+                                           unsigned int len)
+{
+       skb_push(skb, len);
+       skb_postpush_rcsum(skb, skb->data, len);
+       return skb->data;
+}
+
 /**
  *     pskb_trim_rcsum - trim received skb and update checksum
  *     @skb: buffer to trim
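
A minimal call-site sketch for the new helper, assuming a receive handler
that pulled an encapsulation header earlier and now restores it;
example_restore_header is hypothetical:

#include <linux/skbuff.h>

static void example_restore_header(struct sk_buff *skb, unsigned int hdr_len)
{
	/*
	 * A plain skb_push() would leave skb->csum stale for
	 * CHECKSUM_COMPLETE packets; skb_push_rcsum() folds the restored
	 * bytes back into the checksum.
	 */
	skb_push_rcsum(skb, hdr_len);
}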
index 4018b48f2b3b4f115802fba8b67fcc3cd113c773..a0596ca0e80ac77aeb0afa29648532ef51a5deae 100644 (file)
@@ -36,6 +36,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
 {
        switch (sk->sk_family) {
        case AF_INET:
+               if (sk->sk_type == SOCK_RAW)
+                       return SKNLGRP_NONE;
+
                switch (sk->sk_protocol) {
                case IPPROTO_TCP:
                        return SKNLGRP_INET_TCP_DESTROY;
@@ -45,6 +48,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
                        return SKNLGRP_NONE;
                }
        case AF_INET6:
+               if (sk->sk_type == SOCK_RAW)
+                       return SKNLGRP_NONE;
+
                switch (sk->sk_protocol) {
                case IPPROTO_TCP:
                        return SKNLGRP_INET6_TCP_DESTROY;
index 19c659d1c0f86cd40fb9b90f48dac0cb7071b560..b6810c92b8bb14d9ef8c9d1036a0b25d40530ac3 100644 (file)
@@ -137,8 +137,6 @@ struct rpc_create_args {
 #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT     (1UL << 9)
 
 struct rpc_clnt *rpc_create(struct rpc_create_args *args);
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
-                                       struct rpc_xprt *xprt);
 struct rpc_clnt        *rpc_bind_new_program(struct rpc_clnt *,
                                const struct rpc_program *, u32);
 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
index b7dabc4baafd817d0b2a41c5cf8e54e4b3fddc47..79ba50856707b9b9a619be78335b27b383312ed9 100644 (file)
@@ -84,6 +84,7 @@ struct svc_xprt {
 
        struct net              *xpt_net;
        struct rpc_xprt         *xpt_bc_xprt;   /* NFSv4.1 backchannel */
+       struct rpc_xprt_switch  *xpt_bc_xps;    /* NFSv4.1 backchannel */
 };
 
 static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
index 5aa3834619a8c3222be6530f927ef30349e2fa74..5e3e1b63dbb3c97f0145bd15a23dba7d4e916528 100644 (file)
@@ -297,6 +297,7 @@ struct xprt_create {
        size_t                  addrlen;
        const char              *servername;
        struct svc_xprt         *bc_xprt;       /* NFSv4.1 backchannel */
+       struct rpc_xprt_switch  *bc_xps;
        unsigned int            flags;
 };
 
index e45abe7db9a627dda034f7d4616fad2d3ae1f920..ee517bef0db04902412c17b7eceac24839a4564f 100644 (file)
@@ -335,6 +335,8 @@ struct thermal_genl_event {
  * @get_trend: a pointer to a function that reads the sensor temperature trend.
  * @set_emul_temp: a pointer to a function that sets sensor emulated
  *                temperature.
+ * @set_trip_temp: a pointer to a function that sets the trip temperature on
+ *                hardware.
  */
 struct thermal_zone_of_device_ops {
        int (*get_temp)(void *, int *);
index 37dbacf84849add3a31b410ee06d26ec08c56a07..816b7543f81bca3a31fb005b69c32daab72383c7 100644 (file)
@@ -21,6 +21,9 @@ static inline int do_sys_settimeofday(const struct timespec *tv,
        struct timespec64 ts64;
 
        if (!tv)
+               return do_sys_settimeofday64(NULL, tz);
+
+       if (!timespec_valid(tv))
                return -EINVAL;
 
        ts64 = timespec_to_timespec64(*tv);
index 966889a20ea375a29fdba96ab2615babf073e8d5..e479033bd7829148de1a63928c6db99ed91295aa 100644 (file)
@@ -180,11 +180,11 @@ struct ehci_regs {
  * PORTSCx
  */
        /* HOSTPC: offset 0x84 */
-       u32             hostpc[1];      /* HOSTPC extension */
+       u32             hostpc[0];      /* HOSTPC extension */
 #define HOSTPC_PHCD    (1<<22)         /* Phy clock disable */
 #define HOSTPC_PSPD    (3<<25)         /* Port speed detection */
 
-       u32             reserved5[16];
+       u32             reserved5[17];
 
        /* USBMODE_EX: offset 0xc8 */
        u32             usbmode_ex;     /* USB Device mode extension */
index 457651bf45b0bd9cad8cebe48cca5f8c005a5579..fefe8b06a63dbc15c28b9c09a76e14c03f6ae1e2 100644 (file)
@@ -1034,6 +1034,8 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget)
  * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
  *     this driver will be bound to any available UDC.
  * @pending: UDC core private data used for deferred probe of this driver.
+ * @match_existing_only: If udc is not found, return an error and don't add
+ *      this gadget driver to the list of pending drivers
  *
  * Devices are disabled till a gadget driver successfully bind()s, which
  * means the driver will handle setup() requests needed to enumerate (and
@@ -1097,6 +1099,7 @@ struct usb_gadget_driver {
 
        char                    *udc_name;
        struct list_head        pending;
+       unsigned                match_existing_only:1;
 };
 
 
index 0b3da40a525ecf2370f528620d5e85370904325d..d315c89078698b60e0d76a15940b0c390f0b5f12 100644 (file)
@@ -142,10 +142,11 @@ enum musb_vbus_id_status {
 };
 
 #if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
-void musb_mailbox(enum musb_vbus_id_status status);
+int musb_mailbox(enum musb_vbus_id_status status);
 #else
-static inline void musb_mailbox(enum musb_vbus_id_status status)
+static inline int musb_mailbox(enum musb_vbus_id_status status)
 {
+       return 0;
 }
 #endif
 
index b39a5f3153bd3f1754d81e1914673a281a14814c..960bedbdec87b3e4586f487d57223dccc8b8a4fd 100644 (file)
@@ -165,6 +165,7 @@ int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
 
 int vga_switcheroo_process_delayed_switch(void);
 
+bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev);
 enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
 
 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
@@ -188,6 +189,7 @@ static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(v
 static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
 static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
 static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }
 static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
 
 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
index 791800ddd6d90de23d7b7acea7a91e500e646eda..6360c259da6d62cd3c4b99c858acd6db19eff401 100644 (file)
@@ -34,6 +34,9 @@
 
 #define BOND_DEFAULT_MIIMON    100
 
+#ifndef __long_aligned
+#define __long_aligned __attribute__((aligned((sizeof(long)))))
+#endif
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.
@@ -138,7 +141,9 @@ struct bond_params {
        struct reciprocal_value reciprocal_packets_per_slave;
        u16 ad_actor_sys_prio;
        u16 ad_user_port_key;
-       u8 ad_actor_system[ETH_ALEN];
+
+       /* 2 bytes of padding: see ether_addr_equal_64bits() */
+       u8 ad_actor_system[ETH_ALEN + 2];
 };
 
 struct bond_parm_tbl {
index 48103cf94e976e9c13809cfec895e9bb1c9fa96c..13de0ccaa0594172a946572bbc04397ab0a89032 100644 (file)
@@ -42,6 +42,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 
 int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
                      struct sockaddr __user **, struct iovec **);
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
 asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
                                   unsigned int);
 asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
index 5dce30a6abe3e67091c5d14facd2750edd8426f0..7a54a31d1d4cf7988ff7bd3bf3015b4a8f23e359 100644 (file)
@@ -26,7 +26,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version);
 struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
                                       u8 name_assign_type);
 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-                    bool *csum_err, __be16 proto);
+                    bool *csum_err, __be16 proto, int nhs);
 
 static inline int gre_calc_hlen(__be16 o_flags)
 {
index 37165fba3741ac68e5a93a8a22473eae70361e45..08f36cd2b874b5493bc9b51c4071722591dcc321 100644 (file)
@@ -313,10 +313,9 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
        return min(dst->dev->mtu, IP_MAX_MTU);
 }
 
-static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
+static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+                                         const struct sk_buff *skb)
 {
-       struct sock *sk = skb->sk;
-
        if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
                bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
 
index d325c81332e353e7fa91c05121cc4ff8786ea7e2..43a5a0e4524cbbd9daa6f9bd0b50a92b229deabd 100644 (file)
@@ -63,6 +63,8 @@ struct ip6_tnl_encap_ops {
                            u8 *protocol, struct flowi6 *fl6);
 };
 
+#ifdef CONFIG_INET
+
 extern const struct ip6_tnl_encap_ops __rcu *
                ip6tun_encaps[MAX_IPTUN_ENCAP_OPS];
 
@@ -138,7 +140,6 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev);
 int ip6_tnl_get_iflink(const struct net_device *dev);
 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu);
 
-#ifdef CONFIG_INET
 static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
                                  struct net_device *dev)
 {
index af4c10ebb2414494e75c279b3d9c91da48442982..cd6018a9ee2467cec1fbe4d621fc294bc843a05e 100644 (file)
@@ -1232,7 +1232,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 const char *ip_vs_state_name(__u16 proto, int state);
 
 void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
-int ip_vs_check_template(struct ip_vs_conn *ct);
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
 void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
 int ip_vs_conn_init(void);
 void ip_vs_conn_cleanup(void);
index dd78bea227c8b0baf2fd14eb878f26f265ac76e0..b6083c34ef0d4ed0d03f9c030307f335d3a4cba0 100644 (file)
@@ -284,6 +284,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
        return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
 }
 
+/* jiffies until ct expires, 0 if already expired */
+static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
+{
+       long timeout = (long)ct->timeout.expires - (long)jiffies;
+
+       return timeout > 0 ? timeout : 0;
+}
+
 struct kernel_param;
 
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
index 9c5638ad872e39d2d01cca6f86c1620e499c916c..0dbce55437f2c57cc1642d865a91ffc4169c9866 100644 (file)
@@ -28,8 +28,8 @@ struct nf_queue_handler {
                                                struct nf_hook_ops *ops);
 };
 
-void nf_register_queue_handler(const struct nf_queue_handler *qh);
-void nf_unregister_queue_handler(void);
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
+void nf_unregister_queue_handler(struct net *net);
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
 void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
index 092235458691fd5eadc09cab3266d100f7140dc6..f7c291ff4074510333888184b3b481c77eefc546 100644 (file)
@@ -167,6 +167,7 @@ struct nft_set_elem {
 
 struct nft_set;
 struct nft_set_iter {
+       u8              genmask;
        unsigned int    count;
        unsigned int    skip;
        int             err;
index 38aa4983e2a90bd5a391e844e0556591e5d85f60..36d723579af21f78e2da780c4840762ce88a4770 100644 (file)
@@ -5,11 +5,13 @@
 
 struct proc_dir_entry;
 struct nf_logger;
+struct nf_queue_handler;
 
 struct netns_nf {
 #if defined CONFIG_PROC_FS
        struct proc_dir_entry *proc_netfilter;
 #endif
+       const struct nf_queue_handler __rcu *queue_handler;
        const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *nf_log_dir_header;
index 0f7efa88f210360928b36a80022c9bd53d778b40..3722dda0199ddaa0e5e8554c57c843968075a7b9 100644 (file)
@@ -392,16 +392,20 @@ struct tc_cls_u32_offload {
        };
 };
 
-static inline bool tc_should_offload(struct net_device *dev, u32 flags)
+static inline bool tc_should_offload(const struct net_device *dev,
+                                    const struct tcf_proto *tp, u32 flags)
 {
+       const struct Qdisc *sch = tp->q;
+       const struct Qdisc_class_ops *cops = sch->ops->cl_ops;
+
        if (!(dev->features & NETIF_F_HW_TC))
                return false;
-
        if (flags & TCA_CLS_FLAGS_SKIP_HW)
                return false;
-
        if (!dev->netdev_ops->ndo_setup_tc)
                return false;
+       if (cops && cops->tcf_cl_offload)
+               return cops->tcf_cl_offload(tp->classid);
 
        return true;
 }
index 401038d2f9b88c536788507b6168675333d02fe0..fea53f4d92ca6bb16c367ee772d4c8c1aa19b701 100644 (file)
@@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound)
 }
 
 struct qdisc_watchdog {
+       u64             last_expires;
        struct hrtimer  timer;
        struct Qdisc    *qdisc;
 };
index a1fd76c22a5903cadf2e97c844017925cfbc9f93..62d553184e91c5c6f27da411f9c448fef91ded0f 100644 (file)
@@ -168,6 +168,7 @@ struct Qdisc_class_ops {
 
        /* Filter manipulation */
        struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
+       bool                    (*tcf_cl_offload)(u32 classid);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                        u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -691,9 +692,11 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!sch->gso_skb) {
                sch->gso_skb = sch->dequeue(sch);
-               if (sch->gso_skb)
+               if (sch->gso_skb) {
                        /* it's still part of the queue */
+                       qdisc_qstats_backlog_inc(sch, sch->gso_skb);
                        sch->q.qlen++;
+               }
        }
 
        return sch->gso_skb;
@@ -706,6 +709,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 
        if (skb) {
                sch->gso_skb = NULL;
+               qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
index 649d2a8c17fc36f04b4d317c41c33edd6cdfc8b7..ff5be7e8ddeae6f9d2f9eac889d7abbfbd396bbd 100644 (file)
@@ -1576,7 +1576,13 @@ static inline void sock_put(struct sock *sk)
  */
 void sock_gen_put(struct sock *sk);
 
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+                    unsigned int trim_cap);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+                                const int nested)
+{
+       return __sk_receive_skb(sk, skb, nested, 1);
+}
 
 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
 {
index 985619a593230a6532017e4f61dfaa945b04a08f..1d8e158241da742eb845ac30bab4292cec33d479 100644 (file)
@@ -60,7 +60,7 @@ struct switchdev_attr {
                struct netdev_phys_item_id ppid;        /* PORT_PARENT_ID */
                u8 stp_state;                           /* PORT_STP_STATE */
                unsigned long brport_flags;             /* PORT_BRIDGE_FLAGS */
-               u32 ageing_time;                        /* BRIDGE_AGEING_TIME */
+               clock_t ageing_time;                    /* BRIDGE_AGEING_TIME */
                bool vlan_filtering;                    /* BRIDGE_VLAN_FILTERING */
        } u;
 };
index dc9a09aefb3368cec313d31af7cbc60d76c54fe8..c55facd17b7ec0b4c38ecf7e3690118056a93567 100644 (file)
@@ -36,7 +36,7 @@ struct tcf_meta_ops {
        int     (*encode)(struct sk_buff *, void *, struct tcf_meta_info *);
        int     (*decode)(struct sk_buff *, void *, u16 len);
        int     (*get)(struct sk_buff *skb, struct tcf_meta_info *mi);
-       int     (*alloc)(struct tcf_meta_info *, void *);
+       int     (*alloc)(struct tcf_meta_info *, void *, gfp_t);
        void    (*release)(struct tcf_meta_info *);
        int     (*validate)(void *val, int len);
        struct module   *owner;
@@ -48,8 +48,8 @@ int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
 int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
 int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
                        const void *dval);
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval);
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval);
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
 int ife_validate_meta_u32(void *val, int len);
index 432bed510369e7b19a4773ec5a8bb09b8d9bb1e6..7e440d41487aa8671fbb8952fb01e2bbf63b11f0 100644 (file)
@@ -217,10 +217,10 @@ enum ib_device_cap_flags {
        IB_DEVICE_CROSS_CHANNEL         = (1 << 27),
        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
        IB_DEVICE_SIGNATURE_HANDOVER            = (1 << 30),
-       IB_DEVICE_ON_DEMAND_PAGING              = (1 << 31),
+       IB_DEVICE_ON_DEMAND_PAGING              = (1ULL << 31),
        IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
-       IB_DEVICE_VIRTUAL_FUNCTION              = ((u64)1 << 33),
-       IB_DEVICE_RAW_SCATTER_FCS               = ((u64)1 << 34),
+       IB_DEVICE_VIRTUAL_FUNCTION              = (1ULL << 33),
+       IB_DEVICE_RAW_SCATTER_FCS               = (1ULL << 34),
 };
 
 enum ib_signature_prot_cap {
index 16274e2133cdcca109fa0d40f1b9bf591493d27e..9c9a27d42aaa5e89aa778d3597cb54b13c270ecc 100644 (file)
@@ -203,7 +203,9 @@ struct rvt_driver_provided {
 
        /*
         * Allocate a private queue pair data structure for driver specific
-        * information which is opaque to rdmavt.
+        * information which is opaque to rdmavt.  Errors are returned via
+        * ERR_PTR(err).  The driver is free to return NULL or a valid
+        * pointer.
         */
        void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                                gfp_t gfp);
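
A minimal sketch of the error convention documented above; example_priv and
example_qp_priv_alloc are hypothetical, and the include paths assume the
usual rdmavt headers:

#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/rdma_vt.h>

struct example_priv {
	int scratch;
};

static void *example_qp_priv_alloc(struct rvt_dev_info *rdi,
				   struct rvt_qp *qp, gfp_t gfp)
{
	struct example_priv *priv = kzalloc(sizeof(*priv), gfp);

	/* Failure is reported via ERR_PTR(), not a bare NULL. */
	if (!priv)
		return ERR_PTR(-ENOMEM);

	return priv;
}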
index fc3a481ad91ecdb5f25fd5c3c7d9b210d4f74f5e..530c57bdefa06567c3bcaba9249845af1d4e535a 100644 (file)
@@ -53,18 +53,19 @@ struct hdmi_codec_params {
        int channels;
 };
 
+struct hdmi_codec_pdata;
 struct hdmi_codec_ops {
        /*
         * Called when ASoC starts an audio stream setup.
         * Optional
         */
-       int (*audio_startup)(struct device *dev);
+       int (*audio_startup)(struct device *dev, void *data);
 
        /*
         * Configures HDMI-encoder for audio stream.
         * Mandatory
         */
-       int (*hw_params)(struct device *dev,
+       int (*hw_params)(struct device *dev, void *data,
                         struct hdmi_codec_daifmt *fmt,
                         struct hdmi_codec_params *hparms);
 
@@ -72,19 +73,20 @@ struct hdmi_codec_ops {
         * Shuts down the audio stream.
         * Mandatory
         */
-       void (*audio_shutdown)(struct device *dev);
+       void (*audio_shutdown)(struct device *dev, void *data);
 
        /*
         * Mute/unmute HDMI audio stream.
         * Optional
         */
-       int (*digital_mute)(struct device *dev, bool enable);
+       int (*digital_mute)(struct device *dev, void *data, bool enable);
 
        /*
         * Provides EDID-Like-Data from connected HDMI device.
         * Optional
         */
-       int (*get_eld)(struct device *dev, uint8_t *buf, size_t len);
+       int (*get_eld)(struct device *dev, void *data,
+                      uint8_t *buf, size_t len);
 };
 
 /* HDMI codec initialization data */
@@ -93,6 +95,7 @@ struct hdmi_codec_pdata {
        uint i2s:1;
        uint spdif:1;
        int max_i2s_channels;
+       void *data;
 };
 
 #define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
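
A minimal sketch of the new calling convention, assuming pdata->data is
handed back as the 'data' argument of every callback; all example_* names
are hypothetical:

#include <sound/hdmi-codec.h>

struct example_hdmi {
	bool audio_enabled;
};

static int example_audio_startup(struct device *dev, void *data)
{
	struct example_hdmi *hdmi = data;	/* pdata->data, not drvdata */

	hdmi->audio_enabled = true;
	return 0;
}

static const struct hdmi_codec_ops example_ops = {
	.audio_startup = example_audio_startup,
	/* the mandatory .hw_params / .audio_shutdown are omitted here */
};

static struct example_hdmi example_ctx;

static struct hdmi_codec_pdata example_pdata = {
	.ops			= &example_ops,
	.i2s			= 1,
	.max_i2s_channels	= 8,
	.data			= &example_ctx,	/* handed to every callback */
};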
index afdb416898e0c19a6ac40578a5f81c5436031c64..1df2ff61a4dd95dda25c5962a6335cc6fb9ca44f 100644 (file)
  *
  */
 
-#include <video/omapdss.h>
-
 #ifndef __OMAP_HDMI_AUDIO_H__
 #define __OMAP_HDMI_AUDIO_H__
 
+#include <linux/platform_data/omapdss.h>
+
+struct omap_dss_audio {
+       struct snd_aes_iec958 *iec;
+       struct snd_cea_861_aud_if *cea;
+};
+
 struct omap_hdmi_audio_ops {
        int (*audio_startup)(struct device *dev,
                             void (*abort_cb)(struct device *dev));
index cdecf87576e890f48c7e93830c920fa33ff9b85e..462246aa200e1db1890de7d6640c81e859da250d 100644 (file)
@@ -487,6 +487,22 @@ struct drm_amdgpu_cs_chunk_data {
 #define AMDGPU_INFO_MMR_SH_INDEX_SHIFT 8
 #define AMDGPU_INFO_MMR_SH_INDEX_MASK  0xff
 
+struct drm_amdgpu_query_fw {
+       /** AMDGPU_INFO_FW_* */
+       __u32 fw_type;
+       /**
+        * Index of the IP if there are more IPs of
+        * the same type.
+        */
+       __u32 ip_instance;
+       /**
+        * Index of the engine. Whether this is used depends
+        * on the firmware type (e.g. MEC, SDMA).
+        */
+       __u32 index;
+       __u32 _pad;
+};
+
 /* Input structure for the INFO ioctl */
 struct drm_amdgpu_info {
        /* Where the return value will be stored */
@@ -522,21 +538,7 @@ struct drm_amdgpu_info {
                        __u32 flags;
                } read_mmr_reg;
 
-               struct {
-                       /** AMDGPU_INFO_FW_* */
-                       __u32 fw_type;
-                       /**
-                        * Index of the IP if there are more IPs of
-                        * the same type.
-                        */
-                       __u32 ip_instance;
-                       /**
-                        * Index of the engine. Whether this is used depends
-                        * on the firmware type. (e.g. MEC, SDMA)
-                        */
-                       __u32 index;
-                       __u32 _pad;
-               } query_fw;
+               struct drm_amdgpu_query_fw query_fw;
        };
 };
 
index c17d63d8b54379be763de966b7a9ce8b70028aaa..d7e81a3886fdc41e63ec00cd7b1f0c50c7f24ebb 100644 (file)
@@ -361,6 +361,8 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_GPU_RESET        35
 #define I915_PARAM_HAS_RESOURCE_STREAMER 36
 #define I915_PARAM_HAS_EXEC_SOFTPIN     37
+#define I915_PARAM_HAS_POOLED_EU        38
+#define I915_PARAM_MIN_EU_IN_POOL       39
 
 typedef struct drm_i915_getparam {
        __s32 param;
@@ -1171,6 +1173,7 @@ struct drm_i915_gem_context_param {
 #define I915_CONTEXT_PARAM_BAN_PERIOD  0x1
 #define I915_CONTEXT_PARAM_NO_ZEROMAP  0x2
 #define I915_CONTEXT_PARAM_GTT_SIZE    0x3
+#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE    0x4
        __u64 value;
 };
 
index bf19d2cd907810fb3ea0f19564a2cec6ce7496bf..49f778de8e06868c1a12b2e81c47fa73c51c12ec 100644 (file)
@@ -201,6 +201,27 @@ struct drm_msm_wait_fence {
        struct drm_msm_timespec timeout;   /* in */
 };
 
+/* madvise provides a way to tell the kernel that a buffer's contents
+ * can be discarded under memory pressure, which is useful for a userspace
+ * bo cache where we want to optimistically hold on to the buffer allocation
+ * and potential mmap, but allow the pages to be discarded under memory
+ * pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when a buffer enters the BO
+ * cache, and madvise(WILLNEED) when trying to recycle a buffer from the BO
+ * cache. In the WILLNEED case, 'retained' indicates to userspace whether
+ * the backing pages still exist.
+ */
+#define MSM_MADV_WILLNEED 0       /* backing pages are needed, status returned in 'retained' */
+#define MSM_MADV_DONTNEED 1       /* backing pages not needed */
+#define __MSM_MADV_PURGED 2       /* internal state */
+
+struct drm_msm_gem_madvise {
+       __u32 handle;         /* in, GEM handle */
+       __u32 madv;           /* in, MSM_MADV_x */
+       __u32 retained;       /* out, whether backing store still exists */
+};
+
 #define DRM_MSM_GET_PARAM              0x00
 /* placeholder:
 #define DRM_MSM_SET_PARAM              0x01
@@ -211,7 +232,8 @@ struct drm_msm_wait_fence {
 #define DRM_MSM_GEM_CPU_FINI           0x05
 #define DRM_MSM_GEM_SUBMIT             0x06
 #define DRM_MSM_WAIT_FENCE             0x07
-#define DRM_MSM_NUM_IOCTLS             0x08
+#define DRM_MSM_GEM_MADVISE            0x08
+#define DRM_MSM_NUM_IOCTLS             0x09
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -220,6 +242,7 @@ struct drm_msm_wait_fence {
 #define DRM_IOCTL_MSM_GEM_CPU_FINI     DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
 #define DRM_IOCTL_MSM_GEM_SUBMIT       DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
 #define DRM_IOCTL_MSM_WAIT_FENCE       DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
+#define DRM_IOCTL_MSM_GEM_MADVISE      DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
 
 #if defined(__cplusplus)
 }
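
A minimal userspace sketch of the madvise flow described above, using
drmIoctl() from libdrm; example_recycle_bo is hypothetical:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmIoctl() */
#include "msm_drm.h"

/* Returns 1 if the pages survived, 0 if purged, -1 on error. */
static int example_recycle_bo(int fd, uint32_t handle)
{
	struct drm_msm_gem_madvise req;

	memset(&req, 0, sizeof(req));
	req.handle = handle;
	req.madv = MSM_MADV_WILLNEED;

	if (drmIoctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req))
		return -1;

	return req.retained ? 1 : 0;
}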
index af12e8a184c82b890968dfc8d45bcfa97ccb3e4f..ad7edc3edf7ca1d653a0bc025a5eda6692b74370 100644 (file)
@@ -37,6 +37,7 @@ extern "C" {
 #define DRM_VC4_MMAP_BO                           0x04
 #define DRM_VC4_CREATE_SHADER_BO                  0x05
 #define DRM_VC4_GET_HANG_STATE                    0x06
+#define DRM_VC4_GET_PARAM                         0x07
 
 #define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
 #define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
@@ -45,6 +46,7 @@ extern "C" {
 #define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
 #define DRM_IOCTL_VC4_CREATE_SHADER_BO    DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
 #define DRM_IOCTL_VC4_GET_HANG_STATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
+#define DRM_IOCTL_VC4_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
 
 struct drm_vc4_submit_rcl_surface {
        __u32 hindex; /* Handle index, or ~0 if not present. */
@@ -280,6 +282,17 @@ struct drm_vc4_get_hang_state {
        __u32 pad[16];
 };
 
+#define DRM_VC4_PARAM_V3D_IDENT0               0
+#define DRM_VC4_PARAM_V3D_IDENT1               1
+#define DRM_VC4_PARAM_V3D_IDENT2               2
+#define DRM_VC4_PARAM_SUPPORTS_BRANCHES                3
+
+struct drm_vc4_get_param {
+       __u32 param;
+       __u32 pad;
+       __u64 value;
+};
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/uapi/drm/vgem_drm.h b/include/uapi/drm/vgem_drm.h
new file mode 100644 (file)
index 0000000..bf66f5d
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _UAPI_VGEM_DRM_H_
+#define _UAPI_VGEM_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+#define DRM_VGEM_FENCE_ATTACH  0x1
+#define DRM_VGEM_FENCE_SIGNAL  0x2
+
+#define DRM_IOCTL_VGEM_FENCE_ATTACH    DRM_IOWR( DRM_COMMAND_BASE + DRM_VGEM_FENCE_ATTACH, struct drm_vgem_fence_attach)
+#define DRM_IOCTL_VGEM_FENCE_SIGNAL    DRM_IOW( DRM_COMMAND_BASE + DRM_VGEM_FENCE_SIGNAL, struct drm_vgem_fence_signal)
+
+struct drm_vgem_fence_attach {
+       __u32 handle;
+       __u32 flags;
+#define VGEM_FENCE_WRITE       0x1
+       __u32 out_fence;
+       __u32 pad;
+};
+
+struct drm_vgem_fence_signal {
+       __u32 fence;
+       __u32 flags;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_VGEM_DRM_H_ */
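
A minimal userspace sketch of the two ioctls, using drmIoctl() from libdrm;
example_fence_roundtrip is hypothetical:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmIoctl() */
#include "vgem_drm.h"

static int example_fence_roundtrip(int fd, uint32_t handle)
{
	struct drm_vgem_fence_attach attach;
	struct drm_vgem_fence_signal signal;

	memset(&attach, 0, sizeof(attach));
	attach.handle = handle;
	attach.flags = VGEM_FENCE_WRITE;
	if (drmIoctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
		return -1;

	/* ... CPU writes into the buffer happen here ... */

	memset(&signal, 0, sizeof(signal));
	signal.fence = attach.out_fence;
	return drmIoctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
}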
index 8bdae34d1f9add25d968182c6540c431d994c7ce..ec10cfef166afb3a7cdc777ade3903e076d30376 100644 (file)
@@ -245,6 +245,7 @@ endif
 header-y += hw_breakpoint.h
 header-y += l2tp.h
 header-y += libc-compat.h
+header-y += lirc.h
 header-y += limits.h
 header-y += llc.h
 header-y += loop.h
index 23c6960e94a4fa268dbd8286be6a993ebbd6ce12..2bdd1e3e70076dd3476e3398452c4837b2b80f9b 100644 (file)
@@ -118,7 +118,7 @@ struct btrfs_ioctl_vol_args_v2 {
        };
        union {
                char name[BTRFS_SUBVOL_NAME_MAX + 1];
-               u64 devid;
+               __u64 devid;
        };
 };
 
index 9222db8ccccc1a212f69f2a4922a17b76de3bd31..5f030b46cff4ac6c18117d1eb96dbe14b536389d 100644 (file)
@@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices {
        ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28,
        ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
        ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
+       ETHTOOL_LINK_MODE_25000baseCR_Full_BIT  = 31,
+       ETHTOOL_LINK_MODE_25000baseKR_Full_BIT  = 32,
+       ETHTOOL_LINK_MODE_25000baseSR_Full_BIT  = 33,
+       ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
+       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35,
+       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT        = 36,
+       ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT        = 37,
+       ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT        = 38,
+       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT    = 39,
 
        /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
         * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
@@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices {
         */
 
        __ETHTOOL_LINK_MODE_LAST
-         = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+         = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
 };
 
 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name)     \
index 5974fae54e12c534826d323b35917956275fd5a6..27e17363263abcc0ee9a6459ecd247de07b71a86 100644 (file)
  *
  *  7.24
  *  - add FUSE_LSEEK for SEEK_HOLE and SEEK_DATA support
+ *
+ *  7.25
+ *  - add FUSE_PARALLEL_DIROPS
  */
 
 #ifndef _LINUX_FUSE_H
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 24
+#define FUSE_KERNEL_MINOR_VERSION 25
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -234,6 +237,7 @@ struct fuse_file_lock {
  * FUSE_ASYNC_DIO: asynchronous direct I/O submission
  * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
  * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
+ * FUSE_PARALLEL_DIROPS: allow parallel lookups and readdir
  */
 #define FUSE_ASYNC_READ                (1 << 0)
 #define FUSE_POSIX_LOCKS       (1 << 1)
@@ -253,6 +257,7 @@ struct fuse_file_lock {
 #define FUSE_ASYNC_DIO         (1 << 15)
 #define FUSE_WRITEBACK_CACHE   (1 << 16)
 #define FUSE_NO_OPEN_SUPPORT   (1 << 17)
+#define FUSE_PARALLEL_DIROPS    (1 << 18)
 
 /**
  * CUSE INIT request/reply flags
index ca1054dd82497ddcbce9624e9ad20b2791248be4..72a04a0e8ccef1e5560378af9177a6c47954daaf 100644 (file)
@@ -1,5 +1,5 @@
 #ifndef _UAPI_LINUX_GTP_H_
-#define _UAPI_LINUX_GTP_H__
+#define _UAPI_LINUX_GTP_H_
 
 enum gtp_genl_cmds {
        GTP_CMD_NEWPDP,
index 87cf351bab03998d51817f7085d2d1f7abda0b43..d6d071fc3c568249bf70f24602f95770eb1643df 100644 (file)
 #define KEY_KBDINPUTASSIST_ACCEPT              0x264
 #define KEY_KBDINPUTASSIST_CANCEL              0x265
 
+/* Diagonal movement keys */
+#define KEY_RIGHT_UP                   0x266
+#define KEY_RIGHT_DOWN                 0x267
+#define KEY_LEFT_UP                    0x268
+#define KEY_LEFT_DOWN                  0x269
+
+#define KEY_ROOT_MENU                  0x26a /* Show Device's Root Menu */
+/* Show Top Menu of the Media (e.g. DVD) */
+#define KEY_MEDIA_TOP_MENU             0x26b
+#define KEY_NUMERIC_11                 0x26c
+#define KEY_NUMERIC_12                 0x26d
+/*
+ * Toggle Audio Description: refers to an audio service that helps blind and
+ * visually impaired consumers understand the action in a program. Note: in
+ * some countries this is referred to as "Video Description".
+ */
+#define KEY_AUDIO_DESC                 0x26e
+#define KEY_3D_MODE                    0x26f
+#define KEY_NEXT_FAVORITE              0x270
+#define KEY_STOP_RECORD                        0x271
+#define KEY_PAUSE_RECORD               0x272
+#define KEY_VOD                                0x273 /* Video on Demand */
+#define KEY_UNMUTE                     0x274
+#define KEY_FASTREVERSE                        0x275
+#define KEY_SLOWREVERSE                        0x276
+/*
+ * Control a data application associated with the currently viewed channel,
+ * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
+ */
+#define KEY_DATA                       0x277
+
 #define BTN_TRIGGER_HAPPY              0x2c0
 #define BTN_TRIGGER_HAPPY1             0x2c0
 #define BTN_TRIGGER_HAPPY2             0x2c1
 #define SW_ROTATE_LOCK         0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT       0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE         0x0e  /* set = device disabled */
+#define SW_PEN_INSERTED                0x0f  /* set = pen inserted */
 #define SW_MAX                 0x0f
 #define SW_CNT                 (SW_MAX+1)
 
index 01113841190d3f9ee6c654b2f728d95b44a62bb3..c514941198173c0a73c19d5441867d5d7fa4fe76 100644 (file)
@@ -247,6 +247,7 @@ struct input_mask {
 #define BUS_ATARI              0x1B
 #define BUS_SPI                        0x1C
 #define BUS_RMI                        0x1D
+#define BUS_CEC                        0x1E
 
 /*
  * MT_TOOL types
index 1d973d2ba417247f4265d8dee6bf6566ae1f9047..cd26d7a0fd07094499f97568dc8ca4f50ae93c0f 100644 (file)
@@ -33,6 +33,7 @@ header-y += xt_NFLOG.h
 header-y += xt_NFQUEUE.h
 header-y += xt_RATEEST.h
 header-y += xt_SECMARK.h
+header-y += xt_SYNPROXY.h
 header-y += xt_TCPMSS.h
 header-y += xt_TCPOPTSTRIP.h
 header-y += xt_TEE.h
index 2d59fbaa93c621af56c8bc01e98445c4e54ae745..ca67e61d2a619e6a017e0d254844cbc49ab96629 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _XT_SYNPROXY_H
 #define _XT_SYNPROXY_H
 
+#include <linux/types.h>
+
 #define XT_SYNPROXY_OPT_MSS            0x01
 #define XT_SYNPROXY_OPT_WSCALE         0x02
 #define XT_SYNPROXY_OPT_SACK_PERM      0x04
index eba5914ba5d153fc3d047656cb7772c1559b5766..f4297c8a42fe4a500833fe13d5a89bdc3f677941 100644 (file)
@@ -145,6 +145,8 @@ enum {
        TCA_POLICE_PEAKRATE,
        TCA_POLICE_AVRATE,
        TCA_POLICE_RESULT,
+       TCA_POLICE_TM,
+       TCA_POLICE_PAD,
        __TCA_POLICE_MAX
 #define TCA_POLICE_RESULT TCA_POLICE_RESULT
 };
@@ -173,7 +175,7 @@ enum {
        TCA_U32_DIVISOR,
        TCA_U32_SEL,
        TCA_U32_POLICE,
-       TCA_U32_ACT,   
+       TCA_U32_ACT,
        TCA_U32_INDEV,
        TCA_U32_PCNT,
        TCA_U32_MARK,
index a7f27704f9807e7b14eadb9b08cf09c2c183a6a7..691984cb0b915da8afbc60211145d9dbb45c7143 100644 (file)
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += asequencer.h
+header-y += asoc.h
 header-y += asound.h
 header-y += asound_fm.h
 header-y += compress_offload.h
@@ -10,3 +11,5 @@ header-y += hdsp.h
 header-y += hdspm.h
 header-y += sb16_csp.h
 header-y += sfnt_info.h
+header-y += tlv.h
+header-y += usb_stream.h
index 3a2a794017896994a42d370e59b14e7a492c37b9..7adeaae0696109c0088016bd4ad5c340d6455dd1 100644 (file)
@@ -235,9 +235,6 @@ int ipu_di_init_sync_panel(struct ipu_di *, struct ipu_di_signal_cfg *sig);
 struct dmfc_channel;
 int ipu_dmfc_enable_channel(struct dmfc_channel *dmfc);
 void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc);
-int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
-               unsigned long bandwidth_mbs, int burstsize);
-void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc);
 void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width);
 struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel);
 void ipu_dmfc_put(struct dmfc_channel *dmfc);
index 56830d1dc762b34d0193c5b3468bd0e0612bc06b..e7003ee6e0638da1dfcf0ab2b0beac4fedd6e875 100644 (file)
 #ifndef __OMAP_PANEL_DATA_H
 #define __OMAP_PANEL_DATA_H
 
-#include <video/omapdss.h>
 #include <video/display_timing.h>
 
-struct omap_dss_device;
-
-/**
- * encoder_tfp410 platform data
- * @name: name for this display entity
- * @power_down_gpio: gpio number for PD pin (or -1 if not available)
- * @data_lines: number of DPI datalines
- */
-struct encoder_tfp410_platform_data {
-       const char *name;
-       const char *source;
-       int power_down_gpio;
-       int data_lines;
-};
-
-
-/**
- * connector_dvi platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @i2c_bus_num: i2c bus number to be used for reading EDID
- */
-struct connector_dvi_platform_data {
-       const char *name;
-       const char *source;
-       int i2c_bus_num;
-};
-
-/**
- * connector_hdmi platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- */
-struct connector_hdmi_platform_data {
-       const char *name;
-       const char *source;
-};
-
 /**
  * connector_atv platform data
  * @name: name for this display entity
  * @source: name of the display entity used as a video source
- * @connector_type: composite/svideo
  * @invert_polarity: invert signal polarity
  */
 struct connector_atv_platform_data {
        const char *name;
        const char *source;
 
-       enum omap_dss_venc_type connector_type;
        bool invert_polarity;
 };
 
@@ -104,33 +63,6 @@ struct panel_dpi_platform_data {
        int enable_gpio;
 };
 
-/**
- * panel_dsicm platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @reset_gpio: gpio to reset the panel (or -1)
- * @use_ext_te: use external TE GPIO
- * @ext_te_gpio: external TE GPIO
- * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
- * @use_dsi_backlight: true if panel uses DSI command to control backlight
- * @pin_config: DSI pin configuration
- */
-struct panel_dsicm_platform_data {
-       const char *name;
-       const char *source;
-
-       int reset_gpio;
-
-       bool use_ext_te;
-       int ext_te_gpio;
-
-       unsigned ulps_timeout;
-
-       bool use_dsi_backlight;
-
-       struct omap_dsi_pin_config pin_config;
-};
-
 /**
  * panel_acx565akm platform data
  * @name: name for this display entity
@@ -147,93 +79,4 @@ struct panel_acx565akm_platform_data {
        int datapairs;
 };
 
-/**
- * panel_lb035q02 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @backlight_gpio: gpio to enable/disable the backlight (or -1)
- * @enable_gpio: gpio to enable/disable the panel (or -1)
- */
-struct panel_lb035q02_platform_data {
-       const char *name;
-       const char *source;
-
-       int data_lines;
-
-       int backlight_gpio;
-       int enable_gpio;
-};
-
-/**
- * panel_sharp_ls037v7dw01 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @resb_gpio: reset signal GPIO
- * @ini_gpio: power on control GPIO
- * @mo_gpio: selection for resolution(VGA/QVGA) GPIO
- * @lr_gpio: selection for horizontal scanning direction GPIO
- * @ud_gpio: selection for vertical scanning direction GPIO
- */
-struct panel_sharp_ls037v7dw01_platform_data {
-       const char *name;
-       const char *source;
-
-       int data_lines;
-
-       int resb_gpio;
-       int ini_gpio;
-       int mo_gpio;
-       int lr_gpio;
-       int ud_gpio;
-};
-
-/**
- * panel-tpo-td043mtea1 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @nreset_gpio: reset signal
- */
-struct panel_tpo_td043mtea1_platform_data {
-       const char *name;
-       const char *source;
-
-       int data_lines;
-
-       int nreset_gpio;
-};
-
-/**
- * panel-nec-nl8048hl11 platform data
- * @name: name for this display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- * @res_gpio: reset signal
- * @qvga_gpio: selection for resolution(QVGA/WVGA)
- */
-struct panel_nec_nl8048hl11_platform_data {
-       const char *name;
-       const char *source;
-
-       int data_lines;
-
-       int res_gpio;
-       int qvga_gpio;
-};
-
-/**
- * panel-tpo-td028ttec1 platform data
- * @name: name for display entity
- * @source: name of the display entity used as a video source
- * @data_lines: number of DPI datalines
- */
-struct panel_tpo_td028ttec1_platform_data {
-       const char *name;
-       const char *source;
-
-       int data_lines;
-};
-
 #endif /* __OMAP_PANEL_DATA_H */
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
deleted file mode 100644 (file)
index 8e14ad7..0000000
+++ /dev/null
@@ -1,922 +0,0 @@
-/*
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP_OMAPDSS_H
-#define __OMAP_OMAPDSS_H
-
-#include <linux/list.h>
-#include <linux/kobject.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-
-#include <video/videomode.h>
-
-#define DISPC_IRQ_FRAMEDONE            (1 << 0)
-#define DISPC_IRQ_VSYNC                        (1 << 1)
-#define DISPC_IRQ_EVSYNC_EVEN          (1 << 2)
-#define DISPC_IRQ_EVSYNC_ODD           (1 << 3)
-#define DISPC_IRQ_ACBIAS_COUNT_STAT    (1 << 4)
-#define DISPC_IRQ_PROG_LINE_NUM                (1 << 5)
-#define DISPC_IRQ_GFX_FIFO_UNDERFLOW   (1 << 6)
-#define DISPC_IRQ_GFX_END_WIN          (1 << 7)
-#define DISPC_IRQ_PAL_GAMMA_MASK       (1 << 8)
-#define DISPC_IRQ_OCP_ERR              (1 << 9)
-#define DISPC_IRQ_VID1_FIFO_UNDERFLOW  (1 << 10)
-#define DISPC_IRQ_VID1_END_WIN         (1 << 11)
-#define DISPC_IRQ_VID2_FIFO_UNDERFLOW  (1 << 12)
-#define DISPC_IRQ_VID2_END_WIN         (1 << 13)
-#define DISPC_IRQ_SYNC_LOST            (1 << 14)
-#define DISPC_IRQ_SYNC_LOST_DIGIT      (1 << 15)
-#define DISPC_IRQ_WAKEUP               (1 << 16)
-#define DISPC_IRQ_SYNC_LOST2           (1 << 17)
-#define DISPC_IRQ_VSYNC2               (1 << 18)
-#define DISPC_IRQ_VID3_END_WIN         (1 << 19)
-#define DISPC_IRQ_VID3_FIFO_UNDERFLOW  (1 << 20)
-#define DISPC_IRQ_ACBIAS_COUNT_STAT2   (1 << 21)
-#define DISPC_IRQ_FRAMEDONE2           (1 << 22)
-#define DISPC_IRQ_FRAMEDONEWB          (1 << 23)
-#define DISPC_IRQ_FRAMEDONETV          (1 << 24)
-#define DISPC_IRQ_WBBUFFEROVERFLOW     (1 << 25)
-#define DISPC_IRQ_WBUNCOMPLETEERROR    (1 << 26)
-#define DISPC_IRQ_SYNC_LOST3           (1 << 27)
-#define DISPC_IRQ_VSYNC3               (1 << 28)
-#define DISPC_IRQ_ACBIAS_COUNT_STAT3   (1 << 29)
-#define DISPC_IRQ_FRAMEDONE3           (1 << 30)
-
-struct omap_dss_device;
-struct omap_overlay_manager;
-struct dss_lcd_mgr_config;
-struct snd_aes_iec958;
-struct snd_cea_861_aud_if;
-struct hdmi_avi_infoframe;
-
-enum omap_display_type {
-       OMAP_DISPLAY_TYPE_NONE          = 0,
-       OMAP_DISPLAY_TYPE_DPI           = 1 << 0,
-       OMAP_DISPLAY_TYPE_DBI           = 1 << 1,
-       OMAP_DISPLAY_TYPE_SDI           = 1 << 2,
-       OMAP_DISPLAY_TYPE_DSI           = 1 << 3,
-       OMAP_DISPLAY_TYPE_VENC          = 1 << 4,
-       OMAP_DISPLAY_TYPE_HDMI          = 1 << 5,
-       OMAP_DISPLAY_TYPE_DVI           = 1 << 6,
-};
-
-enum omap_plane {
-       OMAP_DSS_GFX    = 0,
-       OMAP_DSS_VIDEO1 = 1,
-       OMAP_DSS_VIDEO2 = 2,
-       OMAP_DSS_VIDEO3 = 3,
-       OMAP_DSS_WB     = 4,
-};
-
-enum omap_channel {
-       OMAP_DSS_CHANNEL_LCD    = 0,
-       OMAP_DSS_CHANNEL_DIGIT  = 1,
-       OMAP_DSS_CHANNEL_LCD2   = 2,
-       OMAP_DSS_CHANNEL_LCD3   = 3,
-       OMAP_DSS_CHANNEL_WB     = 4,
-};
-
-enum omap_color_mode {
-       OMAP_DSS_COLOR_CLUT1    = 1 << 0,  /* BITMAP 1 */
-       OMAP_DSS_COLOR_CLUT2    = 1 << 1,  /* BITMAP 2 */
-       OMAP_DSS_COLOR_CLUT4    = 1 << 2,  /* BITMAP 4 */
-       OMAP_DSS_COLOR_CLUT8    = 1 << 3,  /* BITMAP 8 */
-       OMAP_DSS_COLOR_RGB12U   = 1 << 4,  /* RGB12, 16-bit container */
-       OMAP_DSS_COLOR_ARGB16   = 1 << 5,  /* ARGB16 */
-       OMAP_DSS_COLOR_RGB16    = 1 << 6,  /* RGB16 */
-       OMAP_DSS_COLOR_RGB24U   = 1 << 7,  /* RGB24, 32-bit container */
-       OMAP_DSS_COLOR_RGB24P   = 1 << 8,  /* RGB24, 24-bit container */
-       OMAP_DSS_COLOR_YUV2     = 1 << 9,  /* YUV2 4:2:2 co-sited */
-       OMAP_DSS_COLOR_UYVY     = 1 << 10, /* UYVY 4:2:2 co-sited */
-       OMAP_DSS_COLOR_ARGB32   = 1 << 11, /* ARGB32 */
-       OMAP_DSS_COLOR_RGBA32   = 1 << 12, /* RGBA32 */
-       OMAP_DSS_COLOR_RGBX32   = 1 << 13, /* RGBx32 */
-       OMAP_DSS_COLOR_NV12             = 1 << 14, /* NV12 format: YUV 4:2:0 */
-       OMAP_DSS_COLOR_RGBA16           = 1 << 15, /* RGBA16 - 4444 */
-       OMAP_DSS_COLOR_RGBX16           = 1 << 16, /* RGBx16 - 4444 */
-       OMAP_DSS_COLOR_ARGB16_1555      = 1 << 17, /* ARGB16 - 1555 */
-       OMAP_DSS_COLOR_XRGB16_1555      = 1 << 18, /* xRGB16 - 1555 */
-};
-
-enum omap_dss_load_mode {
-       OMAP_DSS_LOAD_CLUT_AND_FRAME    = 0,
-       OMAP_DSS_LOAD_CLUT_ONLY         = 1,
-       OMAP_DSS_LOAD_FRAME_ONLY        = 2,
-       OMAP_DSS_LOAD_CLUT_ONCE_FRAME   = 3,
-};
-
-enum omap_dss_trans_key_type {
-       OMAP_DSS_COLOR_KEY_GFX_DST = 0,
-       OMAP_DSS_COLOR_KEY_VID_SRC = 1,
-};
-
-enum omap_rfbi_te_mode {
-       OMAP_DSS_RFBI_TE_MODE_1 = 1,
-       OMAP_DSS_RFBI_TE_MODE_2 = 2,
-};
-
-enum omap_dss_signal_level {
-       OMAPDSS_SIG_ACTIVE_LOW,
-       OMAPDSS_SIG_ACTIVE_HIGH,
-};
-
-enum omap_dss_signal_edge {
-       OMAPDSS_DRIVE_SIG_FALLING_EDGE,
-       OMAPDSS_DRIVE_SIG_RISING_EDGE,
-};
-
-enum omap_dss_venc_type {
-       OMAP_DSS_VENC_TYPE_COMPOSITE,
-       OMAP_DSS_VENC_TYPE_SVIDEO,
-};
-
-enum omap_dss_dsi_pixel_format {
-       OMAP_DSS_DSI_FMT_RGB888,
-       OMAP_DSS_DSI_FMT_RGB666,
-       OMAP_DSS_DSI_FMT_RGB666_PACKED,
-       OMAP_DSS_DSI_FMT_RGB565,
-};
-
-enum omap_dss_dsi_mode {
-       OMAP_DSS_DSI_CMD_MODE = 0,
-       OMAP_DSS_DSI_VIDEO_MODE,
-};
-
-enum omap_display_caps {
-       OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE      = 1 << 0,
-       OMAP_DSS_DISPLAY_CAP_TEAR_ELIM          = 1 << 1,
-};
-
-enum omap_dss_display_state {
-       OMAP_DSS_DISPLAY_DISABLED = 0,
-       OMAP_DSS_DISPLAY_ACTIVE,
-};
-
-struct omap_dss_audio {
-       struct snd_aes_iec958 *iec;
-       struct snd_cea_861_aud_if *cea;
-};
-
-enum omap_dss_rotation_type {
-       OMAP_DSS_ROT_DMA        = 1 << 0,
-       OMAP_DSS_ROT_VRFB       = 1 << 1,
-       OMAP_DSS_ROT_TILER      = 1 << 2,
-};
-
-/* clockwise rotation angle */
-enum omap_dss_rotation_angle {
-       OMAP_DSS_ROT_0   = 0,
-       OMAP_DSS_ROT_90  = 1,
-       OMAP_DSS_ROT_180 = 2,
-       OMAP_DSS_ROT_270 = 3,
-};
-
-enum omap_overlay_caps {
-       OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
-       OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1,
-       OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2,
-       OMAP_DSS_OVL_CAP_ZORDER = 1 << 3,
-       OMAP_DSS_OVL_CAP_POS = 1 << 4,
-       OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
-};
-
-enum omap_overlay_manager_caps {
-       OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
-};
-
-enum omap_dss_clk_source {
-       OMAP_DSS_CLK_SRC_FCK = 0,               /* OMAP2/3: DSS1_ALWON_FCLK
-                                                * OMAP4: DSS_FCLK */
-       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC,   /* OMAP3: DSI1_PLL_FCLK
-                                                * OMAP4: PLL1_CLK1 */
-       OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI,     /* OMAP3: DSI2_PLL_FCLK
-                                                * OMAP4: PLL1_CLK2 */
-       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC,  /* OMAP4: PLL2_CLK1 */
-       OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI,    /* OMAP4: PLL2_CLK2 */
-};
-
-enum omap_hdmi_flags {
-       OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
-};
-
-enum omap_dss_output_id {
-       OMAP_DSS_OUTPUT_DPI     = 1 << 0,
-       OMAP_DSS_OUTPUT_DBI     = 1 << 1,
-       OMAP_DSS_OUTPUT_SDI     = 1 << 2,
-       OMAP_DSS_OUTPUT_DSI1    = 1 << 3,
-       OMAP_DSS_OUTPUT_DSI2    = 1 << 4,
-       OMAP_DSS_OUTPUT_VENC    = 1 << 5,
-       OMAP_DSS_OUTPUT_HDMI    = 1 << 6,
-};
-
-/* RFBI */
-
-struct rfbi_timings {
-       int cs_on_time;
-       int cs_off_time;
-       int we_on_time;
-       int we_off_time;
-       int re_on_time;
-       int re_off_time;
-       int we_cycle_time;
-       int re_cycle_time;
-       int cs_pulse_width;
-       int access_time;
-
-       int clk_div;
-
-       u32 tim[5];             /* set by rfbi_convert_timings() */
-
-       int converted;
-};
-
-/* DSI */
-
-enum omap_dss_dsi_trans_mode {
-       /* Sync Pulses: both sync start and end packets sent */
-       OMAP_DSS_DSI_PULSE_MODE,
-       /* Sync Events: only sync start packets sent */
-       OMAP_DSS_DSI_EVENT_MODE,
-       /* Burst: only sync start packets sent, pixels are time compressed */
-       OMAP_DSS_DSI_BURST_MODE,
-};
-
-struct omap_dss_dsi_videomode_timings {
-       unsigned long hsclk;
-
-       unsigned ndl;
-       unsigned bitspp;
-
-       /* pixels */
-       u16 hact;
-       /* lines */
-       u16 vact;
-
-       /* DSI video mode blanking data */
-       /* Unit: byte clock cycles */
-       u16 hss;
-       u16 hsa;
-       u16 hse;
-       u16 hfp;
-       u16 hbp;
-       /* Unit: line clocks */
-       u16 vsa;
-       u16 vfp;
-       u16 vbp;
-
-       /* DSI blanking modes */
-       int blanking_mode;
-       int hsa_blanking_mode;
-       int hbp_blanking_mode;
-       int hfp_blanking_mode;
-
-       enum omap_dss_dsi_trans_mode trans_mode;
-
-       bool ddr_clk_always_on;
-       int window_sync;
-};
-
-struct omap_dss_dsi_config {
-       enum omap_dss_dsi_mode mode;
-       enum omap_dss_dsi_pixel_format pixel_format;
-       const struct omap_video_timings *timings;
-
-       unsigned long hs_clk_min, hs_clk_max;
-       unsigned long lp_clk_min, lp_clk_max;
-
-       bool ddr_clk_always_on;
-       enum omap_dss_dsi_trans_mode trans_mode;
-};
-
-enum omapdss_version {
-       OMAPDSS_VER_UNKNOWN = 0,
-       OMAPDSS_VER_OMAP24xx,
-       OMAPDSS_VER_OMAP34xx_ES1,       /* OMAP3430 ES1.0, 2.0 */
-       OMAPDSS_VER_OMAP34xx_ES3,       /* OMAP3430 ES3.0+ */
-       OMAPDSS_VER_OMAP3630,
-       OMAPDSS_VER_AM35xx,
-       OMAPDSS_VER_OMAP4430_ES1,       /* OMAP4430 ES1.0 */
-       OMAPDSS_VER_OMAP4430_ES2,       /* OMAP4430 ES2.0, 2.1, 2.2 */
-       OMAPDSS_VER_OMAP4,              /* All other OMAP4s */
-       OMAPDSS_VER_OMAP5,
-       OMAPDSS_VER_AM43xx,
-       OMAPDSS_VER_DRA7xx,
-};
-
-/* Board specific data */
-struct omap_dss_board_info {
-       int num_devices;
-       struct omap_dss_device **devices;
-       struct omap_dss_device *default_device;
-       const char *default_display_name;
-       int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
-       void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
-       int (*set_min_bus_tput)(struct device *dev, unsigned long r);
-       enum omapdss_version version;
-};
-
-/* Init with the board info */
-extern int omap_display_init(struct omap_dss_board_info *board_data);
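
A minimal sketch of the board-side registration this hook implies, assuming a
classic board-file context; the device array, display name and version below
are illustrative only:

static struct omap_dss_device *board_dss_devices[] = {
	/* panel and encoder devices created elsewhere in the board file */
};

static struct omap_dss_board_info board_dss_data = {
	.num_devices		= ARRAY_SIZE(board_dss_devices),
	.devices		= board_dss_devices,
	.default_display_name	= "lcd",
	.version		= OMAPDSS_VER_OMAP3630,
};

static int __init board_display_init(void)
{
	/* hand the static board description to omapdss once at init */
	return omap_display_init(&board_dss_data);
}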
-
-struct omap_video_timings {
-       /* Unit: pixels */
-       u16 x_res;
-       /* Unit: pixels */
-       u16 y_res;
-       /* Unit: Hz */
-       u32 pixelclock;
-       /* Unit: pixel clocks */
-       u16 hsw;        /* Horizontal synchronization pulse width */
-       /* Unit: pixel clocks */
-       u16 hfp;        /* Horizontal front porch */
-       /* Unit: pixel clocks */
-       u16 hbp;        /* Horizontal back porch */
-       /* Unit: line clocks */
-       u16 vsw;        /* Vertical synchronization pulse width */
-       /* Unit: line clocks */
-       u16 vfp;        /* Vertical front porch */
-       /* Unit: line clocks */
-       u16 vbp;        /* Vertical back porch */
-
-       /* Vsync logic level */
-       enum omap_dss_signal_level vsync_level;
-       /* Hsync logic level */
-       enum omap_dss_signal_level hsync_level;
-       /* Interlaced or Progressive timings */
-       bool interlace;
-       /* Pixel clock edge to drive LCD data */
-       enum omap_dss_signal_edge data_pclk_edge;
-       /* Data enable logic level */
-       enum omap_dss_signal_level de_level;
-       /* Pixel clock edges to drive HSYNC and VSYNC signals */
-       enum omap_dss_signal_edge sync_pclk_edge;
-
-       bool double_pixel;
-};
-
-/* Hardcoded timings for tv modes. Venc only uses these to
- * identify the mode, and does not actually use the configs
- * itself. However, the configs should be something that
- * a normal monitor can also show */
-extern const struct omap_video_timings omap_dss_pal_timings;
-extern const struct omap_video_timings omap_dss_ntsc_timings;
-
-struct omap_dss_cpr_coefs {
-       s16 rr, rg, rb;
-       s16 gr, gg, gb;
-       s16 br, bg, bb;
-};
-
-struct omap_overlay_info {
-       dma_addr_t paddr;
-       dma_addr_t p_uv_addr;  /* for NV12 format */
-       u16 screen_width;
-       u16 width;
-       u16 height;
-       enum omap_color_mode color_mode;
-       u8 rotation;
-       enum omap_dss_rotation_type rotation_type;
-       bool mirror;
-
-       u16 pos_x;
-       u16 pos_y;
-       u16 out_width;  /* if 0, out_width == width */
-       u16 out_height; /* if 0, out_height == height */
-       u8 global_alpha;
-       u8 pre_mult_alpha;
-       u8 zorder;
-};
-
-struct omap_overlay {
-       struct kobject kobj;
-       struct list_head list;
-
-       /* static fields */
-       const char *name;
-       enum omap_plane id;
-       enum omap_color_mode supported_modes;
-       enum omap_overlay_caps caps;
-
-       /* dynamic fields */
-       struct omap_overlay_manager *manager;
-
-       /*
-        * The following functions do not block:
-        *
-        * is_enabled
-        * set_overlay_info
-        * get_overlay_info
-        *
-        * The rest of the functions may block and cannot be called from
-        * interrupt context
-        */
-
-       int (*enable)(struct omap_overlay *ovl);
-       int (*disable)(struct omap_overlay *ovl);
-       bool (*is_enabled)(struct omap_overlay *ovl);
-
-       int (*set_manager)(struct omap_overlay *ovl,
-               struct omap_overlay_manager *mgr);
-       int (*unset_manager)(struct omap_overlay *ovl);
-
-       int (*set_overlay_info)(struct omap_overlay *ovl,
-                       struct omap_overlay_info *info);
-       void (*get_overlay_info)(struct omap_overlay *ovl,
-                       struct omap_overlay_info *info);
-
-       int (*wait_for_go)(struct omap_overlay *ovl);
-
-       struct omap_dss_device *(*get_device)(struct omap_overlay *ovl);
-};
-
-struct omap_overlay_manager_info {
-       u32 default_color;
-
-       enum omap_dss_trans_key_type trans_key_type;
-       u32 trans_key;
-       bool trans_enabled;
-
-       bool partial_alpha_enabled;
-
-       bool cpr_enable;
-       struct omap_dss_cpr_coefs cpr_coefs;
-};
-
-struct omap_overlay_manager {
-       struct kobject kobj;
-
-       /* static fields */
-       const char *name;
-       enum omap_channel id;
-       enum omap_overlay_manager_caps caps;
-       struct list_head overlays;
-       enum omap_display_type supported_displays;
-       enum omap_dss_output_id supported_outputs;
-
-       /* dynamic fields */
-       struct omap_dss_device *output;
-
-       /*
-        * The following functions do not block:
-        *
-        * set_manager_info
-        * get_manager_info
-        * apply
-        *
-        * The rest of the functions may block and cannot be called from
-        * interrupt context
-        */
-
-       int (*set_output)(struct omap_overlay_manager *mgr,
-               struct omap_dss_device *output);
-       int (*unset_output)(struct omap_overlay_manager *mgr);
-
-       int (*set_manager_info)(struct omap_overlay_manager *mgr,
-                       struct omap_overlay_manager_info *info);
-       void (*get_manager_info)(struct omap_overlay_manager *mgr,
-                       struct omap_overlay_manager_info *info);
-
-       int (*apply)(struct omap_overlay_manager *mgr);
-       int (*wait_for_go)(struct omap_overlay_manager *mgr);
-       int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
-
-       struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr);
-};
-
-/* 22 pins means 1 clk lane and 10 data lanes */
-#define OMAP_DSS_MAX_DSI_PINS 22
-
-struct omap_dsi_pin_config {
-       int num_pins;
-       /*
-        * pin numbers in the following order:
-        * clk+, clk-
-        * data1+, data1-
-        * data2+, data2-
-        * ...
-        */
-       int pins[OMAP_DSS_MAX_DSI_PINS];
-};
-
-struct omap_dss_writeback_info {
-       u32 paddr;
-       u32 p_uv_addr;
-       u16 buf_width;
-       u16 width;
-       u16 height;
-       enum omap_color_mode color_mode;
-       u8 rotation;
-       enum omap_dss_rotation_type rotation_type;
-       bool mirror;
-       u8 pre_mult_alpha;
-};
-
-struct omapdss_dpi_ops {
-       int (*connect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-       void (*disconnect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-
-       int (*enable)(struct omap_dss_device *dssdev);
-       void (*disable)(struct omap_dss_device *dssdev);
-
-       int (*check_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*set_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*get_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-
-       void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
-};
-
-struct omapdss_sdi_ops {
-       int (*connect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-       void (*disconnect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-
-       int (*enable)(struct omap_dss_device *dssdev);
-       void (*disable)(struct omap_dss_device *dssdev);
-
-       int (*check_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*set_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*get_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-
-       void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
-};
-
-struct omapdss_dvi_ops {
-       int (*connect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-       void (*disconnect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-
-       int (*enable)(struct omap_dss_device *dssdev);
-       void (*disable)(struct omap_dss_device *dssdev);
-
-       int (*check_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*set_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*get_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-};
-
-struct omapdss_atv_ops {
-       int (*connect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-       void (*disconnect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-
-       int (*enable)(struct omap_dss_device *dssdev);
-       void (*disable)(struct omap_dss_device *dssdev);
-
-       int (*check_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*set_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*get_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-
-       void (*set_type)(struct omap_dss_device *dssdev,
-               enum omap_dss_venc_type type);
-       void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev,
-               bool invert_polarity);
-
-       int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
-       u32 (*get_wss)(struct omap_dss_device *dssdev);
-};
-
-struct omapdss_hdmi_ops {
-       int (*connect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-       void (*disconnect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-
-       int (*enable)(struct omap_dss_device *dssdev);
-       void (*disable)(struct omap_dss_device *dssdev);
-
-       int (*check_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*set_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*get_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-
-       int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
-       bool (*detect)(struct omap_dss_device *dssdev);
-
-       int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
-       int (*set_infoframe)(struct omap_dss_device *dssdev,
-               const struct hdmi_avi_infoframe *avi);
-};
-
-struct omapdss_dsi_ops {
-       int (*connect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-       void (*disconnect)(struct omap_dss_device *dssdev,
-                       struct omap_dss_device *dst);
-
-       int (*enable)(struct omap_dss_device *dssdev);
-       void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
-                       bool enter_ulps);
-
-       /* bus configuration */
-       int (*set_config)(struct omap_dss_device *dssdev,
-                       const struct omap_dss_dsi_config *cfg);
-       int (*configure_pins)(struct omap_dss_device *dssdev,
-                       const struct omap_dsi_pin_config *pin_cfg);
-
-       void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
-                       bool enable);
-       int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
-
-       int (*update)(struct omap_dss_device *dssdev, int channel,
-                       void (*callback)(int, void *), void *data);
-
-       void (*bus_lock)(struct omap_dss_device *dssdev);
-       void (*bus_unlock)(struct omap_dss_device *dssdev);
-
-       int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
-       void (*disable_video_output)(struct omap_dss_device *dssdev,
-                       int channel);
-
-       int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
-       int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
-                       int vc_id);
-       void (*release_vc)(struct omap_dss_device *dssdev, int channel);
-
-       /* data transfer */
-       int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
-                       u8 *data, int len);
-       int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
-                       u8 *data, int len);
-       int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
-                       u8 *data, int len);
-
-       int (*gen_write)(struct omap_dss_device *dssdev, int channel,
-                       u8 *data, int len);
-       int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
-                       u8 *data, int len);
-       int (*gen_read)(struct omap_dss_device *dssdev, int channel,
-                       u8 *reqdata, int reqlen,
-                       u8 *data, int len);
-
-       int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
-
-       int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
-                       int channel, u16 plen);
-};
-
-struct omap_dss_device {
-       struct kobject kobj;
-       struct device *dev;
-
-       struct module *owner;
-
-       struct list_head panel_list;
-
-       /* alias in the form of "display%d" */
-       char alias[16];
-
-       enum omap_display_type type;
-       enum omap_display_type output_type;
-
-       union {
-               struct {
-                       u8 data_lines;
-               } dpi;
-
-               struct {
-                       u8 channel;
-                       u8 data_lines;
-               } rfbi;
-
-               struct {
-                       u8 datapairs;
-               } sdi;
-
-               struct {
-                       int module;
-               } dsi;
-
-               struct {
-                       enum omap_dss_venc_type type;
-                       bool invert_polarity;
-               } venc;
-       } phy;
-
-       struct {
-               struct omap_video_timings timings;
-
-               enum omap_dss_dsi_pixel_format dsi_pix_fmt;
-               enum omap_dss_dsi_mode dsi_mode;
-       } panel;
-
-       struct {
-               u8 pixel_size;
-               struct rfbi_timings rfbi_timings;
-       } ctrl;
-
-       const char *name;
-
-       /* used to match device to driver */
-       const char *driver_name;
-
-       void *data;
-
-       struct omap_dss_driver *driver;
-
-       union {
-               const struct omapdss_dpi_ops *dpi;
-               const struct omapdss_sdi_ops *sdi;
-               const struct omapdss_dvi_ops *dvi;
-               const struct omapdss_hdmi_ops *hdmi;
-               const struct omapdss_atv_ops *atv;
-               const struct omapdss_dsi_ops *dsi;
-       } ops;
-
-       /* helper variable for driver suspend/resume */
-       bool activate_after_resume;
-
-       enum omap_display_caps caps;
-
-       struct omap_dss_device *src;
-
-       enum omap_dss_display_state state;
-
-       /* OMAP DSS output specific fields */
-
-       struct list_head list;
-
-       /* DISPC channel for this output */
-       enum omap_channel dispc_channel;
-       bool dispc_channel_connected;
-
-       /* output instance */
-       enum omap_dss_output_id id;
-
-       /* the port number in the DT node */
-       int port_num;
-
-       /* dynamic fields */
-       struct omap_overlay_manager *manager;
-
-       struct omap_dss_device *dst;
-};
-
-struct omap_dss_driver {
-       int (*probe)(struct omap_dss_device *);
-       void (*remove)(struct omap_dss_device *);
-
-       int (*connect)(struct omap_dss_device *dssdev);
-       void (*disconnect)(struct omap_dss_device *dssdev);
-
-       int (*enable)(struct omap_dss_device *display);
-       void (*disable)(struct omap_dss_device *display);
-       int (*run_test)(struct omap_dss_device *display, int test);
-
-       int (*update)(struct omap_dss_device *dssdev,
-                              u16 x, u16 y, u16 w, u16 h);
-       int (*sync)(struct omap_dss_device *dssdev);
-
-       int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
-       int (*get_te)(struct omap_dss_device *dssdev);
-
-       u8 (*get_rotate)(struct omap_dss_device *dssdev);
-       int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
-
-       bool (*get_mirror)(struct omap_dss_device *dssdev);
-       int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
-
-       int (*memory_read)(struct omap_dss_device *dssdev,
-                       void *buf, size_t size,
-                       u16 x, u16 y, u16 w, u16 h);
-
-       void (*get_resolution)(struct omap_dss_device *dssdev,
-                       u16 *xres, u16 *yres);
-       void (*get_dimensions)(struct omap_dss_device *dssdev,
-                       u32 *width, u32 *height);
-       int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
-
-       int (*check_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*set_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-       void (*get_timings)(struct omap_dss_device *dssdev,
-                       struct omap_video_timings *timings);
-
-       int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
-       u32 (*get_wss)(struct omap_dss_device *dssdev);
-
-       int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
-       bool (*detect)(struct omap_dss_device *dssdev);
-
-       int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
-       int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
-               const struct hdmi_avi_infoframe *avi);
-};
-
-enum omapdss_version omapdss_get_version(void);
-bool omapdss_is_initialized(void);
-
-int omap_dss_register_driver(struct omap_dss_driver *);
-void omap_dss_unregister_driver(struct omap_dss_driver *);
-
-int omapdss_register_display(struct omap_dss_device *dssdev);
-void omapdss_unregister_display(struct omap_dss_device *dssdev);
-
-struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
-void omap_dss_put_device(struct omap_dss_device *dssdev);
-#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
-struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
-struct omap_dss_device *omap_dss_find_device(void *data,
-               int (*match)(struct omap_dss_device *dssdev, void *data));
-const char *omapdss_get_default_display_name(void);
-
-void videomode_to_omap_video_timings(const struct videomode *vm,
-               struct omap_video_timings *ovt);
-void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
-               struct videomode *vm);
-
-int dss_feat_get_num_mgrs(void);
-int dss_feat_get_num_ovls(void);
-enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
-
-int omap_dss_get_num_overlay_managers(void);
-struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
-
-int omap_dss_get_num_overlays(void);
-struct omap_overlay *omap_dss_get_overlay(int num);
-
-int omapdss_register_output(struct omap_dss_device *output);
-void omapdss_unregister_output(struct omap_dss_device *output);
-struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
-struct omap_dss_device *omap_dss_find_output(const char *name);
-struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
-int omapdss_output_set_device(struct omap_dss_device *out,
-               struct omap_dss_device *dssdev);
-int omapdss_output_unset_device(struct omap_dss_device *out);
-
-struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
-struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev);
-
-void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
-               u16 *xres, u16 *yres);
-int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
-void omapdss_default_get_timings(struct omap_dss_device *dssdev,
-               struct omap_video_timings *timings);
-
-typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
-int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
-int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
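
A minimal sketch of the handler contract, assuming the mask argument reports
which of the registered DISPC_IRQ_* bits fired, and using the standard
completion helpers; here the caller blocks until one VSYNC:

static DECLARE_COMPLETION(vsync_done);

static void vsync_isr(void *arg, u32 mask)
{
	/* only bits from the mask given at registration arrive here */
	if (mask & DISPC_IRQ_VSYNC)
		complete(arg);
}

static int wait_one_vsync(void)
{
	int r;

	r = omap_dispc_register_isr(vsync_isr, &vsync_done, DISPC_IRQ_VSYNC);
	if (r)
		return r;

	wait_for_completion(&vsync_done);

	return omap_dispc_unregister_isr(vsync_isr, &vsync_done,
					 DISPC_IRQ_VSYNC);
}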
-
-int omapdss_compat_init(void);
-void omapdss_compat_uninit(void);
-
-static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
-{
-       return dssdev->src;
-}
-
-static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
-{
-       return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
-}
-
-struct device_node *
-omapdss_of_get_next_port(const struct device_node *parent,
-                        struct device_node *prev);
-
-struct device_node *
-omapdss_of_get_next_endpoint(const struct device_node *parent,
-                            struct device_node *prev);
-
-struct device_node *
-omapdss_of_get_first_endpoint(const struct device_node *parent);
-
-struct omap_dss_device *
-omapdss_of_find_source_for_first_ep(struct device_node *node);
-
-#endif
diff --git a/include/video/omapfb_dss.h b/include/video/omapfb_dss.h
new file mode 100644 (file)
index 0000000..1d38901
--- /dev/null
@@ -0,0 +1,860 @@
+/*
+ * Copyright (C) 2016 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __OMAPFB_DSS_H
+#define __OMAPFB_DSS_H
+
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_data/omapdss.h>
+
+#include <video/videomode.h>
+
+#define DISPC_IRQ_FRAMEDONE            (1 << 0)
+#define DISPC_IRQ_VSYNC                        (1 << 1)
+#define DISPC_IRQ_EVSYNC_EVEN          (1 << 2)
+#define DISPC_IRQ_EVSYNC_ODD           (1 << 3)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT    (1 << 4)
+#define DISPC_IRQ_PROG_LINE_NUM                (1 << 5)
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW   (1 << 6)
+#define DISPC_IRQ_GFX_END_WIN          (1 << 7)
+#define DISPC_IRQ_PAL_GAMMA_MASK       (1 << 8)
+#define DISPC_IRQ_OCP_ERR              (1 << 9)
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW  (1 << 10)
+#define DISPC_IRQ_VID1_END_WIN         (1 << 11)
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW  (1 << 12)
+#define DISPC_IRQ_VID2_END_WIN         (1 << 13)
+#define DISPC_IRQ_SYNC_LOST            (1 << 14)
+#define DISPC_IRQ_SYNC_LOST_DIGIT      (1 << 15)
+#define DISPC_IRQ_WAKEUP               (1 << 16)
+#define DISPC_IRQ_SYNC_LOST2           (1 << 17)
+#define DISPC_IRQ_VSYNC2               (1 << 18)
+#define DISPC_IRQ_VID3_END_WIN         (1 << 19)
+#define DISPC_IRQ_VID3_FIFO_UNDERFLOW  (1 << 20)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT2   (1 << 21)
+#define DISPC_IRQ_FRAMEDONE2           (1 << 22)
+#define DISPC_IRQ_FRAMEDONEWB          (1 << 23)
+#define DISPC_IRQ_FRAMEDONETV          (1 << 24)
+#define DISPC_IRQ_WBBUFFEROVERFLOW     (1 << 25)
+#define DISPC_IRQ_WBUNCOMPLETEERROR    (1 << 26)
+#define DISPC_IRQ_SYNC_LOST3           (1 << 27)
+#define DISPC_IRQ_VSYNC3               (1 << 28)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT3   (1 << 29)
+#define DISPC_IRQ_FRAMEDONE3           (1 << 30)
+
+struct omap_dss_device;
+struct omap_overlay_manager;
+struct dss_lcd_mgr_config;
+struct snd_aes_iec958;
+struct snd_cea_861_aud_if;
+struct hdmi_avi_infoframe;
+
+enum omap_display_type {
+       OMAP_DISPLAY_TYPE_NONE          = 0,
+       OMAP_DISPLAY_TYPE_DPI           = 1 << 0,
+       OMAP_DISPLAY_TYPE_DBI           = 1 << 1,
+       OMAP_DISPLAY_TYPE_SDI           = 1 << 2,
+       OMAP_DISPLAY_TYPE_DSI           = 1 << 3,
+       OMAP_DISPLAY_TYPE_VENC          = 1 << 4,
+       OMAP_DISPLAY_TYPE_HDMI          = 1 << 5,
+       OMAP_DISPLAY_TYPE_DVI           = 1 << 6,
+};
+
+enum omap_plane {
+       OMAP_DSS_GFX    = 0,
+       OMAP_DSS_VIDEO1 = 1,
+       OMAP_DSS_VIDEO2 = 2,
+       OMAP_DSS_VIDEO3 = 3,
+       OMAP_DSS_WB     = 4,
+};
+
+enum omap_channel {
+       OMAP_DSS_CHANNEL_LCD    = 0,
+       OMAP_DSS_CHANNEL_DIGIT  = 1,
+       OMAP_DSS_CHANNEL_LCD2   = 2,
+       OMAP_DSS_CHANNEL_LCD3   = 3,
+       OMAP_DSS_CHANNEL_WB     = 4,
+};
+
+enum omap_color_mode {
+       OMAP_DSS_COLOR_CLUT1    = 1 << 0,  /* BITMAP 1 */
+       OMAP_DSS_COLOR_CLUT2    = 1 << 1,  /* BITMAP 2 */
+       OMAP_DSS_COLOR_CLUT4    = 1 << 2,  /* BITMAP 4 */
+       OMAP_DSS_COLOR_CLUT8    = 1 << 3,  /* BITMAP 8 */
+       OMAP_DSS_COLOR_RGB12U   = 1 << 4,  /* RGB12, 16-bit container */
+       OMAP_DSS_COLOR_ARGB16   = 1 << 5,  /* ARGB16 */
+       OMAP_DSS_COLOR_RGB16    = 1 << 6,  /* RGB16 */
+       OMAP_DSS_COLOR_RGB24U   = 1 << 7,  /* RGB24, 32-bit container */
+       OMAP_DSS_COLOR_RGB24P   = 1 << 8,  /* RGB24, 24-bit container */
+       OMAP_DSS_COLOR_YUV2     = 1 << 9,  /* YUV2 4:2:2 co-sited */
+       OMAP_DSS_COLOR_UYVY     = 1 << 10, /* UYVY 4:2:2 co-sited */
+       OMAP_DSS_COLOR_ARGB32   = 1 << 11, /* ARGB32 */
+       OMAP_DSS_COLOR_RGBA32   = 1 << 12, /* RGBA32 */
+       OMAP_DSS_COLOR_RGBX32   = 1 << 13, /* RGBx32 */
+       OMAP_DSS_COLOR_NV12             = 1 << 14, /* NV12 format: YUV 4:2:0 */
+       OMAP_DSS_COLOR_RGBA16           = 1 << 15, /* RGBA16 - 4444 */
+       OMAP_DSS_COLOR_RGBX16           = 1 << 16, /* RGBx16 - 4444 */
+       OMAP_DSS_COLOR_ARGB16_1555      = 1 << 17, /* ARGB16 - 1555 */
+       OMAP_DSS_COLOR_XRGB16_1555      = 1 << 18, /* xRGB16 - 1555 */
+};
+
+enum omap_dss_load_mode {
+       OMAP_DSS_LOAD_CLUT_AND_FRAME    = 0,
+       OMAP_DSS_LOAD_CLUT_ONLY         = 1,
+       OMAP_DSS_LOAD_FRAME_ONLY        = 2,
+       OMAP_DSS_LOAD_CLUT_ONCE_FRAME   = 3,
+};
+
+enum omap_dss_trans_key_type {
+       OMAP_DSS_COLOR_KEY_GFX_DST = 0,
+       OMAP_DSS_COLOR_KEY_VID_SRC = 1,
+};
+
+enum omap_rfbi_te_mode {
+       OMAP_DSS_RFBI_TE_MODE_1 = 1,
+       OMAP_DSS_RFBI_TE_MODE_2 = 2,
+};
+
+enum omap_dss_signal_level {
+       OMAPDSS_SIG_ACTIVE_LOW,
+       OMAPDSS_SIG_ACTIVE_HIGH,
+};
+
+enum omap_dss_signal_edge {
+       OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+       OMAPDSS_DRIVE_SIG_RISING_EDGE,
+};
+
+enum omap_dss_venc_type {
+       OMAP_DSS_VENC_TYPE_COMPOSITE,
+       OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+enum omap_dss_dsi_pixel_format {
+       OMAP_DSS_DSI_FMT_RGB888,
+       OMAP_DSS_DSI_FMT_RGB666,
+       OMAP_DSS_DSI_FMT_RGB666_PACKED,
+       OMAP_DSS_DSI_FMT_RGB565,
+};
+
+enum omap_dss_dsi_mode {
+       OMAP_DSS_DSI_CMD_MODE = 0,
+       OMAP_DSS_DSI_VIDEO_MODE,
+};
+
+enum omap_display_caps {
+       OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE      = 1 << 0,
+       OMAP_DSS_DISPLAY_CAP_TEAR_ELIM          = 1 << 1,
+};
+
+enum omap_dss_display_state {
+       OMAP_DSS_DISPLAY_DISABLED = 0,
+       OMAP_DSS_DISPLAY_ACTIVE,
+};
+
+enum omap_dss_rotation_type {
+       OMAP_DSS_ROT_DMA        = 1 << 0,
+       OMAP_DSS_ROT_VRFB       = 1 << 1,
+       OMAP_DSS_ROT_TILER      = 1 << 2,
+};
+
+/* clockwise rotation angle */
+enum omap_dss_rotation_angle {
+       OMAP_DSS_ROT_0   = 0,
+       OMAP_DSS_ROT_90  = 1,
+       OMAP_DSS_ROT_180 = 2,
+       OMAP_DSS_ROT_270 = 3,
+};
+
+enum omap_overlay_caps {
+       OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
+       OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1,
+       OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2,
+       OMAP_DSS_OVL_CAP_ZORDER = 1 << 3,
+       OMAP_DSS_OVL_CAP_POS = 1 << 4,
+       OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
+};
+
+enum omap_dss_output_id {
+       OMAP_DSS_OUTPUT_DPI     = 1 << 0,
+       OMAP_DSS_OUTPUT_DBI     = 1 << 1,
+       OMAP_DSS_OUTPUT_SDI     = 1 << 2,
+       OMAP_DSS_OUTPUT_DSI1    = 1 << 3,
+       OMAP_DSS_OUTPUT_DSI2    = 1 << 4,
+       OMAP_DSS_OUTPUT_VENC    = 1 << 5,
+       OMAP_DSS_OUTPUT_HDMI    = 1 << 6,
+};
+
+/* RFBI */
+
+struct rfbi_timings {
+       int cs_on_time;
+       int cs_off_time;
+       int we_on_time;
+       int we_off_time;
+       int re_on_time;
+       int re_off_time;
+       int we_cycle_time;
+       int re_cycle_time;
+       int cs_pulse_width;
+       int access_time;
+
+       int clk_div;
+
+       u32 tim[5];             /* set by rfbi_convert_timings() */
+
+       int converted;
+};
+
+/* DSI */
+
+enum omap_dss_dsi_trans_mode {
+       /* Sync Pulses: both sync start and end packets sent */
+       OMAP_DSS_DSI_PULSE_MODE,
+       /* Sync Events: only sync start packets sent */
+       OMAP_DSS_DSI_EVENT_MODE,
+       /* Burst: only sync start packets sent, pixels are time compressed */
+       OMAP_DSS_DSI_BURST_MODE,
+};
+
+struct omap_dss_dsi_videomode_timings {
+       unsigned long hsclk;
+
+       unsigned ndl;
+       unsigned bitspp;
+
+       /* pixels */
+       u16 hact;
+       /* lines */
+       u16 vact;
+
+       /* DSI video mode blanking data */
+       /* Unit: byte clock cycles */
+       u16 hss;
+       u16 hsa;
+       u16 hse;
+       u16 hfp;
+       u16 hbp;
+       /* Unit: line clocks */
+       u16 vsa;
+       u16 vfp;
+       u16 vbp;
+
+       /* DSI blanking modes */
+       int blanking_mode;
+       int hsa_blanking_mode;
+       int hbp_blanking_mode;
+       int hfp_blanking_mode;
+
+       enum omap_dss_dsi_trans_mode trans_mode;
+
+       bool ddr_clk_always_on;
+       int window_sync;
+};
+
+struct omap_dss_dsi_config {
+       enum omap_dss_dsi_mode mode;
+       enum omap_dss_dsi_pixel_format pixel_format;
+       const struct omap_video_timings *timings;
+
+       unsigned long hs_clk_min, hs_clk_max;
+       unsigned long lp_clk_min, lp_clk_max;
+
+       bool ddr_clk_always_on;
+       enum omap_dss_dsi_trans_mode trans_mode;
+};
+
+struct omap_video_timings {
+       /* Unit: pixels */
+       u16 x_res;
+       /* Unit: pixels */
+       u16 y_res;
+       /* Unit: Hz */
+       u32 pixelclock;
+       /* Unit: pixel clocks */
+       u16 hsw;        /* Horizontal synchronization pulse width */
+       /* Unit: pixel clocks */
+       u16 hfp;        /* Horizontal front porch */
+       /* Unit: pixel clocks */
+       u16 hbp;        /* Horizontal back porch */
+       /* Unit: line clocks */
+       u16 vsw;        /* Vertical synchronization pulse width */
+       /* Unit: line clocks */
+       u16 vfp;        /* Vertical front porch */
+       /* Unit: line clocks */
+       u16 vbp;        /* Vertical back porch */
+
+       /* Vsync logic level */
+       enum omap_dss_signal_level vsync_level;
+       /* Hsync logic level */
+       enum omap_dss_signal_level hsync_level;
+       /* Interlaced or Progressive timings */
+       bool interlace;
+       /* Pixel clock edge to drive LCD data */
+       enum omap_dss_signal_edge data_pclk_edge;
+       /* Data enable logic level */
+       enum omap_dss_signal_level de_level;
+       /* Pixel clock edges to drive HSYNC and VSYNC signals */
+       enum omap_dss_signal_edge sync_pclk_edge;
+
+       bool double_pixel;
+};
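
A minimal sketch of how the per-field unit comments combine in practice,
filled in with the standard VESA 640x480@60 mode purely as an illustration:

static const struct omap_video_timings vga_timings = {
	.x_res		= 640,
	.y_res		= 480,
	.pixelclock	= 25175000,	/* 25.175 MHz */

	.hsw		= 96,		/* pixel clocks */
	.hfp		= 16,
	.hbp		= 48,

	.vsw		= 2,		/* line clocks */
	.vfp		= 10,
	.vbp		= 33,

	.hsync_level	= OMAPDSS_SIG_ACTIVE_LOW,
	.vsync_level	= OMAPDSS_SIG_ACTIVE_LOW,
	.de_level	= OMAPDSS_SIG_ACTIVE_HIGH,
	.data_pclk_edge	= OMAPDSS_DRIVE_SIG_RISING_EDGE,
	.sync_pclk_edge	= OMAPDSS_DRIVE_SIG_RISING_EDGE,
};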
+
+/* Hardcoded timings for tv modes. Venc only uses these to
+ * identify the mode, and does not actually use the configs
+ * itself. However, the configs should be something that
+ * a normal monitor can also show */
+extern const struct omap_video_timings omap_dss_pal_timings;
+extern const struct omap_video_timings omap_dss_ntsc_timings;
+
+struct omap_dss_cpr_coefs {
+       s16 rr, rg, rb;
+       s16 gr, gg, gb;
+       s16 br, bg, bb;
+};
+
+struct omap_overlay_info {
+       dma_addr_t paddr;
+       dma_addr_t p_uv_addr;  /* for NV12 format */
+       u16 screen_width;
+       u16 width;
+       u16 height;
+       enum omap_color_mode color_mode;
+       u8 rotation;
+       enum omap_dss_rotation_type rotation_type;
+       bool mirror;
+
+       u16 pos_x;
+       u16 pos_y;
+       u16 out_width;  /* if 0, out_width == width */
+       u16 out_height; /* if 0, out_height == height */
+       u8 global_alpha;
+       u8 pre_mult_alpha;
+       u8 zorder;
+};
+
+struct omap_overlay {
+       struct kobject kobj;
+       struct list_head list;
+
+       /* static fields */
+       const char *name;
+       enum omap_plane id;
+       enum omap_color_mode supported_modes;
+       enum omap_overlay_caps caps;
+
+       /* dynamic fields */
+       struct omap_overlay_manager *manager;
+
+       /*
+        * The following functions do not block:
+        *
+        * is_enabled
+        * set_overlay_info
+        * get_overlay_info
+        *
+        * The rest of the functions may block and cannot be called from
+        * interrupt context
+        */
+
+       int (*enable)(struct omap_overlay *ovl);
+       int (*disable)(struct omap_overlay *ovl);
+       bool (*is_enabled)(struct omap_overlay *ovl);
+
+       int (*set_manager)(struct omap_overlay *ovl,
+               struct omap_overlay_manager *mgr);
+       int (*unset_manager)(struct omap_overlay *ovl);
+
+       int (*set_overlay_info)(struct omap_overlay *ovl,
+                       struct omap_overlay_info *info);
+       void (*get_overlay_info)(struct omap_overlay *ovl,
+                       struct omap_overlay_info *info);
+
+       int (*wait_for_go)(struct omap_overlay *ovl);
+
+       struct omap_dss_device *(*get_device)(struct omap_overlay *ovl);
+};
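
A short sketch of the read-modify-write pattern the info ops imply (the
scaling factor and zorder are illustrative); changes only take effect once
the owning manager applies them:

static int upscale_overlay(struct omap_overlay *ovl)
{
	struct omap_overlay_info info;
	int r;

	ovl->get_overlay_info(ovl, &info);

	info.out_width	= info.width * 2;	/* 0 would mean "no scaling" */
	info.out_height	= info.height * 2;
	info.zorder	= 1;

	r = ovl->set_overlay_info(ovl, &info);
	if (r)
		return r;

	return ovl->manager ? ovl->manager->apply(ovl->manager) : -ENODEV;
}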
+
+struct omap_overlay_manager_info {
+       u32 default_color;
+
+       enum omap_dss_trans_key_type trans_key_type;
+       u32 trans_key;
+       bool trans_enabled;
+
+       bool partial_alpha_enabled;
+
+       bool cpr_enable;
+       struct omap_dss_cpr_coefs cpr_coefs;
+};
+
+struct omap_overlay_manager {
+       struct kobject kobj;
+
+       /* static fields */
+       const char *name;
+       enum omap_channel id;
+       struct list_head overlays;
+       enum omap_display_type supported_displays;
+       enum omap_dss_output_id supported_outputs;
+
+       /* dynamic fields */
+       struct omap_dss_device *output;
+
+       /*
+        * The following functions do not block:
+        *
+        * set_manager_info
+        * get_manager_info
+        * apply
+        *
+        * The rest of the functions may block and cannot be called from
+        * interrupt context
+        */
+
+       int (*set_output)(struct omap_overlay_manager *mgr,
+               struct omap_dss_device *output);
+       int (*unset_output)(struct omap_overlay_manager *mgr);
+
+       int (*set_manager_info)(struct omap_overlay_manager *mgr,
+                       struct omap_overlay_manager_info *info);
+       void (*get_manager_info)(struct omap_overlay_manager *mgr,
+                       struct omap_overlay_manager_info *info);
+
+       int (*apply)(struct omap_overlay_manager *mgr);
+       int (*wait_for_go)(struct omap_overlay_manager *mgr);
+       int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
+
+       struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr);
+};
+
+/* 22 pins means 1 clk lane and 10 data lanes */
+#define OMAP_DSS_MAX_DSI_PINS 22
+
+struct omap_dsi_pin_config {
+       int num_pins;
+       /*
+        * pin numbers in the following order:
+        * clk+, clk-
+        * data1+, data1-
+        * data2+, data2-
+        * ...
+        */
+       int pins[OMAP_DSS_MAX_DSI_PINS];
+};
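
Following the ordering spelled out in the comment, a one-clock-lane,
two-data-lane configuration takes six entries; the pin numbers below are
invented for illustration:

static const struct omap_dsi_pin_config two_lane_pins = {
	.num_pins = 6,
	.pins = {
		0, 1,	/* clk+,   clk-   */
		2, 3,	/* data1+, data1- */
		4, 5,	/* data2+, data2- */
	},
};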
+
+struct omap_dss_writeback_info {
+       u32 paddr;
+       u32 p_uv_addr;
+       u16 buf_width;
+       u16 width;
+       u16 height;
+       enum omap_color_mode color_mode;
+       u8 rotation;
+       enum omap_dss_rotation_type rotation_type;
+       bool mirror;
+       u8 pre_mult_alpha;
+};
+
+struct omapdss_dpi_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+
+       void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
+};
+
+struct omapdss_sdi_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+
+       void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
+};
+
+struct omapdss_dvi_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+};
+
+struct omapdss_atv_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+
+       void (*set_type)(struct omap_dss_device *dssdev,
+               enum omap_dss_venc_type type);
+       void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev,
+               bool invert_polarity);
+
+       int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+       u32 (*get_wss)(struct omap_dss_device *dssdev);
+};
+
+struct omapdss_hdmi_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+
+       int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+       bool (*detect)(struct omap_dss_device *dssdev);
+
+       int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
+       int (*set_infoframe)(struct omap_dss_device *dssdev,
+               const struct hdmi_avi_infoframe *avi);
+};
+
+struct omapdss_dsi_ops {
+       int (*connect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+       void (*disconnect)(struct omap_dss_device *dssdev,
+                       struct omap_dss_device *dst);
+
+       int (*enable)(struct omap_dss_device *dssdev);
+       void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
+                       bool enter_ulps);
+
+       /* bus configuration */
+       int (*set_config)(struct omap_dss_device *dssdev,
+                       const struct omap_dss_dsi_config *cfg);
+       int (*configure_pins)(struct omap_dss_device *dssdev,
+                       const struct omap_dsi_pin_config *pin_cfg);
+
+       void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
+                       bool enable);
+       int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+
+       int (*update)(struct omap_dss_device *dssdev, int channel,
+                       void (*callback)(int, void *), void *data);
+
+       void (*bus_lock)(struct omap_dss_device *dssdev);
+       void (*bus_unlock)(struct omap_dss_device *dssdev);
+
+       int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
+       void (*disable_video_output)(struct omap_dss_device *dssdev,
+                       int channel);
+
+       int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
+       int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
+                       int vc_id);
+       void (*release_vc)(struct omap_dss_device *dssdev, int channel);
+
+       /* data transfer */
+       int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
+                       u8 *data, int len);
+       int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
+                       u8 *data, int len);
+       int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
+                       u8 *data, int len);
+
+       int (*gen_write)(struct omap_dss_device *dssdev, int channel,
+                       u8 *data, int len);
+       int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
+                       u8 *data, int len);
+       int (*gen_read)(struct omap_dss_device *dssdev, int channel,
+                       u8 *reqdata, int reqlen,
+                       u8 *data, int len);
+
+       int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
+
+       int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
+                       int channel, u16 plen);
+};
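
A minimal sketch of the data-transfer side, assuming dssdev is the DSI output
device and the panel already holds a virtual channel; 0x29 is the standard
MIPI DCS set_display_on opcode:

static int panel_display_on(struct omap_dss_device *dssdev, int channel)
{
	u8 cmd = 0x29;	/* MIPI DCS: set_display_on */
	int r;

	dssdev->ops.dsi->bus_lock(dssdev);
	r = dssdev->ops.dsi->dcs_write(dssdev, channel, &cmd, 1);
	dssdev->ops.dsi->bus_unlock(dssdev);

	return r;
}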
+
+struct omap_dss_device {
+       struct kobject kobj;
+       struct device *dev;
+
+       struct module *owner;
+
+       struct list_head panel_list;
+
+       /* alias in the form of "display%d" */
+       char alias[16];
+
+       enum omap_display_type type;
+       enum omap_display_type output_type;
+
+       union {
+               struct {
+                       u8 data_lines;
+               } dpi;
+
+               struct {
+                       u8 channel;
+                       u8 data_lines;
+               } rfbi;
+
+               struct {
+                       u8 datapairs;
+               } sdi;
+
+               struct {
+                       int module;
+               } dsi;
+
+               struct {
+                       enum omap_dss_venc_type type;
+                       bool invert_polarity;
+               } venc;
+       } phy;
+
+       struct {
+               struct omap_video_timings timings;
+
+               enum omap_dss_dsi_pixel_format dsi_pix_fmt;
+               enum omap_dss_dsi_mode dsi_mode;
+       } panel;
+
+       struct {
+               u8 pixel_size;
+               struct rfbi_timings rfbi_timings;
+       } ctrl;
+
+       const char *name;
+
+       /* used to match device to driver */
+       const char *driver_name;
+
+       void *data;
+
+       struct omap_dss_driver *driver;
+
+       union {
+               const struct omapdss_dpi_ops *dpi;
+               const struct omapdss_sdi_ops *sdi;
+               const struct omapdss_dvi_ops *dvi;
+               const struct omapdss_hdmi_ops *hdmi;
+               const struct omapdss_atv_ops *atv;
+               const struct omapdss_dsi_ops *dsi;
+       } ops;
+
+       /* helper variable for driver suspend/resume */
+       bool activate_after_resume;
+
+       enum omap_display_caps caps;
+
+       struct omap_dss_device *src;
+
+       enum omap_dss_display_state state;
+
+       /* OMAP DSS output specific fields */
+
+       struct list_head list;
+
+       /* DISPC channel for this output */
+       enum omap_channel dispc_channel;
+       bool dispc_channel_connected;
+
+       /* output instance */
+       enum omap_dss_output_id id;
+
+       /* the port number in the DT node */
+       int port_num;
+
+       /* dynamic fields */
+       struct omap_overlay_manager *manager;
+
+       struct omap_dss_device *dst;
+};
+
+struct omap_dss_driver {
+       int (*probe)(struct omap_dss_device *);
+       void (*remove)(struct omap_dss_device *);
+
+       int (*connect)(struct omap_dss_device *dssdev);
+       void (*disconnect)(struct omap_dss_device *dssdev);
+
+       int (*enable)(struct omap_dss_device *display);
+       void (*disable)(struct omap_dss_device *display);
+       int (*run_test)(struct omap_dss_device *display, int test);
+
+       int (*update)(struct omap_dss_device *dssdev,
+                              u16 x, u16 y, u16 w, u16 h);
+       int (*sync)(struct omap_dss_device *dssdev);
+
+       int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+       int (*get_te)(struct omap_dss_device *dssdev);
+
+       u8 (*get_rotate)(struct omap_dss_device *dssdev);
+       int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+       bool (*get_mirror)(struct omap_dss_device *dssdev);
+       int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+       int (*memory_read)(struct omap_dss_device *dssdev,
+                       void *buf, size_t size,
+                       u16 x, u16 y, u16 w, u16 h);
+
+       void (*get_resolution)(struct omap_dss_device *dssdev,
+                       u16 *xres, u16 *yres);
+       void (*get_dimensions)(struct omap_dss_device *dssdev,
+                       u32 *width, u32 *height);
+       int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
+
+       int (*check_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*set_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+       void (*get_timings)(struct omap_dss_device *dssdev,
+                       struct omap_video_timings *timings);
+
+       int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+       u32 (*get_wss)(struct omap_dss_device *dssdev);
+
+       int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+       bool (*detect)(struct omap_dss_device *dssdev);
+
+       int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
+       int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
+               const struct hdmi_avi_infoframe *avi);
+};
+
+enum omapdss_version omapdss_get_version(void);
+bool omapdss_is_initialized(void);
+
+int omap_dss_register_driver(struct omap_dss_driver *);
+void omap_dss_unregister_driver(struct omap_dss_driver *);
+
+int omapdss_register_display(struct omap_dss_device *dssdev);
+void omapdss_unregister_display(struct omap_dss_device *dssdev);
+
+struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
+void omap_dss_put_device(struct omap_dss_device *dssdev);
+#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
+struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
+struct omap_dss_device *omap_dss_find_device(void *data,
+               int (*match)(struct omap_dss_device *dssdev, void *data));
+const char *omapdss_get_default_display_name(void);
+
+void videomode_to_omap_video_timings(const struct videomode *vm,
+               struct omap_video_timings *ovt);
+void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
+               struct videomode *vm);
+
+int dss_feat_get_num_mgrs(void);
+int dss_feat_get_num_ovls(void);
+enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
+
+int omap_dss_get_num_overlay_managers(void);
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
+
+int omap_dss_get_num_overlays(void);
+struct omap_overlay *omap_dss_get_overlay(int num);
+
+int omapdss_register_output(struct omap_dss_device *output);
+void omapdss_unregister_output(struct omap_dss_device *output);
+struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
+struct omap_dss_device *omap_dss_find_output(const char *name);
+struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
+int omapdss_output_set_device(struct omap_dss_device *out,
+               struct omap_dss_device *dssdev);
+int omapdss_output_unset_device(struct omap_dss_device *out);
+
+struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
+struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev);
+
+void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
+               u16 *xres, u16 *yres);
+int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+               struct omap_video_timings *timings);
+
+typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+
+int omapdss_compat_init(void);
+void omapdss_compat_uninit(void);
+
+static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
+{
+       return dssdev->src;
+}
+
+static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
+{
+       return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
+}
+
+struct device_node *
+omapdss_of_get_next_port(const struct device_node *parent,
+                        struct device_node *prev);
+
+struct device_node *
+omapdss_of_get_next_endpoint(const struct device_node *parent,
+                            struct device_node *prev);
+
+struct device_node *
+omapdss_of_get_first_endpoint(const struct device_node *parent);
+
+struct omap_dss_device *
+omapdss_of_find_source_for_first_ep(struct device_node *node);
+
+#endif /* __OMAPFB_DSS_H */
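(Editor's note: a hedged usage sketch of the lookup API declared above. The
match callback, the "display0" alias value, and the assumption that the lookup
returns a counted reference are illustrative; the function names and the alias
field come from the declarations in this header.)

static int match_alias(struct omap_dss_device *dssdev, void *data)
{
	return !strcmp(dssdev->alias, (const char *)data);
}

	/* find the display registered as "display0"; NULL if absent */
	struct omap_dss_device *dssdev =
		omap_dss_find_device((void *)"display0", match_alias);
	if (dssdev) {
		/* ... use the display ... */
		omap_dss_put_device(dssdev);	/* assumed: lookup took a reference */
	}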
index f755a602d4a176e006dc2cb5830d1c4812bac81f..c02d89777713b8772e47c2759570f4a838cb18e7 100644 (file)
@@ -1458,6 +1458,7 @@ config KALLSYMS_ALL
 
 config KALLSYMS_ABSOLUTE_PERCPU
        bool
+       depends on KALLSYMS
        default X86_64 && SMP
 
 config KALLSYMS_BASE_RELATIVE
index 4c17fda5c2ff51c33eb4bf5fbd06a810087c3287..eae02aa03c9e80062ba6920c57c347c415e6d0ca 100644 (file)
@@ -453,7 +453,7 @@ void __init __weak smp_setup_processor_id(void)
 }
 
 # if THREAD_SIZE >= PAGE_SIZE
-void __init __weak thread_info_cache_init(void)
+void __init __weak thread_stack_cache_init(void)
 {
 }
 #endif
@@ -627,7 +627,7 @@ asmlinkage __visible void __init start_kernel(void)
        /* Should be run before the first non-init thread is created */
        init_espfix_bsp();
 #endif
-       thread_info_cache_init();
+       thread_stack_cache_init();
        cred_init();
        fork_init();
        proc_caches_init();
@@ -708,11 +708,13 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
 {
        struct blacklist_entry *entry;
        char fn_name[KSYM_SYMBOL_LEN];
+       unsigned long addr;
 
        if (list_empty(&blacklisted_initcalls))
                return false;
 
-       sprint_symbol_no_offset(fn_name, (unsigned long)fn);
+       addr = (unsigned long) dereference_function_descriptor(fn);
+       sprint_symbol_no_offset(fn_name, addr);
 
        list_for_each_entry(entry, &blacklisted_initcalls, next) {
                if (!strcmp(fn_name, entry->buf)) {
index 22bb4f24f071df56dbddcb3f84fbf3ad2e8c63bc..8d528f9930dad639c22c797da715545db717ce6e 100644 (file)
@@ -1883,6 +1883,23 @@ out_null:
        audit_log_format(ab, " exe=(null)");
 }
 
+struct tty_struct *audit_get_tty(struct task_struct *tsk)
+{
+       struct tty_struct *tty = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tsk->sighand->siglock, flags);
+       if (tsk->signal)
+               tty = tty_kref_get(tsk->signal->tty);
+       spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+       return tty;
+}
+
+void audit_put_tty(struct tty_struct *tty)
+{
+       tty_kref_put(tty);
+}
+
 void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
 {
        const struct cred *cred;
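(Editor's note: a hedged sketch of the intended pairing for the helpers added
above — take a counted tty reference under siglock, use it, drop it. The
surrounding function is illustrative; tty_name() and the NULL-tolerance of
tty_kref_put() are existing tty-layer behavior.)

static void log_current_tty(struct audit_buffer *ab)
{
	struct tty_struct *tty = audit_get_tty(current);

	audit_log_format(ab, " tty=%s", tty ? tty_name(tty) : "(none)");
	audit_put_tty(tty);		/* tty_kref_put() tolerates NULL */
}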
index cbbe6bb6496ea934f86ffcfedffe971c76bbb692..a492f4c4e7106aa7c4c23e7bcb8e502477df6638 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/audit.h>
 #include <linux/skbuff.h>
 #include <uapi/linux/mqueue.h>
+#include <linux/tty.h>
 
 /* AUDIT_NAMES is the number of slots we reserve in the audit_context
  * for saving names from getname().  If we get more names we will allocate
@@ -262,6 +263,9 @@ extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
 extern void audit_log_d_path_exe(struct audit_buffer *ab,
                                 struct mm_struct *mm);
 
+extern struct tty_struct *audit_get_tty(struct task_struct *tsk);
+extern void audit_put_tty(struct tty_struct *tty);
+
 /* audit watch functions */
 #ifdef CONFIG_AUDIT_WATCH
 extern void audit_put_watch(struct audit_watch *watch);
index 62ab53d7619cfb249154276e40a38d1e69a906f6..2672d105cffcdaf7703d9f655e7338dd0b102cc0 100644 (file)
@@ -63,7 +63,6 @@
 #include <asm/unistd.h>
 #include <linux/security.h>
 #include <linux/list.h>
-#include <linux/tty.h>
 #include <linux/binfmts.h>
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
@@ -1985,14 +1984,15 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
        if (!audit_enabled)
                return;
 
+       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
+       if (!ab)
+               return;
+
        uid = from_kuid(&init_user_ns, task_uid(current));
        oldloginuid = from_kuid(&init_user_ns, koldloginuid);
        loginuid = from_kuid(&init_user_ns, kloginuid),
        tty = audit_get_tty(current);
 
-       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
-       if (!ab)
-               return;
        audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
        audit_log_task_context(ab);
        audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
index 04be7021f84806aa6777a56dd1c6dc677b46aee6..318858edb1cd82b7d22294f44b3ee18d07613efd 100644 (file)
@@ -365,7 +365,6 @@ static struct file_system_type bpf_fs_type = {
        .name           = "bpf",
        .mount          = bpf_mount,
        .kill_sb        = kill_litter_super,
-       .fs_flags       = FS_USERNS_MOUNT,
 };
 
 MODULE_ALIAS_FS("bpf");
index 668e07903c8f1a3950c4e494eb7443748710f08c..eec9f90ba030410a5104991cfcd377400cb4bb7d 100644 (file)
  * are set to NOT_INIT to indicate that they are no longer readable.
  */
 
-/* types of values stored in eBPF registers */
-enum bpf_reg_type {
-       NOT_INIT = 0,            /* nothing was written into register */
-       UNKNOWN_VALUE,           /* reg doesn't contain a valid pointer */
-       PTR_TO_CTX,              /* reg points to bpf_context */
-       CONST_PTR_TO_MAP,        /* reg points to struct bpf_map */
-       PTR_TO_MAP_VALUE,        /* reg points to map element value */
-       PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
-       FRAME_PTR,               /* reg == frame_pointer */
-       PTR_TO_STACK,            /* reg == frame_pointer + imm */
-       CONST_IMM,               /* constant integer value */
-
-       /* PTR_TO_PACKET represents:
-        * skb->data
-        * skb->data + imm
-        * skb->data + (u16) var
-        * skb->data + (u16) var + imm
-        * if (range > 0) then [ptr, ptr + range - off) is safe to access
-        * if (id > 0) means that some 'var' was added
-        * if (off > 0) menas that 'imm' was added
-        */
-       PTR_TO_PACKET,
-       PTR_TO_PACKET_END,       /* skb->data + headlen */
-};
-
 struct reg_state {
        enum bpf_reg_type type;
        union {
@@ -695,10 +670,10 @@ static int check_packet_access(struct verifier_env *env, u32 regno, int off,
 
 /* check access to 'struct bpf_context' fields */
 static int check_ctx_access(struct verifier_env *env, int off, int size,
-                           enum bpf_access_type t)
+                           enum bpf_access_type t, enum bpf_reg_type *reg_type)
 {
        if (env->prog->aux->ops->is_valid_access &&
-           env->prog->aux->ops->is_valid_access(off, size, t)) {
+           env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
                /* remember the offset of last byte accessed in ctx */
                if (env->prog->aux->max_ctx_offset < off + size)
                        env->prog->aux->max_ctx_offset = off + size;
@@ -798,21 +773,19 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
                        mark_reg_unknown_value(state->regs, value_regno);
 
        } else if (reg->type == PTR_TO_CTX) {
+               enum bpf_reg_type reg_type = UNKNOWN_VALUE;
+
                if (t == BPF_WRITE && value_regno >= 0 &&
                    is_pointer_value(env, value_regno)) {
                        verbose("R%d leaks addr into ctx\n", value_regno);
                        return -EACCES;
                }
-               err = check_ctx_access(env, off, size, t);
+               err = check_ctx_access(env, off, size, t, &reg_type);
                if (!err && t == BPF_READ && value_regno >= 0) {
                        mark_reg_unknown_value(state->regs, value_regno);
-                       if (off == offsetof(struct __sk_buff, data) &&
-                           env->allow_ptr_leaks)
+                       if (env->allow_ptr_leaks)
                                /* note that reg.[id|off|range] == 0 */
-                               state->regs[value_regno].type = PTR_TO_PACKET;
-                       else if (off == offsetof(struct __sk_buff, data_end) &&
-                                env->allow_ptr_leaks)
-                               state->regs[value_regno].type = PTR_TO_PACKET_END;
+                               state->regs[value_regno].type = reg_type;
                }
 
        } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) {
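(Editor's note: a hedged sketch of how a program type's is_valid_access()
callback can use the new out-parameter — instead of the verifier hard-coding
__sk_buff offsets, the callback reports the register type for special ctx
fields. The function name and the elided trailing checks are illustrative.)

static bool sk_is_valid_access_sketch(int off, int size,
				      enum bpf_access_type type,
				      enum bpf_reg_type *reg_type)
{
	switch (off) {
	case offsetof(struct __sk_buff, data):
		*reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct __sk_buff, data_end):
		*reg_type = PTR_TO_PACKET_END;
		break;
	}
	/* ... the usual size/offset/alignment validation would follow ... */
	return true;
}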
index 86cb5c6e89320f28e17691c6d69e58c9dfde81fb..75c0ff00aca60d298062755539e83cbfeaffaaf2 100644 (file)
@@ -837,6 +837,8 @@ static void put_css_set_locked(struct css_set *cset)
 
 static void put_css_set(struct css_set *cset)
 {
+       unsigned long flags;
+
        /*
         * Ensure that the refcount doesn't hit zero while any readers
         * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -845,9 +847,9 @@ static void put_css_set(struct css_set *cset)
        if (atomic_add_unless(&cset->refcount, -1, 1))
                return;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irqsave(&css_set_lock, flags);
        put_css_set_locked(cset);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irqrestore(&css_set_lock, flags);
 }
 
 /*
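(Editor's note: the conversion above keeps the atomic_dec_and_lock()-like
shape — a lockless fast path that refuses to drop the last reference, and a
slow path that takes the final reference down under the lock, now with IRQs
disabled. A minimal sketch with hypothetical names:)

static void obj_put(struct obj *o)
{
	unsigned long flags;

	/* fast path: drop a reference unless it would hit zero */
	if (atomic_add_unless(&o->refcount, -1, 1))
		return;

	/* slow path: the final put must happen under obj_lock */
	spin_lock_irqsave(&obj_lock, flags);
	if (atomic_dec_and_test(&o->refcount))
		obj_free_locked(o);	/* hypothetical teardown helper */
	spin_unlock_irqrestore(&obj_lock, flags);
}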
@@ -1070,11 +1072,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 
        /* First see if we already have a cgroup group that matches
         * the desired set */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        cset = find_existing_css_set(old_cset, cgrp, template);
        if (cset)
                get_css_set(cset);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        if (cset)
                return cset;
@@ -1102,7 +1104,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
         * find_existing_css_set() */
        memcpy(cset->subsys, template, sizeof(cset->subsys));
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        /* Add reference counts and links from the new css_set. */
        list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;
@@ -1128,7 +1130,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
                css_get(css);
        }
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        return cset;
 }
@@ -1192,7 +1194,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
         * Release all the links from cset_links to this hierarchy's
         * root cgroup
         */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
                list_del(&link->cset_link);
@@ -1200,7 +1202,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
                kfree(link);
        }
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        if (!list_empty(&root->root_list)) {
                list_del(&root->root_list);
@@ -1600,11 +1602,11 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
                ss->root = dst_root;
                css->cgroup = dcgrp;
 
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                hash_for_each(css_set_table, i, cset, hlist)
                        list_move_tail(&cset->e_cset_node[ss->id],
                                       &dcgrp->e_csets[ss->id]);
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
 
                /* default hierarchy doesn't enable controllers by default */
                dst_root->subsys_mask |= 1 << ssid;
@@ -1640,10 +1642,10 @@ static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
        if (!buf)
                return -ENOMEM;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
        len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        if (len >= PATH_MAX)
                len = -ERANGE;
@@ -1897,7 +1899,7 @@ static void cgroup_enable_task_cg_lists(void)
 {
        struct task_struct *p, *g;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        if (use_task_css_set_links)
                goto out_unlock;
@@ -1922,8 +1924,12 @@ static void cgroup_enable_task_cg_lists(void)
                 * entry won't be deleted though the process has exited.
                 * Do it while holding siglock so that we don't end up
                 * racing against cgroup_exit().
+                *
+                * Interrupts were already disabled while acquiring
+                * the css_set_lock, so we do not need to disable them
+                * again when acquiring the sighand->siglock here.
                 */
-               spin_lock_irq(&p->sighand->siglock);
+               spin_lock(&p->sighand->siglock);
                if (!(p->flags & PF_EXITING)) {
                        struct css_set *cset = task_css_set(p);
 
@@ -1932,11 +1938,11 @@ static void cgroup_enable_task_cg_lists(void)
                        list_add_tail(&p->cg_list, &cset->tasks);
                        get_css_set(cset);
                }
-               spin_unlock_irq(&p->sighand->siglock);
+               spin_unlock(&p->sighand->siglock);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
 out_unlock:
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 }
 
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
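(Editor's note: the nesting rule from the comment above, in isolation — once
an outer spin_lock_irq() has disabled interrupts, inner locks taken inside
that region may use the plain variants. Lock names are placeholders.)

	spin_lock_irq(&outer_lock);	/* interrupts are off from here */
	spin_lock(&inner_lock);		/* no _irq needed: already disabled */
	/* ... critical section ... */
	spin_unlock(&inner_lock);
	spin_unlock_irq(&outer_lock);	/* interrupts are re-enabled here */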
@@ -2043,13 +2049,13 @@ static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
         * Link the root cgroup in this hierarchy into all the css_set
         * objects.
         */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        hash_for_each(css_set_table, i, cset, hlist) {
                link_css_set(&tmp_links, cset, root_cgrp);
                if (css_set_populated(cset))
                        cgroup_update_populated(root_cgrp, true);
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        BUG_ON(!list_empty(&root_cgrp->self.children));
        BUG_ON(atomic_read(&root->nr_cgrps) != 1);
@@ -2256,11 +2262,11 @@ out_mount:
                struct cgroup *cgrp;
 
                mutex_lock(&cgroup_mutex);
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
 
                cgrp = cset_cgroup_from_root(ns->root_cset, root);
 
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
                mutex_unlock(&cgroup_mutex);
 
                nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
@@ -2337,11 +2343,11 @@ char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
        char *ret;
 
        mutex_lock(&cgroup_mutex);
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
 
        return ret;
@@ -2369,7 +2375,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
        char *path = NULL;
 
        mutex_lock(&cgroup_mutex);
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
 
@@ -2382,7 +2388,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
                        path = buf;
        }
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
        return path;
 }
@@ -2557,7 +2563,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
         * the new cgroup.  There are no failure cases after here, so this
         * is the commit point.
         */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(cset, &tset->src_csets, mg_node) {
                list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
                        struct css_set *from_cset = task_css_set(task);
@@ -2568,7 +2574,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
                        put_css_set_locked(from_cset);
                }
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        /*
         * Migration is committed, all target tasks are now on dst_csets.
@@ -2597,13 +2603,13 @@ out_cancel_attach:
                }
        } while_each_subsys_mask();
 out_release_tset:
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_splice_init(&tset->dst_csets, &tset->src_csets);
        list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
                list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
                list_del_init(&cset->mg_node);
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        return ret;
 }
 
@@ -2634,7 +2640,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
 
        lockdep_assert_held(&cgroup_mutex);
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
                cset->mg_src_cgrp = NULL;
                cset->mg_dst_cgrp = NULL;
@@ -2642,7 +2648,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
                list_del_init(&cset->mg_preload_node);
                put_css_set_locked(cset);
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 }
 
 /**
@@ -2783,7 +2789,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
         * already PF_EXITING could be freed from underneath us unless we
         * take an rcu_read_lock.
         */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        task = leader;
        do {
@@ -2792,7 +2798,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
                        break;
        } while_each_thread(leader, task);
        rcu_read_unlock();
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        return cgroup_taskset_migrate(&tset, root);
 }
@@ -2816,7 +2822,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
                return -EBUSY;
 
        /* look up all src csets */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        task = leader;
        do {
@@ -2826,7 +2832,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
                        break;
        } while_each_thread(leader, task);
        rcu_read_unlock();
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        /* prepare dst csets and commit */
        ret = cgroup_migrate_prepare_dst(&preloaded_csets);
@@ -2859,9 +2865,9 @@ static int cgroup_procs_write_permission(struct task_struct *task,
                struct cgroup *cgrp;
                struct inode *inode;
 
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
 
                while (!cgroup_is_descendant(dst_cgrp, cgrp))
                        cgrp = cgroup_parent(cgrp);
@@ -2962,9 +2968,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
                if (root == &cgrp_dfl_root)
                        continue;
 
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                from_cgrp = task_cgroup_from_root(from, root);
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
 
                retval = cgroup_attach_task(from_cgrp, tsk, false);
                if (retval)
@@ -3080,7 +3086,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
        percpu_down_write(&cgroup_threadgroup_rwsem);
 
        /* look up all csses currently attached to @cgrp's subtree */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
                struct cgrp_cset_link *link;
 
@@ -3088,14 +3094,14 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
                        cgroup_migrate_add_src(link->cset, dsct,
                                               &preloaded_csets);
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        /* NULL dst indicates self on default hierarchy */
        ret = cgroup_migrate_prepare_dst(&preloaded_csets);
        if (ret)
                goto out_finish;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
                struct task_struct *task, *ntask;
 
@@ -3107,7 +3113,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
                list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
                        cgroup_taskset_add(task, &tset);
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        ret = cgroup_taskset_migrate(&tset, cgrp->root);
 out_finish:
@@ -3908,10 +3914,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
        int count = 0;
        struct cgrp_cset_link *link;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &cgrp->cset_links, cset_link)
                count += atomic_read(&link->cset->refcount);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        return count;
 }
 
@@ -4249,7 +4255,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 
        memset(it, 0, sizeof(*it));
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        it->ss = css->ss;
 
@@ -4262,7 +4268,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 
        css_task_iter_advance_css_set(it);
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 }
 
 /**
@@ -4280,7 +4286,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
                it->cur_task = NULL;
        }
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        if (it->task_pos) {
                it->cur_task = list_entry(it->task_pos, struct task_struct,
@@ -4289,7 +4295,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
                css_task_iter_advance(it);
        }
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        return it->cur_task;
 }
@@ -4303,10 +4309,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
 void css_task_iter_end(struct css_task_iter *it)
 {
        if (it->cur_cset) {
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                list_del(&it->iters_node);
                put_css_set_locked(it->cur_cset);
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
        }
 
        if (it->cur_task)
@@ -4338,10 +4344,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
        mutex_lock(&cgroup_mutex);
 
        /* all tasks in @from are being moved, all csets are source */
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &from->cset_links, cset_link)
                cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        ret = cgroup_migrate_prepare_dst(&preloaded_csets);
        if (ret)
@@ -5063,6 +5069,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
        memset(css, 0, sizeof(*css));
        css->cgroup = cgrp;
        css->ss = ss;
+       css->id = -1;
        INIT_LIST_HEAD(&css->sibling);
        INIT_LIST_HEAD(&css->children);
        css->serial_nr = css_serial_nr_next++;
@@ -5150,7 +5157,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
 
        err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
        if (err < 0)
-               goto err_free_percpu_ref;
+               goto err_free_css;
        css->id = err;
 
        /* @css is ready to be brought online now, make it visible */
@@ -5174,9 +5181,6 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
 
 err_list_del:
        list_del_rcu(&css->sibling);
-       cgroup_idr_remove(&ss->css_idr, css->id);
-err_free_percpu_ref:
-       percpu_ref_exit(&css->refcnt);
 err_free_css:
        call_rcu(&css->rcu_head, css_free_rcu_fn);
        return ERR_PTR(err);
@@ -5451,10 +5455,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
         */
        cgrp->self.flags &= ~CSS_ONLINE;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &cgrp->cset_links, cset_link)
                link->cset->dead = true;
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
 
        /* initiate massacre of all css's */
        for_each_css(css, ssid, cgrp)
@@ -5725,7 +5729,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                goto out;
 
        mutex_lock(&cgroup_mutex);
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        for_each_root(root) {
                struct cgroup_subsys *ss;
@@ -5778,7 +5782,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
 
        retval = 0;
 out_unlock:
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
        kfree(buf);
 out:
@@ -5923,13 +5927,13 @@ void cgroup_post_fork(struct task_struct *child)
        if (use_task_css_set_links) {
                struct css_set *cset;
 
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                cset = task_css_set(current);
                if (list_empty(&child->cg_list)) {
                        get_css_set(cset);
                        css_set_move_task(child, NULL, cset, false);
                }
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
        }
 
        /*
@@ -5974,9 +5978,9 @@ void cgroup_exit(struct task_struct *tsk)
        cset = task_css_set(tsk);
 
        if (!list_empty(&tsk->cg_list)) {
-               spin_lock_bh(&css_set_lock);
+               spin_lock_irq(&css_set_lock);
                css_set_move_task(tsk, cset, NULL, false);
-               spin_unlock_bh(&css_set_lock);
+               spin_unlock_irq(&css_set_lock);
        } else {
                get_css_set(cset);
        }
@@ -6044,9 +6048,9 @@ static void cgroup_release_agent(struct work_struct *work)
        if (!pathbuf || !agentbuf)
                goto out;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        if (!path)
                goto out;
 
@@ -6306,12 +6310,12 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
                return ERR_PTR(-EPERM);
 
        mutex_lock(&cgroup_mutex);
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
 
        cset = task_css_set(current);
        get_css_set(cset);
 
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        mutex_unlock(&cgroup_mutex);
 
        new_ns = alloc_cgroup_ns();
@@ -6435,7 +6439,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
        if (!name_buf)
                return -ENOMEM;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        cset = rcu_dereference(current->cgroups);
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
@@ -6446,7 +6450,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
                           c->root->hierarchy_id, name_buf);
        }
        rcu_read_unlock();
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        kfree(name_buf);
        return 0;
 }
@@ -6457,7 +6461,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
        struct cgroup_subsys_state *css = seq_css(seq);
        struct cgrp_cset_link *link;
 
-       spin_lock_bh(&css_set_lock);
+       spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
                struct css_set *cset = link->cset;
                struct task_struct *task;
@@ -6480,7 +6484,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
        overflow:
                seq_puts(seq, "  ...\n");
        }
-       spin_unlock_bh(&css_set_lock);
+       spin_unlock_irq(&css_set_lock);
        return 0;
 }
 
index d948e44c471ea89aa2953f7a687c6b5783bbdf0e..7b61887f7ccdf57fdcb3083574a56c5580110d73 100644 (file)
@@ -1201,6 +1201,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                .teardown               = takedown_cpu,
                .cant_stop              = true,
        },
+#else
+       [CPUHP_BRINGUP_CPU] = { },
 #endif
 };
 
index 274450efea90eebeb5f3fa4ddb777c2ea2e6ef73..43d43a2d5811d548271c138c0e1ed31d541c19ad 100644 (file)
@@ -1678,12 +1678,33 @@ static bool is_orphaned_event(struct perf_event *event)
        return event->state == PERF_EVENT_STATE_DEAD;
 }
 
-static inline int pmu_filter_match(struct perf_event *event)
+static inline int __pmu_filter_match(struct perf_event *event)
 {
        struct pmu *pmu = event->pmu;
        return pmu->filter_match ? pmu->filter_match(event) : 1;
 }
 
+/*
+ * Check whether we should attempt to schedule an event group based on
+ * PMU-specific filtering. An event group can consist of HW and SW events,
+ * potentially with a SW leader, so we must check all the filters to
+ * determine whether a group is schedulable:
+ */
+static inline int pmu_filter_match(struct perf_event *event)
+{
+       struct perf_event *child;
+
+       if (!__pmu_filter_match(event))
+               return 0;
+
+       list_for_each_entry(child, &event->sibling_list, group_entry) {
+               if (!__pmu_filter_match(child))
+                       return 0;
+       }
+
+       return 1;
+}
+
 static inline int
 event_filter_match(struct perf_event *event)
 {
@@ -3862,10 +3883,8 @@ static void _free_event(struct perf_event *event)
        if (event->ctx)
                put_ctx(event->ctx);
 
-       if (event->pmu) {
-               exclusive_event_destroy(event);
-               module_put(event->pmu->module);
-       }
+       exclusive_event_destroy(event);
+       module_put(event->pmu->module);
 
        call_rcu(&event->rcu_head, free_event_rcu);
 }
@@ -7531,7 +7550,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
        prog = event->tp_event->prog;
        if (prog) {
                event->tp_event->prog = NULL;
-               bpf_prog_put(prog);
+               bpf_prog_put_rcu(prog);
        }
 }
 
index 5c2c355aa97ff4552c151f796d18cc8732017677..4a7ec0c6c88c9ea85b2c74c6a749e11533220d44 100644 (file)
@@ -148,18 +148,18 @@ static inline void free_task_struct(struct task_struct *tsk)
 }
 #endif
 
-void __weak arch_release_thread_info(struct thread_info *ti)
+void __weak arch_release_thread_stack(unsigned long *stack)
 {
 }
 
-#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
 
 /*
  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
  * kmemcache based allocator.
  */
 # if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
                                                  int node)
 {
        struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
@@ -172,33 +172,33 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
        return page ? page_address(page) : NULL;
 }
 
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_stack(unsigned long *stack)
 {
-       struct page *page = virt_to_page(ti);
+       struct page *page = virt_to_page(stack);
 
        memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
                                    -(1 << THREAD_SIZE_ORDER));
        __free_kmem_pages(page, THREAD_SIZE_ORDER);
 }
 # else
-static struct kmem_cache *thread_info_cache;
+static struct kmem_cache *thread_stack_cache;
 
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
                                                  int node)
 {
-       return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+       return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
 }
 
-static void free_thread_info(struct thread_info *ti)
+static void free_thread_stack(unsigned long *stack)
 {
-       kmem_cache_free(thread_info_cache, ti);
+       kmem_cache_free(thread_stack_cache, stack);
 }
 
-void thread_info_cache_init(void)
+void thread_stack_cache_init(void)
 {
-       thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+       thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
                                              THREAD_SIZE, 0, NULL);
-       BUG_ON(thread_info_cache == NULL);
+       BUG_ON(thread_stack_cache == NULL);
 }
 # endif
 #endif
@@ -221,9 +221,9 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(unsigned long *stack, int account)
 {
-       struct zone *zone = page_zone(virt_to_page(ti));
+       struct zone *zone = page_zone(virt_to_page(stack));
 
        mod_zone_page_state(zone, NR_KERNEL_STACK, account);
 }
@@ -231,8 +231,8 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 void free_task(struct task_struct *tsk)
 {
        account_kernel_stack(tsk->stack, -1);
-       arch_release_thread_info(tsk->stack);
-       free_thread_info(tsk->stack);
+       arch_release_thread_stack(tsk->stack);
+       free_thread_stack(tsk->stack);
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
        put_seccomp_filter(tsk);
@@ -343,7 +343,7 @@ void set_task_stack_end_magic(struct task_struct *tsk)
 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
        struct task_struct *tsk;
-       struct thread_info *ti;
+       unsigned long *stack;
        int err;
 
        if (node == NUMA_NO_NODE)
@@ -352,15 +352,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        if (!tsk)
                return NULL;
 
-       ti = alloc_thread_info_node(tsk, node);
-       if (!ti)
+       stack = alloc_thread_stack_node(tsk, node);
+       if (!stack)
                goto free_tsk;
 
        err = arch_dup_task_struct(tsk, orig);
        if (err)
-               goto free_ti;
+               goto free_stack;
 
-       tsk->stack = ti;
+       tsk->stack = stack;
 #ifdef CONFIG_SECCOMP
        /*
         * We must handle setting up seccomp filters once we're under
@@ -392,14 +392,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        tsk->task_frag.page = NULL;
        tsk->wake_q.next = NULL;
 
-       account_kernel_stack(ti, 1);
+       account_kernel_stack(stack, 1);
 
        kcov_task_init(tsk);
 
        return tsk;
 
-free_ti:
-       free_thread_info(ti);
+free_stack:
+       free_thread_stack(stack);
 free_tsk:
        free_task_struct(tsk);
        return NULL;
index ee25f5ba4aca13f8a05a9e6c3a4c5a2aff768131..33664f70e2d25e880efdbc8695fcc12989871150 100644 (file)
@@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
-       struct page *page;
+       struct page *page, *tail;
        struct address_space *mapping;
        int err, ro = 0;
 
@@ -530,7 +530,15 @@ again:
         * considered here and page lock forces unnecessarily serialization
         * From this point on, mapping will be re-verified if necessary and
         * page lock will be acquired only if it is unavoidable
-        */
+        *
+        * Mapping checks require the head page for any compound page, so the
+        * head page and mapping are looked up now. For anonymous pages, it
+        * does not matter if the page splits in the future as the key is
+        * based on the address. For filesystem-backed pages, the tail is
+        * required as the index of the page determines the key. For
+        * base pages, there is no tail page and tail == page.
+        */
+       tail = page;
        page = compound_head(page);
        mapping = READ_ONCE(page->mapping);
 
@@ -654,7 +662,7 @@ again:
 
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
                key->shared.inode = inode;
-               key->shared.pgoff = basepage_index(page);
+               key->shared.pgoff = basepage_index(tail);
                rcu_read_unlock();
        }
 
index e25e92fb44face315265d43d981b71e193693f2a..6a5c239c7669c5ad8e01565be5a6fd38ae7d4452 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
 #define GCOV_COUNTERS                  10
 #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
 #define GCOV_COUNTERS                  9
index c42742208e5eefb028e084398d380cb4fe0e4e64..89b49f6773f02227eb84df54f214110ef1c101f8 100644 (file)
@@ -125,7 +125,7 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 
        domain = data->domain;
        if (WARN_ON(domain == NULL))
-               return;
+               return -EINVAL;
 
        if (!irq_domain_is_ipi(domain)) {
                pr_warn("Trying to destroy a non IPI domain!\n");
index 05254eeb4b4e485be75bacff667c8ed3aab4a200..4b353e0be1215a081e2cf367d3a3074700e4a685 100644 (file)
@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
+       int v, v1;
+
        STATIC_KEY_CHECK_USE();
-       if (atomic_inc_not_zero(&key->enabled))
-               return;
+
+       /*
+        * Careful if we get concurrent static_key_slow_inc() calls;
+        * later calls must wait for the first one to _finish_ the
+        * jump_label_update() process.  At the same time, however,
+        * the jump_label_update() call below wants to see
+        * static_key_enabled(&key) for jumps to be updated properly.
+        *
+        * So give a special meaning to negative key->enabled: it sends
+        * static_key_slow_inc() down the slow path, and it is non-zero
+        * so it counts as "enabled" in jump_label_update().  Note that
+        * atomic_inc_unless_negative() checks >= 0, so roll our own.
+        */
+       for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+               v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+               if (likely(v1 == v))
+                       return;
+       }
 
        jump_label_lock();
-       if (atomic_inc_return(&key->enabled) == 1)
+       if (atomic_read(&key->enabled) == 0) {
+               atomic_set(&key->enabled, -1);
                jump_label_update(key);
+               atomic_set(&key->enabled, 1);
+       } else {
+               atomic_inc(&key->enabled);
+       }
        jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
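(Editor's note: the cmpxchg loop above is effectively "increment only if
strictly positive", which atomic_inc_unless_negative() cannot express since
it also accepts zero. A standalone sketch of that primitive, with an
illustrative name:)

static bool inc_if_positive(atomic_t *v)
{
	int cur, prev;

	for (cur = atomic_read(v); cur > 0; cur = prev) {
		prev = atomic_cmpxchg(v, cur, cur + 1);
		if (prev == cur)	/* cmpxchg returns the prior value */
			return true;
	}
	return false;	/* saw 0 or negative: caller takes the locked slow path */
}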
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
 static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
 {
+       /*
+        * The negative count check is valid even when a negative
+        * key->enabled is in use by static_key_slow_inc(); a
+        * __static_key_slow_dec() before the first static_key_slow_inc()
+        * returns is unbalanced, because all other static_key_slow_inc()
+        * instances block while the update is in progress.
+        */
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
index a02f2dddd1d710b7ea33bd58963e2d9d388266c1..8d44b3fea9d08f901e3b24ed4a619a4883d4042f 100644 (file)
@@ -264,7 +264,12 @@ static const struct file_operations kcov_fops = {
 
 static int __init kcov_init(void)
 {
-       if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
+       /*
+        * The kcov debugfs file won't ever get removed and thus,
+        * there is no need to protect it against removal races. The
+        * use of debugfs_create_file_unsafe() is actually safe here.
+        */
+       if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
                pr_err("failed to create kcov in debugfs\n");
                return -ENOMEM;
        }
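(Editor's note: for contrast, a hedged sketch of the two registration styles;
the "foo" file and fops are placeholders. The _unsafe variant skips the
removal-race protection proxy, which is only acceptable when, as with "kcov"
above, the file is never removed.)

	/* default: fops get wrapped in a proxy guarding against removal */
	debugfs_create_file("foo", 0600, NULL, NULL, &foo_fops);

	/* lifetime-is-forever case: no proxy, caller vouches for the file */
	debugfs_create_file_unsafe("foo", 0600, NULL, NULL, &foo_fops);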
index 3ef3736002d895854794a4d940adcd96288640f2..9c951fade415be1549b568d86275e0327bbdba58 100644 (file)
@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
 }
 
 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                           struct thread_info *ti)
+                           struct task_struct *task)
 {
        SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
 
        /* Mark the current thread as blocked on the lock: */
-       ti->task->blocked_on = waiter;
+       task->blocked_on = waiter;
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                        struct thread_info *ti)
+                        struct task_struct *task)
 {
        DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
-       DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
-       DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
-       ti->task->blocked_on = NULL;
+       DEBUG_LOCKS_WARN_ON(waiter->task != task);
+       DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+       task->blocked_on = NULL;
 
        list_del_init(&waiter->list);
        waiter->task = NULL;
index 0799fd3e4cfacbfdac923c6dedba278dbc36e0f9..d06ae3bb46c5f9f928cf7123464f49e284ab3ecf 100644 (file)
@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
                                   struct mutex_waiter *waiter,
-                                  struct thread_info *ti);
+                                  struct task_struct *task);
 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                               struct thread_info *ti);
+                               struct task_struct *task);
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
                             struct lock_class_key *key);
index e364b424b019ff51de0bde17fe2611444b5c1efb..a70b90db3909c1d3a3cbfaf45d6e27f90a786b1b 100644 (file)
@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
        if (!hold_ctx)
                return 0;
 
-       if (unlikely(ctx == hold_ctx))
-               return -EALREADY;
-
        if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
            (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        unsigned long flags;
        int ret;
 
+       if (use_ww_ctx) {
+               struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+               if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+                       return -EALREADY;
+       }
+
        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
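(Editor's note: a sketch of the case the hoisted check catches — locking the
same ww_mutex twice within one acquire context now fails fast with -EALREADY
even in the no-debug fast path. The object and class names are illustrative.)

	static DEFINE_WW_CLASS(demo_ww_class);
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &demo_ww_class);
	ret = ww_mutex_lock(&obj->lock, &ctx);	/* first lock: returns 0 */
	ret = ww_mutex_lock(&obj->lock, &ctx);	/* same lock + ctx: -EALREADY */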
 
@@ -534,7 +537,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                goto skip_wait;
 
        debug_mutex_lock_common(lock, &waiter);
-       debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+       debug_mutex_add_waiter(lock, &waiter, task);
 
        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
@@ -581,7 +584,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        }
        __set_task_state(task, TASK_RUNNING);
 
-       mutex_remove_waiter(lock, &waiter, current_thread_info());
+       mutex_remove_waiter(lock, &waiter, task);
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
@@ -602,7 +605,7 @@ skip_wait:
        return 0;
 
 err:
-       mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+       mutex_remove_waiter(lock, &waiter, task);
        spin_unlock_mutex(&lock->wait_lock, flags);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
index 5cda397607f25011eedcc4cbcab7aeef9d211321..a68bae5e852a08f1c0d75587b57c5e31835f937e 100644 (file)
@@ -13,7 +13,7 @@
                do { spin_lock(lock); (void)(flags); } while (0)
 #define spin_unlock_mutex(lock, flags) \
                do { spin_unlock(lock); (void)(flags); } while (0)
-#define mutex_remove_waiter(lock, waiter, ti) \
+#define mutex_remove_waiter(lock, waiter, task) \
                __list_del((waiter)->list.prev, (waiter)->list.next)
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
index ce2f75e32ae155b30cfe324c56f9bc526771b66a..5fc8c311b8fe59d46decc2c5a049ce6e860a07b8 100644 (file)
@@ -267,6 +267,66 @@ static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath      native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():    *,1,0 -> *,0,1
+ *
+ *  (2) set_locked():                  t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():      t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourselves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more interesting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient; however, that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+       u32 val;
+
+       for (;;) {
+               val = atomic_read(&lock->val);
+
+               if (!val) /* not locked, we're done */
+                       goto done;
+
+               if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+                       break;
+
+               /* not locked, but pending, wait until we observe the lock */
+               cpu_relax();
+       }
+
+       /* any unlock is good */
+       while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+               cpu_relax();
+
+done:
+       smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
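(Editor's note: a hedged sketch of the classic spin_unlock_wait() pattern this
helper serves — publish a state change, then wait out any critical section
that may have started before the change was visible. Names are placeholders
and the exact barriers required depend on the surrounding code.)

	WRITE_ONCE(obj->dying, true);		/* publish the new state */
	smp_mb();				/* order the store vs. the lock read */
	queued_spin_unlock_wait(&obj->lock);	/* drain the current holder */
	/* any later lock acquirer must now observe obj->dying */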
 
 /**
index df058bed53cebea8172fceaa1a97041d29a78fa6..0c2ee9761d57b7aa585e0191b5fbfcd7d84d6ae4 100644 (file)
@@ -146,6 +146,18 @@ int freeze_processes(void)
        if (!error && !oom_killer_disable())
                error = -EBUSY;
 
+       /*
+        * There is a hard-to-fix race between the oom_reaper kernel thread
+        * and oom_killer_disable: oom_reaper calls exit_oom_victim before
+        * the victim reaches exit_mm, so try to freeze all the tasks again
+        * and catch such a leftover task.
+        */
+       if (!error) {
+               pr_info("Double checking all user space processes after OOM killer disable... ");
+               error = try_to_freeze_tasks(true);
+               pr_cont("\n");
+       }
+
        if (error)
                thaw_processes();
        return error;
index 074994bcfa9be14336ecb06912a969f6d9546c9d..04d7cf3ef8cf932e036c7c76f6a27cefa962efc0 100644 (file)
@@ -614,6 +614,7 @@ free_bufs:
 
        kref_put(&chan->kref, relay_destroy_channel);
        mutex_unlock(&relay_channels_mutex);
+       kfree(chan);
        return NULL;
 }
 EXPORT_SYMBOL_GPL(relay_open);
index 7f2cae4620c7a949fdc294c06211884e2ca83afb..97ee9ac7e97c7743249295fa0c4d2da32740bbf7 100644 (file)
@@ -1536,7 +1536,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
        for (;;) {
                /* Any allowed, online CPU? */
                for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
-                       if (!cpu_active(dest_cpu))
+                       if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
+                               continue;
+                       if (!cpu_online(dest_cpu))
                                continue;
                        goto out;
                }
@@ -2253,9 +2255,11 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
 #endif
 #endif
 
+#ifdef CONFIG_SCHEDSTATS
+
 DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+static bool __initdata __sched_schedstats = false;
 
-#ifdef CONFIG_SCHEDSTATS
 static void set_schedstats(bool enabled)
 {
        if (enabled)
@@ -2278,11 +2282,16 @@ static int __init setup_schedstats(char *str)
        if (!str)
                goto out;
 
+       /*
+        * This code is called before jump labels have been set up, so we can't
+        * change the static branch directly just yet.  Instead set a temporary
+        * variable so init_schedstats() can do it later.
+        */
        if (!strcmp(str, "enable")) {
-               set_schedstats(true);
+               __sched_schedstats = true;
                ret = 1;
        } else if (!strcmp(str, "disable")) {
-               set_schedstats(false);
+               __sched_schedstats = false;
                ret = 1;
        }
 out:
@@ -2293,6 +2302,11 @@ out:
 }
 __setup("schedstats=", setup_schedstats);
 
+static void __init init_schedstats(void)
+{
+       set_schedstats(__sched_schedstats);
+}
+
 #ifdef CONFIG_PROC_SYSCTL
 int sysctl_schedstats(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -2313,8 +2327,10 @@ int sysctl_schedstats(struct ctl_table *table, int write,
                set_schedstats(state);
        return err;
 }
-#endif
-#endif
+#endif /* CONFIG_PROC_SYSCTL */
+#else  /* !CONFIG_SCHEDSTATS */
+static inline void init_schedstats(void) {}
+#endif /* CONFIG_SCHEDSTATS */
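
[Note: the hunks above capture an early boot parameter in a plain __initdata variable and only act on it once jump labels are usable. The pattern generalizes; a freestanding sketch with hypothetical names:

#include <stdbool.h>
#include <string.h>

static bool early_flag;		/* recorded before the subsystem is up */
static bool feature_on;		/* the real switch, set only when safe */

/* Early parser: just record the request, touch nothing else. */
static int parse_flag_early(const char *str)
{
	early_flag = (str && strcmp(str, "enable") == 0);
	return 1;
}

/* Called once initialization has progressed far enough to act,
 * as init_schedstats() does from sched_init(). */
static void apply_early_flag(void)
{
	feature_on = early_flag;
}
]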
 
 /*
  * fork()/clone()-time setup:
@@ -2521,10 +2537,9 @@ void wake_up_new_task(struct task_struct *p)
         */
        set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
-       /* Post initialize new task's util average when its cfs_rq is set */
+       rq = __task_rq_lock(p, &rf);
        post_init_entity_util_avg(&p->se);
 
-       rq = __task_rq_lock(p, &rf);
        activate_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p);
@@ -3156,7 +3171,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 static inline void schedule_debug(struct task_struct *prev)
 {
 #ifdef CONFIG_SCHED_STACK_END_CHECK
-       BUG_ON(task_stack_end_corrupted(prev));
+       if (task_stack_end_corrupted(prev))
+               panic("corrupted stack end detected inside scheduler\n");
 #endif
 
        if (unlikely(in_atomic_preempt_off())) {
@@ -5133,14 +5149,16 @@ void show_state_filter(unsigned long state_filter)
                /*
                 * reset the NMI-timeout, listing all files on a slow
                 * console might take a lot of time:
+                * Also, reset softlockup watchdogs on all CPUs, because
+                * another CPU might be blocked waiting for us to process
+                * an IPI.
                 */
                touch_nmi_watchdog();
+               touch_all_softlockup_watchdogs();
                if (!state_filter || (p->state & state_filter))
                        sched_show_task(p);
        }
 
-       touch_all_softlockup_watchdogs();
-
 #ifdef CONFIG_SCHED_DEBUG
        if (!state_filter)
                sysrq_sched_debug_show();
@@ -5376,13 +5394,15 @@ void idle_task_exit(void)
 /*
  * Since this CPU is going 'away' for a while, fold any nr_active delta
  * we might have. Assumes we're called after migrate_tasks() so that the
- * nr_active count is stable.
+ * nr_active count is stable. We need to take the teardown thread which
+ * is calling this into account, so we hand in adjust = 1 to the load
+ * calculation.
  *
  * Also see the comment "Global load-average calculations".
  */
 static void calc_load_migrate(struct rq *rq)
 {
-       long delta = calc_load_fold_active(rq);
+       long delta = calc_load_fold_active(rq, 1);
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 }
@@ -7487,6 +7507,8 @@ void __init sched_init(void)
 #endif
        init_sched_fair_class();
 
+       init_schedstats();
+
        scheduler_running = 1;
 }
 
index cf905f655ba120fd91d2dd023c6a5a93ad699d4b..0368c393a3362d981e79745716cbb59b2989dac8 100644 (file)
@@ -427,19 +427,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                SPLIT_NS(p->se.vruntime),
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
-#ifdef CONFIG_SCHEDSTATS
-       if (schedstat_enabled()) {
-               SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-                       SPLIT_NS(p->se.statistics.wait_sum),
-                       SPLIT_NS(p->se.sum_exec_runtime),
-                       SPLIT_NS(p->se.statistics.sum_sleep_runtime));
-       }
-#else
+
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-               0LL, 0L,
+               SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
                SPLIT_NS(p->se.sum_exec_runtime),
-               0LL, 0L);
-#endif
+               SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));
+
 #ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
index 218f8e83db731e4afe4d4aaf7d7919a588087a12..c8c5d2d484249048cca7dd62fdfc38fbe7de2883 100644 (file)
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
        }
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-       long tg_weight;
+       long tg_weight, load, shares;
 
        /*
-        * Use this CPU's real-time load instead of the last load contribution
-        * as the updating of the contribution is delayed, and we will use the
-        * real-time load to calc the share. See update_tg_load_avg().
+        * This really should be: cfs_rq->avg.load_avg, but instead we use
+        * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+        * the shares for small weight interactive tasks.
         */
-       tg_weight = atomic_long_read(&tg->load_avg);
-       tg_weight -= cfs_rq->tg_load_avg_contrib;
-       tg_weight += cfs_rq->load.weight;
+       load = scale_load_down(cfs_rq->load.weight);
 
-       return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-       long tg_weight, load, shares;
+       tg_weight = atomic_long_read(&tg->load_avg);
 
-       tg_weight = calc_tg_weight(tg, cfs_rq);
-       load = cfs_rq->load.weight;
+       /* Ensure tg_weight >= load */
+       tg_weight -= cfs_rq->tg_load_avg_contrib;
+       tg_weight += load;
 
        shares = (tg->shares * load);
        if (tg_weight)
@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
        return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
 {
@@ -2904,6 +2897,23 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
        }
 }
 
+/*
+ * Unsigned subtract and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define sub_positive(_ptr, _val) do {                          \
+       typeof(_ptr) ptr = (_ptr);                              \
+       typeof(*ptr) val = (_val);                              \
+       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
+       res = var - val;                                        \
+       if (res > var)                                          \
+               res = 0;                                        \
+       WRITE_ONCE(*ptr, res);                                  \
+} while (0)
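
[Note: a user-space check of the clamp logic; the res > var test catches unsigned wrap-around. Demo only, single-threaded, so the READ_ONCE/WRITE_ONCE pairing is elided:

#include <stdio.h>

#define sub_positive_demo(ptr, val) do {		\
	unsigned long var = *(ptr);			\
	unsigned long res = var - (val);		\
	*(ptr) = (res > var) ? 0 : res;	/* wrapped? clamp to 0 */	\
} while (0)

int main(void)
{
	unsigned long avg = 5;

	sub_positive_demo(&avg, 8);	/* 5 - 8 wraps; clamped instead */
	printf("%lu\n", avg);		/* prints 0 */
	return 0;
}
]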
+
 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
 static inline int
 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
@@ -2913,15 +2923,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 
        if (atomic_long_read(&cfs_rq->removed_load_avg)) {
                s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
-               sa->load_avg = max_t(long, sa->load_avg - r, 0);
-               sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+               sub_positive(&sa->load_avg, r);
+               sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
                removed_load = 1;
        }
 
        if (atomic_long_read(&cfs_rq->removed_util_avg)) {
                long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
-               sa->util_avg = max_t(long, sa->util_avg - r, 0);
-               sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
+               sub_positive(&sa->util_avg, r);
+               sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
                removed_util = 1;
        }
 
@@ -2994,10 +3004,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
                          &se->avg, se->on_rq * scale_load_down(se->load.weight),
                          cfs_rq->curr == se, NULL);
 
-       cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
-       cfs_rq->avg.load_sum = max_t(s64,  cfs_rq->avg.load_sum - se->avg.load_sum, 0);
-       cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
-       cfs_rq->avg.util_sum = max_t(s32,  cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+       sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+       sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
+       sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
+       sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
 
        cfs_rq_util_change(cfs_rq);
 }
@@ -3246,7 +3256,7 @@ static inline void check_schedstat_required(void)
                        trace_sched_stat_iowait_enabled()  ||
                        trace_sched_stat_blocked_enabled() ||
                        trace_sched_stat_runtime_enabled())  {
-               pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
+               printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
                             "stat_blocked and stat_runtime require the "
                             "kernel parameter schedstats=enabled or "
                             "kernel.sched_schedstats=1\n");
@@ -4185,6 +4195,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
        if (!cfs_bandwidth_used())
                return;
 
+       /* Synchronize hierarchical throttle counter: */
+       if (unlikely(!cfs_rq->throttle_uptodate)) {
+               struct rq *rq = rq_of(cfs_rq);
+               struct cfs_rq *pcfs_rq;
+               struct task_group *tg;
+
+               cfs_rq->throttle_uptodate = 1;
+
+               /* Get closest up-to-date node, because leaves go first: */
+               for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+                       pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+                       if (pcfs_rq->throttle_uptodate)
+                               break;
+               }
+               if (tg) {
+                       cfs_rq->throttle_count = pcfs_rq->throttle_count;
+                       cfs_rq->throttled_clock_task = rq_clock_task(rq);
+               }
+       }
+
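
[Note: the lazy synchronization above — mark this node current, then copy state from the nearest ancestor that already is — can be sketched generically; hypothetical types, not the cfs_rq code:

struct lnode {
	struct lnode *parent;
	int uptodate;
	int count;
};

/* Leaves may hold stale counters; the closest up-to-date ancestor
 * is authoritative, because updates propagate top-down. */
static void sync_from_ancestor(struct lnode *n)
{
	struct lnode *p;

	if (n->uptodate)
		return;
	n->uptodate = 1;

	for (p = n->parent; p; p = p->parent)
		if (p->uptodate)
			break;
	if (p)
		n->count = p->count;
}
]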
        /* an active group must be handled by the update_curr()->put() path */
        if (!cfs_rq->runtime_enabled || cfs_rq->curr)
                return;
@@ -4500,15 +4530,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight) {
+                       /* Avoid re-evaluating load for this entity: */
+                       se = parent_entity(se);
                        /*
                         * Bias pick_next to pick a task from this cfs_rq, as
                         * p is sleeping when it is within its sched_slice.
                         */
-                       if (task_sleep && parent_entity(se))
-                               set_next_buddy(parent_entity(se));
-
-                       /* avoid re-evaluating load for this entity */
-                       se = parent_entity(se);
+                       if (task_sleep && se && !throttled_hierarchy(cfs_rq))
+                               set_next_buddy(se);
                        break;
                }
                flags |= DEQUEUE_SLEEP;
@@ -4910,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                return wl;
 
        for_each_sched_entity(se) {
-               long w, W;
+               struct cfs_rq *cfs_rq = se->my_q;
+               long W, w = cfs_rq_load_avg(cfs_rq);
 
-               tg = se->my_q->tg;
+               tg = cfs_rq->tg;
 
                /*
                 * W = @wg + \Sum rw_j
                 */
-               W = wg + calc_tg_weight(tg, se->my_q);
+               W = wg + atomic_long_read(&tg->load_avg);
+
+               /* Ensure \Sum rw_j >= rw_i */
+               W -= cfs_rq->tg_load_avg_contrib;
+               W += w;
 
                /*
                 * w = rw_i + @wl
                 */
-               w = cfs_rq_load_avg(se->my_q) + wl;
+               w += wl;
 
                /*
                 * wl = S * s'_i; see (2)
@@ -8496,8 +8530,9 @@ void free_fair_sched_group(struct task_group *tg)
 
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
-       struct cfs_rq *cfs_rq;
        struct sched_entity *se;
+       struct cfs_rq *cfs_rq;
+       struct rq *rq;
        int i;
 
        tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8512,6 +8547,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
        init_cfs_bandwidth(tg_cfs_bandwidth(tg));
 
        for_each_possible_cpu(i) {
+               rq = cpu_rq(i);
+
                cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
                                      GFP_KERNEL, cpu_to_node(i));
                if (!cfs_rq)
@@ -8525,7 +8562,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
                init_cfs_rq(cfs_rq);
                init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
                init_entity_runnable_average(se);
+
+               raw_spin_lock_irq(&rq->lock);
                post_init_entity_util_avg(se);
+               raw_spin_unlock_irq(&rq->lock);
        }
 
        return 1;
index bd12c6c714ecea0718565df4539304e3bf9d9e57..c5aeedf4e93ad8f8f5c2edaf7248ac0ce9c47e8e 100644 (file)
@@ -127,7 +127,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
  */
 static void cpuidle_idle_call(void)
 {
-       struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+       struct cpuidle_device *dev = cpuidle_get_device();
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;
 
index b0b93fd33af9e4bb4d61edcda77d3b761cb9b8de..a2d6eb71f06b80527b86dd99a83f6d4621265cec 100644 (file)
@@ -78,11 +78,11 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
        loads[2] = (avenrun[2] + offset) << shift;
 }
 
-long calc_load_fold_active(struct rq *this_rq)
+long calc_load_fold_active(struct rq *this_rq, long adjust)
 {
        long nr_active, delta = 0;
 
-       nr_active = this_rq->nr_running;
+       nr_active = this_rq->nr_running - adjust;
        nr_active += (long)this_rq->nr_uninterruptible;
 
        if (nr_active != this_rq->calc_load_active) {
@@ -188,7 +188,7 @@ void calc_load_enter_idle(void)
         * We're going into NOHZ mode, if there's any pending delta, fold it
         * into the pending idle delta.
         */
-       delta = calc_load_fold_active(this_rq);
+       delta = calc_load_fold_active(this_rq, 0);
        if (delta) {
                int idx = calc_load_write_idx();
 
@@ -389,7 +389,7 @@ void calc_global_load_tick(struct rq *this_rq)
        if (time_before(jiffies, this_rq->calc_load_update))
                return;
 
-       delta  = calc_load_fold_active(this_rq);
+       delta  = calc_load_fold_active(this_rq, 0);
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 
index 72f1f3087b04a72fc0afc9ff81d768018eb05391..898c0d2f18feb78e73746f0f3546b6f4de375dca 100644 (file)
@@ -28,7 +28,7 @@ extern unsigned long calc_load_update;
 extern atomic_long_t calc_load_tasks;
 
 extern void calc_global_load_tick(struct rq *this_rq);
-extern long calc_load_fold_active(struct rq *this_rq);
+extern long calc_load_fold_active(struct rq *this_rq, long adjust);
 
 #ifdef CONFIG_SMP
 extern void cpu_load_update_active(struct rq *this_rq);
@@ -437,7 +437,7 @@ struct cfs_rq {
 
        u64 throttled_clock, throttled_clock_task;
        u64 throttled_clock_task_time;
-       int throttled, throttle_count;
+       int throttled, throttle_count, throttle_uptodate;
        struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
index 70b3b6a20fb0e362f4c816fe0f069c7c8576c7f4..78955cbea31c4a5378bc0b73f5f3bbcbdfa23d75 100644 (file)
@@ -33,6 +33,8 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 # define schedstat_inc(rq, field)      do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
 # define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
 # define schedstat_set(var, val)       do { if (schedstat_enabled()) { var = (val); } } while (0)
+# define schedstat_val(rq, field)      ((schedstat_enabled()) ? (rq)->field : 0)
+
 #else /* !CONFIG_SCHEDSTATS */
 static inline void
 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
@@ -47,6 +49,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 # define schedstat_inc(rq, field)      do { } while (0)
 # define schedstat_add(rq, field, amt) do { } while (0)
 # define schedstat_set(var, val)       do { } while (0)
+# define schedstat_val(rq, field)      0
 #endif
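
[Note: schedstat_val() complements the statement-style macros with an expression form, which is what lets the print_task() hunk above drop its #ifdef. A stand-alone sketch; stats_enabled and the struct are illustrative:

#include <stdio.h>

static int stats_enabled = 1;	/* stand-in for schedstat_enabled() */

struct rq_demo {
	unsigned long long wait_sum;
};

/* Expression form: usable inline, reads as 0 when stats are off. */
#define stat_val(rq, field)	(stats_enabled ? (rq)->field : 0)

int main(void)
{
	struct rq_demo rq = { .wait_sum = 42 };

	printf("%llu\n", (unsigned long long)stat_val(&rq, wait_sum));
	return 0;
}
]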
 
 #ifdef CONFIG_SCHED_INFO
index 8c7392c4fdbd9ace42b096a2710257f1ea70e5ca..e99df0ff1d424528dd3e54663472dd5fc4a6c796 100644 (file)
@@ -425,6 +425,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer)
 {
        debug_object_free(timer, &hrtimer_debug_descr);
 }
+EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
 
 #else
 static inline void debug_hrtimer_init(struct hrtimer *timer) { }
index 1cafba860b08ceb6030fa2a1f237e3950a4e0aa7..39008d78927acb4f9a62f582cb7baba3f27620ee 100644 (file)
@@ -777,6 +777,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                        timer->it.cpu.expires = 0;
                        sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                           &itp->it_value);
+                       return;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        unlock_task_sighand(p, &flags);
index 780bcbe1d4de33bbb24d2328b231bb056bc641b5..26f603da7e26867fc47835a8fcd3eca4121e2d86 100644 (file)
@@ -198,7 +198,7 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
-       file = (struct file *)array->ptrs[index];
+       file = READ_ONCE(array->ptrs[index]);
        if (unlikely(!file))
                return -ENOENT;
 
@@ -209,6 +209,10 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
            event->pmu->count)
                return -EINVAL;
 
+       if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
+                    event->attr.type != PERF_TYPE_RAW))
+               return -EINVAL;
+
        /*
         * we don't know if the function is run successfully by the
         * return value. It can be judged in other places, such as
@@ -247,7 +251,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
-       file = (struct file *)array->ptrs[index];
+       file = READ_ONCE(array->ptrs[index]);
        if (unlikely(!file))
                return -ENOENT;
 
@@ -349,7 +353,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
 }
 
 /* bpf+kprobe programs can access fields of 'struct pt_regs' */
-static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+                                       enum bpf_reg_type *reg_type)
 {
        /* check bounds */
        if (off < 0 || off >= sizeof(struct pt_regs))
@@ -427,7 +432,8 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
        }
 }
 
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+                                   enum bpf_reg_type *reg_type)
 {
        if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
                return false;
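
[Note: replacing the plain cast with READ_ONCE() above forces a single, untorn load of a slot that another CPU may clear concurrently. The shape of the idiom as a sketch; READ_ONCE_DEMO is a local stand-in using the GCC typeof extension:

#include <stddef.h>

#define READ_ONCE_DEMO(x)	(*(volatile __typeof__(x) *)&(x))

struct file;	/* opaque for the sketch */

static struct file *lookup_slot(struct file **ptrs, size_t index)
{
	/* One load; the compiler may not re-read ptrs[index] later. */
	struct file *f = READ_ONCE_DEMO(ptrs[index]);

	return f;	/* caller must tolerate NULL: slot can be cleared */
}
]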
index f96f0383f6c6bfe40cba9226b97f8885723a4d9d..ad1d6164e94603cf29f1b26f0a58f8c37988d57b 100644 (file)
@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
 static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
 {
        struct trace_bprintk_fmt *pos;
+
+       if (!fmt)
+               return ERR_PTR(-EINVAL);
+
        list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
                if (!strcmp(pos->fmt, fmt))
                        return pos;
@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
        for (iter = start; iter < end; iter++) {
                struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
                if (tb_fmt) {
-                       *iter = tb_fmt->fmt;
+                       if (!IS_ERR(tb_fmt))
+                               *iter = tb_fmt->fmt;
                        continue;
                }
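
[Note: lookup_format() now distinguishes "found nothing" (NULL) from "bad input" by encoding an errno in the pointer, which is why the caller must test IS_ERR() before dereferencing. Roughly how that encoding works — a sketch, not the linux/err.h source:

/* Small negative errnos live in the top page of the address space,
 * so they can never collide with a valid pointer. */
static inline void *err_ptr(long err)
{
	return (void *)err;
}

static inline long ptr_err(const void *p)
{
	return (long)p;
}

static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}
]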
 
index e1c0e996b5ae63175feb24d5d6a6c8b541b63287..97e7b793df35be3a1ce01d3bf22ed2dc2d28fd84 100644 (file)
@@ -4600,15 +4600,11 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
        if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
                return;
 
-       /* is @cpu the only online CPU? */
        cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
-       if (cpumask_weight(&cpumask) != 1)
-               return;
 
        /* as we're called from CPU_ONLINE, the following shouldn't fail */
        for_each_pool_worker(worker, pool)
-               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
-                                                 pool->attrs->cpumask) < 0);
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
 }
 
 /*
index 77d7d034bac394d72feb746745f9c95496018e9b..b9cfdbfae9aaa90a6a3a85f83887128335fe36b7 100644 (file)
@@ -1841,6 +1841,9 @@ config TEST_BITMAP
 
          If unsure, say N.
 
+config TEST_UUID
+       tristate "Test functions located in the uuid module at runtime"
+
 config TEST_RHASHTABLE
        tristate "Perform selftest on resizable hash table"
        default n
index 499fb354d627f749635a85c72247d3769751d018..ff6a7a6c63951f080a655df67f7ad0524a03201a 100644 (file)
@@ -58,6 +58,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
 obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_UUID) += test_uuid.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
new file mode 100644 (file)
index 0000000..547d312
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Test cases for lib/uuid.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+
+struct test_uuid_data {
+       const char *uuid;
+       uuid_le le;
+       uuid_be be;
+};
+
+static const struct test_uuid_data test_uuid_test_data[] = {
+       {
+               .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
+               .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+               .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+       },
+       {
+               .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
+               .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+               .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+       },
+       {
+               .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
+               .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+               .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+       },
+};
+
+static const char * const test_uuid_wrong_data[] = {
+       "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */
+       "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */
+       "0cb4ddff-a545-4401-9d06-688af53e",     /* not enough data */
+};
+
+static unsigned total_tests __initdata;
+static unsigned failed_tests __initdata;
+
+static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
+                                   const char *data, const char *actual)
+{
+       pr_err("%s test #%u %s %s data: '%s'\n",
+              prefix,
+              total_tests,
+              wrong ? "passed on wrong" : "failed on",
+              be ? "BE" : "LE",
+              data);
+       if (actual && *actual)
+               pr_err("%s test #%u actual data: '%s'\n",
+                      prefix,
+                      total_tests,
+                      actual);
+       failed_tests++;
+}
+
+static void __init test_uuid_test(const struct test_uuid_data *data)
+{
+       uuid_le le;
+       uuid_be be;
+       char buf[48];
+
+       /* LE */
+       total_tests++;
+       if (uuid_le_to_bin(data->uuid, &le))
+               test_uuid_failed("conversion", false, false, data->uuid, NULL);
+
+       total_tests++;
+       if (uuid_le_cmp(data->le, le)) {
+               sprintf(buf, "%pUl", &le);
+               test_uuid_failed("cmp", false, false, data->uuid, buf);
+       }
+
+       /* BE */
+       total_tests++;
+       if (uuid_be_to_bin(data->uuid, &be))
+               test_uuid_failed("conversion", false, true, data->uuid, NULL);
+
+       total_tests++;
+       if (uuid_be_cmp(data->be, be)) {
+               sprintf(buf, "%pUb", &be);
+               test_uuid_failed("cmp", false, true, data->uuid, buf);
+       }
+}
+
+static void __init test_uuid_wrong(const char *data)
+{
+       uuid_le le;
+       uuid_be be;
+
+       /* LE */
+       total_tests++;
+       if (!uuid_le_to_bin(data, &le))
+               test_uuid_failed("negative", true, false, data, NULL);
+
+       /* BE */
+       total_tests++;
+       if (!uuid_be_to_bin(data, &be))
+               test_uuid_failed("negative", true, true, data, NULL);
+}
+
+static int __init test_uuid_init(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
+               test_uuid_test(&test_uuid_test_data[i]);
+
+       for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
+               test_uuid_wrong(test_uuid_wrong_data[i]);
+
+       if (failed_tests == 0)
+               pr_info("all %u tests passed\n", total_tests);
+       else
+               pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
+
+       return failed_tests ? -EINVAL : 0;
+}
+module_init(test_uuid_init);
+
+static void __exit test_uuid_exit(void)
+{
+       /* do nothing */
+}
+module_exit(test_uuid_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
index e116ae5fa00f2b417fe0bc6efe6452454873858e..37687af77ff847aeb47f88c8ea5d9b6cda8ac5b3 100644 (file)
@@ -106,8 +106,8 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
                return -EINVAL;
 
        for (i = 0; i < 16; i++) {
-               int hi = hex_to_bin(uuid[si[i]] + 0);
-               int lo = hex_to_bin(uuid[si[i]] + 1);
+               int hi = hex_to_bin(uuid[si[i] + 0]);
+               int lo = hex_to_bin(uuid[si[i] + 1]);
 
                b[ei[i]] = (hi << 4) | lo;
        }
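
[Note: the one-character fix above moves the + 0/+ 1 inside the index: the buggy form added to the character value, so the low nibble was parsed from the wrong hex digit entirely. Demonstrated in isolation; hex_to_bin_demo is a local helper, not the kernel's hex_to_bin:

#include <stdio.h>

static int hex_to_bin_demo(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

int main(void)
{
	const char uuid[] = "c3";

	/* Buggy: adds 1 to the character, so 'c' becomes 'd' (13). */
	printf("%d\n", hex_to_bin_demo(uuid[0] + 1));	/* 13 */
	/* Fixed: adds 1 to the index, reading the next digit '3'. */
	printf("%d\n", hex_to_bin_demo(uuid[0 + 1]));	/* 3 */
	return 0;
}
]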
index 1427366ad6735acfd09ce47eb6aa2bed042b4b3c..7bc04778f84dd6e2b68e4fadec74530f4bc64d57 100644 (file)
@@ -441,25 +441,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
+               if (!isolated)
+                       break;
+
                total_isolated += isolated;
+               cc->nr_freepages += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }
-
-               /* If a page was split, advance to the end of it */
-               if (isolated) {
-                       cc->nr_freepages += isolated;
-                       if (!strict &&
-                               cc->nr_migratepages <= cc->nr_freepages) {
-                               blockpfn += isolated;
-                               break;
-                       }
-
-                       blockpfn += isolated - 1;
-                       cursor += isolated - 1;
-                       continue;
+               if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
+                       blockpfn += isolated;
+                       break;
                }
+               /* Advance to the end of split page */
+               blockpfn += isolated - 1;
+               cursor += isolated - 1;
+               continue;
 
 isolate_fail:
                if (strict)
@@ -469,6 +467,9 @@ isolate_fail:
 
        }
 
+       if (locked)
+               spin_unlock_irqrestore(&cc->zone->lock, flags);
+
        /*
         * There is a tiny chance that we have read bogus compound_order(),
         * so be careful to not go outside of the pageblock.
@@ -490,9 +491,6 @@ isolate_fail:
        if (strict && blockpfn < end_pfn)
                total_isolated = 0;
 
-       if (locked)
-               spin_unlock_irqrestore(&cc->zone->lock, flags);
-
        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, false);
@@ -1011,7 +1009,6 @@ static void isolate_freepages(struct compact_control *cc)
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages,
                                isolate_start_pfn = block_start_pfn) {
-
                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
@@ -1035,32 +1032,30 @@ static void isolate_freepages(struct compact_control *cc)
                        continue;
 
                /* Found a block suitable for isolating free pages from. */
-               isolate_freepages_block(cc, &isolate_start_pfn,
-                                       block_end_pfn, freelist, false);
+               isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+                                       freelist, false);
 
                /*
-                * If we isolated enough freepages, or aborted due to async
-                * compaction being contended, terminate the loop.
-                * Remember where the free scanner should restart next time,
-                * which is where isolate_freepages_block() left off.
-                * But if it scanned the whole pageblock, isolate_start_pfn
-                * now points at block_end_pfn, which is the start of the next
-                * pageblock.
-                * In that case we will however want to restart at the start
-                * of the previous pageblock.
+                * If we isolated enough freepages, or aborted due to lock
+                * contention, terminate.
                 */
                if ((cc->nr_freepages >= cc->nr_migratepages)
                                                        || cc->contended) {
-                       if (isolate_start_pfn >= block_end_pfn)
+                       if (isolate_start_pfn >= block_end_pfn) {
+                               /*
+                                * Restart at previous pageblock if more
+                                * freepages can be isolated next time.
+                                */
                                isolate_start_pfn =
                                        block_start_pfn - pageblock_nr_pages;
+                       }
                        break;
-               } else {
+               } else if (isolate_start_pfn < block_end_pfn) {
                        /*
-                        * isolate_freepages_block() should not terminate
-                        * prematurely unless contended, or isolated enough
+                        * If isolation failed early, do not continue
+                        * needlessly.
                         */
-                       VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+                       break;
                }
        }
 
index b8024fa7101d9a8ff625606de973b9b670d0ecf4..6c707bfe02fde002feae9ea2fab3fc9647c3baa0 100644 (file)
@@ -126,6 +126,17 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
                 */
                start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
                end_index = (endbyte >> PAGE_SHIFT);
+               if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
+                       /* First page is tricky as 0 - 1 = -1, but pgoff_t
+                        * is unsigned, so the end_index >= start_index
+                        * check below would be true and we'll discard the whole
+                        * file cache, which is not what was asked for.
+                        */
+                       if (end_index == 0)
+                               break;
+
+                       end_index--;
+               }
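
[Note: the end_index == 0 guard exists because pgoff_t is unsigned, so decrementing past zero wraps to the largest page offset and the invalidation would cover the whole file. The wrap in miniature:

#include <stdio.h>

int main(void)
{
	unsigned long end_index = 0;	/* pgoff_t is unsigned */

	end_index--;			/* 0 - 1 wraps, it is not -1 */
	printf("%lu\n", end_index);	/* huge value: ULONG_MAX */
	return 0;
}
]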
 
                if (end_index >= start_index) {
                        unsigned long count = invalidate_mapping_pages(mapping,
index 00ae878b2a3860ce5b4bfe41967430e781ebabc1..20f3b1f33f0e0f83486bba4df1c620f4ac8110f0 100644 (file)
@@ -2186,7 +2186,7 @@ repeat:
                if (file->f_ra.mmap_miss > 0)
                        file->f_ra.mmap_miss--;
                addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
-               do_set_pte(vma, addr, page, pte, false, false, true);
+               do_set_pte(vma, addr, page, pte, false, false);
                unlock_page(page);
                goto next;
 unlock:
index 9ed58530f6957bef1e2e0fa166ed87085dcae523..343a2b7e57aa25f7d02709ff13ab8a5e47c68a87 100644 (file)
@@ -1624,14 +1624,9 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (next - addr != HPAGE_PMD_SIZE) {
                get_page(page);
                spin_unlock(ptl);
-               if (split_huge_page(page)) {
-                       put_page(page);
-                       unlock_page(page);
-                       goto out_unlocked;
-               }
+               split_huge_page(page);
                put_page(page);
                unlock_page(page);
-               ret = 1;
                goto out_unlocked;
        }
 
@@ -2989,7 +2984,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long address, bool freeze)
+               unsigned long address, bool freeze, struct page *page)
 {
        spinlock_t *ptl;
        struct mm_struct *mm = vma->vm_mm;
@@ -2997,8 +2992,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
        mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
        ptl = pmd_lock(mm, pmd);
+
+       /*
+        * If caller asks to setup a migration entries, we need a page to check
+        * pmd against. Otherwise we can end up replacing wrong page.
+        */
+       VM_BUG_ON(freeze && !page);
+       if (page && page != pmd_page(*pmd))
+               goto out;
+
        if (pmd_trans_huge(*pmd)) {
-               struct page *page = pmd_page(*pmd);
+               page = pmd_page(*pmd);
                if (PageMlocked(page))
                        clear_page_mlock(page);
        } else if (!pmd_devmap(*pmd))
@@ -3025,24 +3029,8 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                return;
 
        pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
-               return;
 
-       /*
-        * If caller asks to setup a migration entries, we need a page to check
-        * pmd against. Otherwise we can end up replacing wrong page.
-        */
-       VM_BUG_ON(freeze && !page);
-       if (page && page != pmd_page(*pmd))
-               return;
-
-       /*
-        * Caller holds the mmap_sem write mode or the anon_vma lock,
-        * so a huge pmd cannot materialize from under us (khugepaged
-        * holds both the mmap_sem write mode and the anon_vma lock
-        * write mode).
-        */
-       __split_huge_pmd(vma, pmd, address, freeze);
+       __split_huge_pmd(vma, pmd, address, freeze, page);
 }
 
 void vma_adjust_trans_huge(struct vm_area_struct *vma,
index d26162e81feaa78b2fb4615839cbb580fc626f4b..addfe4accc076817cdbc4009bb9f44d5e61315d4 100644 (file)
@@ -832,8 +832,27 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
         * Only the process that called mmap() has reserves for
         * private mappings.
         */
-       if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
-               return true;
+       if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+               /*
+                * Like the shared case above, a hole punch or truncate
+                * could have been performed on the private mapping.
+                * Examine the value of chg to determine if reserves
+                * actually exist or were previously consumed.
+                * Very Subtle - The value of chg comes from a previous
+                * call to vma_needs_reserves().  The reserve map for
+                * private mappings has different (opposite) semantics
+                * than that of shared mappings.  vma_needs_reserves()
+                * has already taken this difference in semantics into
+                * account.  Therefore, the meaning of chg is the same
+                * as in the shared case above.  Code could easily be
+                * combined, but keeping it separate draws attention to
+                * subtle differences.
+                */
+               if (chg)
+                       return false;
+               else
+                       return true;
+       }
 
        return false;
 }
@@ -1011,6 +1030,7 @@ static void destroy_compound_gigantic_page(struct page *page,
        int nr_pages = 1 << order;
        struct page *p = page + 1;
 
+       atomic_set(compound_mapcount_ptr(page), 0);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                clear_compound_head(p);
                set_page_refcounted(p);
@@ -1816,6 +1836,25 @@ static long __vma_reservation_common(struct hstate *h,
 
        if (vma->vm_flags & VM_MAYSHARE)
                return ret;
+       else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
+               /*
+                * In most cases, reserves always exist for private mappings.
+                * However, a file associated with the mapping could have been
+                * hole punched or truncated after reserves were consumed.
+                * A subsequent fault on such a range will not use reserves.
+                * Subtle - The reserve map for private mappings has the
+                * opposite meaning than that of shared mappings.  If NO
+                * entry is in the reserve map, it means a reservation exists.
+                * If an entry exists in the reserve map, it means the
+                * reservation has already been consumed.  As a result, the
+                * return value of this routine is the opposite of the
+                * value returned from reserve map manipulation routines above.
+                */
+               if (ret)
+                       return 0;
+               else
+                       return 1;
+       }
        else
                return ret < 0 ? ret : 0;
 }
@@ -3344,7 +3383,7 @@ retry_avoidcopy:
        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
        if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
-               page_move_anon_rmap(old_page, vma, address);
+               page_move_anon_rmap(old_page, vma);
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }
@@ -4190,7 +4229,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
                if (saddr) {
                        spte = huge_pte_offset(svma->vm_mm, saddr);
                        if (spte) {
-                               mm_inc_nr_pmds(mm);
                                get_page(virt_to_page(spte));
                                break;
                        }
@@ -4205,9 +4243,9 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        if (pud_none(*pud)) {
                pud_populate(mm, pud,
                                (pmd_t *)((unsigned long)spte & PAGE_MASK));
+               mm_inc_nr_pmds(mm);
        } else {
                put_page(virt_to_page(spte));
-               mm_inc_nr_pmds(mm);
        }
        spin_unlock(ptl);
 out:
index a37e5b6f9d25ca1cfd31938eebee0f90054ed4de..2524ec880e242ceb6efb5531b21f4bade9e9a3c0 100644 (file)
@@ -24,7 +24,8 @@
  */
 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
                        __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
-                       __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
+                       __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
+                       __GFP_ATOMIC)
 
 /* The GFP flags allowed during early boot */
 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
index 18b6a2b8d183550daa6c3e64ae7ba50610ead79c..6845f9294696cedb9d6567bd09a7a86862bd5c3b 100644 (file)
@@ -508,7 +508,7 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
        kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
-void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
+static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 {
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
@@ -626,7 +626,7 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags)
                kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
-void kasan_kfree(void *ptr)
+void kasan_poison_kfree(void *ptr)
 {
        struct page *page;
 
@@ -636,7 +636,7 @@ void kasan_kfree(void *ptr)
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
-               kasan_slab_free(page->slab_cache, ptr);
+               kasan_poison_slab_free(page->slab_cache, ptr);
 }
 
 void kasan_kfree_large(const void *ptr)
@@ -763,8 +763,8 @@ static int kasan_mem_notifier(struct notifier_block *nb,
 
 static int __init kasan_memhotplug_init(void)
 {
-       pr_err("WARNING: KASAN doesn't support memory hot-add\n");
-       pr_err("Memory hot-add will be disabled\n");
+       pr_info("WARNING: KASAN doesn't support memory hot-add\n");
+       pr_info("Memory hot-add will be disabled\n");
 
        hotplug_memory_notifier(kasan_mem_notifier, 0);
 
index 4973505a9bdde8fda4291debdf21bdadfaa76c3b..65793f150d1f6e1f77d32ad64d37966f0ad9704c 100644 (file)
@@ -238,30 +238,23 @@ static void qlist_move_cache(struct qlist_head *from,
                                   struct qlist_head *to,
                                   struct kmem_cache *cache)
 {
-       struct qlist_node *prev = NULL, *curr;
+       struct qlist_node *curr;
 
        if (unlikely(qlist_empty(from)))
                return;
 
        curr = from->head;
+       qlist_init(from);
        while (curr) {
-               struct qlist_node *qlink = curr;
-               struct kmem_cache *obj_cache = qlink_to_cache(qlink);
-
-               if (obj_cache == cache) {
-                       if (unlikely(from->head == qlink)) {
-                               from->head = curr->next;
-                               prev = curr;
-                       } else
-                               prev->next = curr->next;
-                       if (unlikely(from->tail == qlink))
-                               from->tail = curr->next;
-                       from->bytes -= cache->size;
-                       qlist_put(to, qlink, cache->size);
-               } else {
-                       prev = curr;
-               }
-               curr = curr->next;
+               struct qlist_node *next = curr->next;
+               struct kmem_cache *obj_cache = qlink_to_cache(curr);
+
+               if (obj_cache == cache)
+                       qlist_put(to, curr, obj_cache->size);
+               else
+                       qlist_put(from, curr, obj_cache->size);
+
+               curr = next;
        }
 }
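
[Note: the rewrite empties the source list up front and then re-appends every node to one of the two lists, removing all the prev-pointer and head/tail unlink special cases of the old loop. The shape of the idiom as a self-contained sketch, not the qlist types:

struct node {
	struct node *next;
	int key;
};

struct slist {
	struct node *head, *tail;
};

static void slist_put(struct slist *l, struct node *n)
{
	n->next = NULL;
	if (l->tail)
		l->tail->next = n;
	else
		l->head = n;
	l->tail = n;
}

/* Detach everything, then push each node onto 'to' or back onto
 * 'from': appends only, no mid-list unlinking. */
static void move_matching(struct slist *from, struct slist *to, int key)
{
	struct node *curr = from->head;

	from->head = from->tail = NULL;		/* qlist_init() analogue */
	while (curr) {
		struct node *next = curr->next;

		slist_put(curr->key == key ? to : from, curr);
		curr = next;
	}
}
]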
 
index e6429926e95741055fe1aa87a3082e6ed073f909..04320d3adbef9922edda52e43cc516463e9e33c6 100644 (file)
@@ -307,8 +307,10 @@ static void hex_dump_object(struct seq_file *seq,
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 
        seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
+       kasan_disable_current();
        seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+       kasan_enable_current();
 }
 
 /*
index 925b431f3f03cf86402240b5dcc3fa784d808882..5339c89dff6317510b2710e9ab2770c23ac71e1e 100644 (file)
@@ -1608,7 +1608,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
 
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-       if (!current->memcg_may_oom || current->memcg_in_oom)
+       if (!current->memcg_may_oom)
                return;
        /*
         * We are in the middle of the charge context here, so we
@@ -2896,6 +2896,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
         * ordering is imposed by list_lru_node->lock taken by
         * memcg_drain_all_list_lrus().
         */
+       rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
        css_for_each_descendant_pre(css, &memcg->css) {
                child = mem_cgroup_from_css(css);
                BUG_ON(child->kmemcg_id != kmemcg_id);
@@ -2903,6 +2904,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
                if (!memcg->use_hierarchy)
                        break;
        }
+       rcu_read_unlock();
+
        memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
 
        memcg_free_cache_id(kmemcg_id);
@@ -4054,6 +4057,60 @@ static struct cftype mem_cgroup_legacy_files[] = {
        { },    /* terminate */
 };
 
+/*
+ * Private memory cgroup IDR
+ *
+ * Swap-out records and page cache shadow entries need to store memcg
+ * references in constrained space, so we maintain an ID space that is
+ * limited to 16 bits (MEM_CGROUP_ID_MAX), capping the total number of
+ * memory-controlled cgroups at 64k.
+ *
+ * However, there usually are many references to the oflline CSS after
+ * However, there usually are many references to the offline CSS after
+ * slab objects, that don't need to hang on to the ID. We want to keep
+ * those dead CSS from occupying IDs, or we might quickly exhaust the
+ * relatively small ID space and prevent the creation of new cgroups
+ * even when there are much fewer than 64k cgroups - possibly none.
+ *
+ * Maintain a private 16-bit ID space for memcg, and allow the ID to
+ * be freed and recycled when it's no longer needed, which is usually
+ * when the CSS is offlined.
+ *
+ * The only exception to that are records of swapped out tmpfs/shmem
+ * pages that need to be attributed to live ancestors on swapin. But
+ * those references are manageable from userspace.
+ */
+
+static DEFINE_IDR(mem_cgroup_idr);
+
+static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+       atomic_inc(&memcg->id.ref);
+}
+
+static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+       if (atomic_dec_and_test(&memcg->id.ref)) {
+               idr_remove(&mem_cgroup_idr, memcg->id.id);
+               memcg->id.id = 0;
+
+               /* Memcg ID pins CSS */
+               css_put(&memcg->css);
+       }
+}
+
+/**
+ * mem_cgroup_from_id - look up a memcg from a memcg id
+ * @id: the memcg id to look up
+ *
+ * Caller must hold rcu_read_lock().
+ */
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+       WARN_ON_ONCE(!rcu_read_lock_held());
+       return idr_find(&mem_cgroup_idr, id);
+}
+
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 {
        struct mem_cgroup_per_node *pn;
@@ -4113,6 +4170,12 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        if (!memcg)
                return NULL;
 
+       memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
+                                1, MEM_CGROUP_ID_MAX,
+                                GFP_KERNEL);
+       if (memcg->id.id < 0)
+               goto fail;
+
        memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
        if (!memcg->stat)
                goto fail;
@@ -4139,8 +4202,11 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&memcg->cgwb_list);
 #endif
+       idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
        return memcg;
 fail:
+       if (memcg->id.id > 0)
+               idr_remove(&mem_cgroup_idr, memcg->id.id);
        mem_cgroup_free(memcg);
        return NULL;
 }
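
[Note the ordering in the hunk above: the ID is reserved with a NULL pointer first, and the memcg is published via idr_replace() only once fully constructed, so a concurrent mem_cgroup_from_id() can never observe a half-initialized object. The two-phase pattern in kernel style; struct obj and the function are hypothetical:

#include <linux/idr.h>
#include <linux/slab.h>

struct obj {
	int id;
	/* ... payload ... */
};

static struct obj *obj_publish(struct idr *idr)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;

	/* Phase 1: reserve the ID, publish nothing (NULL). */
	o->id = idr_alloc(idr, NULL, 1, 0, GFP_KERNEL);
	if (o->id < 0) {
		kfree(o);
		return NULL;
	}

	/* ... finish initializing *o ... */

	/* Phase 2: lookups may now find the fully built object. */
	idr_replace(idr, o, o->id);
	return o;
}
]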
@@ -4200,15 +4266,14 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        return &memcg->css;
 fail:
        mem_cgroup_free(memcg);
-       return NULL;
+       return ERR_PTR(-ENOMEM);
 }
 
-static int
-mem_cgroup_css_online(struct cgroup_subsys_state *css)
+static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
 {
-       if (css->id > MEM_CGROUP_ID_MAX)
-               return -ENOSPC;
-
+       /* Online state pins memcg ID, memcg ID pins CSS */
+       mem_cgroup_id_get(mem_cgroup_from_css(css));
+       css_get(css);
        return 0;
 }
 
@@ -4231,6 +4296,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 
        memcg_offline_kmem(memcg);
        wb_memcg_offline(memcg);
+
+       mem_cgroup_id_put(memcg);
 }
 
 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
@@ -5541,6 +5608,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
        struct mem_cgroup *memcg;
        unsigned int nr_pages;
        bool compound;
+       unsigned long flags;
 
        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
@@ -5571,10 +5639,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 
        commit_charge(newpage, memcg, false);
 
-       local_irq_disable();
+       local_irq_save(flags);
        mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
        memcg_check_events(memcg, newpage);
-       local_irq_enable();
+       local_irq_restore(flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -5752,6 +5820,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        if (!memcg)
                return;
 
+       mem_cgroup_id_get(memcg);
        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
        VM_BUG_ON_PAGE(oldid, page);
        mem_cgroup_swap_statistics(memcg, true);
@@ -5770,6 +5839,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        VM_BUG_ON(!irqs_disabled());
        mem_cgroup_charge_statistics(memcg, page, false, -1);
        memcg_check_events(memcg, page);
+
+       if (!mem_cgroup_is_root(memcg))
+               css_put(&memcg->css);
 }
 
 /*
@@ -5800,11 +5872,11 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
            !page_counter_try_charge(&memcg->swap, 1, &counter))
                return -ENOMEM;
 
+       mem_cgroup_id_get(memcg);
        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
        VM_BUG_ON_PAGE(oldid, page);
        mem_cgroup_swap_statistics(memcg, true);
 
-       css_get(&memcg->css);
        return 0;
 }
 
@@ -5833,7 +5905,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
                                page_counter_uncharge(&memcg->memsw, 1);
                }
                mem_cgroup_swap_statistics(memcg, false);
-               css_put(&memcg->css);
+               mem_cgroup_id_put(memcg);
        }
        rcu_read_unlock();
 }
index 15322b73636b4272d6c84af6742a0822822301db..9e046819e619487ed5a8e3cc65f4131812168692 100644 (file)
@@ -2399,8 +2399,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                 * Protected against the rmap code by
                                 * the page lock.
                                 */
-                               page_move_anon_rmap(compound_head(old_page),
-                                                   vma, address);
+                               page_move_anon_rmap(old_page, vma);
                        }
                        unlock_page(old_page);
                        return wp_page_reuse(mm, vma, address, page_table, ptl,
@@ -2877,7 +2876,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
  * vm_ops->map_pages.
  */
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-               struct page *page, pte_t *pte, bool write, bool anon, bool old)
+               struct page *page, pte_t *pte, bool write, bool anon)
 {
        pte_t entry;
 
@@ -2885,8 +2884,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
        entry = mk_pte(page, vma->vm_page_prot);
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-       if (old)
-               entry = pte_mkold(entry);
        if (anon) {
                inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
                page_add_new_anon_rmap(page, vma, address, false);
@@ -2900,16 +2897,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
        update_mmu_cache(vma, address, pte);
 }
 
-/*
- * If the architecture emulates the "accessed" or "young" bit without HW
- * support, there is not much gain from fault_around.
- */
 static unsigned long fault_around_bytes __read_mostly =
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-       PAGE_SIZE;
-#else
        rounddown_pow_of_two(65536);
-#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
@@ -3032,20 +3021,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         */
        if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-               if (!pte_same(*pte, orig_pte))
-                       goto unlock_out;
                do_fault_around(vma, address, pte, pgoff, flags);
-               /* Check if the fault is handled by faultaround */
-               if (!pte_same(*pte, orig_pte)) {
-                       /*
-                        * Faultaround produces an old pte, but the pte we've
-                        * handled the fault for should be young.
-                        */
-                       pte_t entry = pte_mkyoung(*pte);
-                       if (ptep_set_access_flags(vma, address, pte, entry, 0))
-                               update_mmu_cache(vma, address, pte);
+               if (!pte_same(*pte, orig_pte))
                        goto unlock_out;
-               }
                pte_unmap_unlock(pte, ptl);
        }
 
@@ -3060,7 +3038,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                put_page(fault_page);
                return ret;
        }
-       do_set_pte(vma, address, fault_page, pte, false, false, false);
+       do_set_pte(vma, address, fault_page, pte, false, false);
        unlock_page(fault_page);
 unlock_out:
        pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3089,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                }
                goto uncharge_out;
        }
-       do_set_pte(vma, address, new_page, pte, true, true, false);
+       do_set_pte(vma, address, new_page, pte, true, true);
        mem_cgroup_commit_charge(new_page, memcg, false, false);
        lru_cache_add_active_or_unevictable(new_page, vma);
        pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3142,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                put_page(fault_page);
                return ret;
        }
-       do_set_pte(vma, address, fault_page, pte, true, false, false);
+       do_set_pte(vma, address, fault_page, pte, true, false);
        pte_unmap_unlock(pte, ptl);
 
        if (set_page_dirty(fault_page))
index 9e075f829d0d2598f6d404cb99b342bb6596b23f..8f65464da5de84d8bca0a8d97352f3c800f90614 100644 (file)
@@ -104,20 +104,16 @@ static inline void poison_element(mempool_t *pool, void *element)
 
 static void kasan_poison_element(mempool_t *pool, void *element)
 {
-       if (pool->alloc == mempool_alloc_slab)
-               kasan_poison_slab_free(pool->pool_data, element);
-       if (pool->alloc == mempool_kmalloc)
-               kasan_kfree(element);
+       if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+               kasan_poison_kfree(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 {
-       if (pool->alloc == mempool_alloc_slab)
-               kasan_slab_alloc(pool->pool_data, element, flags);
-       if (pool->alloc == mempool_kmalloc)
-               kasan_krealloc(element, (size_t)pool->pool_data, flags);
+       if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+               kasan_unpoison_slab(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
index 9baf41c877ffb98595aac0834b8aa43aedd6dc76..bd3fdc202e8be19411d641dcfdf5addd8c358944 100644 (file)
@@ -431,6 +431,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 
        return MIGRATEPAGE_SUCCESS;
 }
+EXPORT_SYMBOL(migrate_page_move_mapping);
 
 /*
  * The expected number of remaining references is the same as that
@@ -586,6 +587,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 
        mem_cgroup_migrate(page, newpage);
 }
+EXPORT_SYMBOL(migrate_page_copy);
 
 /************************************************************
  *                    Migration functions
index dfb1ab61fb2374379ca014ee4f764366dc20c081..ddf74487f8487f0d109416f93a433ba58117426e 100644 (file)
@@ -474,13 +474,8 @@ static bool __oom_reap_task(struct task_struct *tsk)
        p = find_lock_task_mm(tsk);
        if (!p)
                goto unlock_oom;
-
        mm = p->mm;
-       if (!atomic_inc_not_zero(&mm->mm_users)) {
-               task_unlock(p);
-               goto unlock_oom;
-       }
-
+       atomic_inc(&mm->mm_users);
        task_unlock(p);
 
        if (!down_read_trylock(&mm->mmap_sem)) {
@@ -625,8 +620,6 @@ void try_oom_reaper(struct task_struct *tsk)
        if (atomic_read(&mm->mm_users) > 1) {
                rcu_read_lock();
                for_each_process(p) {
-                       bool exiting;
-
                        if (!process_shares_mm(p, mm))
                                continue;
                        if (fatal_signal_pending(p))
@@ -636,10 +629,7 @@ void try_oom_reaper(struct task_struct *tsk)
                         * If the task is exiting, make sure the whole thread group
                         * is exiting and cannot access mm anymore.
                         */
-                       spin_lock_irq(&p->sighand->siglock);
-                       exiting = signal_group_exit(p->signal);
-                       spin_unlock_irq(&p->sighand->siglock);
-                       if (exiting)
+                       if (signal_group_exit(p->signal))
                                continue;
 
                        /* Give up */
index b9956fdee8f5d0ed55713eef51e13a6ea42b2e0d..e2481949494c4e6bf4cd0fd0799d178bd2203008 100644 (file)
@@ -373,8 +373,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
        struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
        unsigned long bytes = vm_dirty_bytes;
        unsigned long bg_bytes = dirty_background_bytes;
-       unsigned long ratio = vm_dirty_ratio;
-       unsigned long bg_ratio = dirty_background_ratio;
+       /* convert ratios to per-PAGE_SIZE for higher precision */
+       unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
+       unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
        unsigned long thresh;
        unsigned long bg_thresh;
        struct task_struct *tsk;
@@ -386,26 +387,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
                /*
                 * The byte settings can't be applied directly to memcg
                 * domains.  Convert them to ratios by scaling against
-                * globally available memory.
+                * globally available memory.  As the ratios are in units
+                * of 1/PAGE_SIZE, they can be obtained by dividing bytes
+                * by the number of available pages.
                 */
                if (bytes)
-                       ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
-                                   global_avail, 100UL);
+                       ratio = min(DIV_ROUND_UP(bytes, global_avail),
+                                   PAGE_SIZE);
                if (bg_bytes)
-                       bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
-                                      global_avail, 100UL);
+                       bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
+                                      PAGE_SIZE);
                bytes = bg_bytes = 0;
        }
 
        if (bytes)
                thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
        else
-               thresh = (ratio * available_memory) / 100;
+               thresh = (ratio * available_memory) / PAGE_SIZE;
 
        if (bg_bytes)
                bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
        else
-               bg_thresh = (bg_ratio * available_memory) / 100;
+               bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
        if (bg_thresh >= thresh)
                bg_thresh = thresh / 2;
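
The page-writeback.c hunk above converts the dirty ratios from whole percentages to units of 1/PAGE_SIZE. The extra precision matters when dirty byte limits are translated into ratios for memcg domains: a modest byte limit measured against a large global_avail rounds down to 0% in whole-percent arithmetic and yields a zero threshold. A minimal userspace sketch of both conversions, assuming PAGE_SIZE is 4096 and omitting the kernel's min() clamps:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long global_avail = 4UL << 20;	/* pages: 16 GiB of memory */
		unsigned long available_memory = global_avail;
		unsigned long bytes = 100UL << 20;	/* vm_dirty_bytes = 100 MiB */

		/* old scheme: whole-percent granularity */
		unsigned long old_ratio = DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / global_avail;
		unsigned long old_thresh = old_ratio * available_memory / 100;

		/* new scheme: per-PAGE_SIZE granularity */
		unsigned long new_ratio = DIV_ROUND_UP(bytes, global_avail);
		unsigned long new_thresh = new_ratio * available_memory / PAGE_SIZE;

		/* prints: old 0 pages, new 25600 pages (the exact answer) */
		printf("old %lu pages, new %lu pages\n", old_thresh, new_thresh);
		return 0;
	}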
index f8f3bfc435eec5bf0144798f6b890e6deacee0f6..8b3e1341b7544608cac4777a37bbd424432488e1 100644 (file)
@@ -286,7 +286,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
 /* Returns true if the struct page for the pfn is uninitialised */
 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 {
-       if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+       int nid = early_pfn_to_nid(pfn);
+
+       if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
                return true;
 
        return false;
@@ -656,6 +658,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
                return;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
        INIT_LIST_HEAD(&page->lru);
@@ -673,6 +678,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
                return;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
        set_page_private(page, 0);
@@ -1267,7 +1275,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
        spin_lock(&early_pfn_lock);
        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
        if (nid < 0)
-               nid = 0;
+               nid = first_online_node;
        spin_unlock(&early_pfn_lock);
 
        return nid;
@@ -2609,11 +2617,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
                                page = list_last_entry(list, struct page, lru);
                        else
                                page = list_first_entry(list, struct page, lru);
-               } while (page && check_new_pcp(page));
 
-               __dec_zone_state(zone, NR_ALLOC_BATCH);
-               list_del(&page->lru);
-               pcp->count--;
+                       __dec_zone_state(zone, NR_ALLOC_BATCH);
+                       list_del(&page->lru);
+                       pcp->count--;
+
+               } while (check_new_pcp(page));
        } else {
                /*
                 * We most definitely don't want callers attempting to
@@ -3023,6 +3032,7 @@ reset_fair:
                apply_fair = false;
                fair_skipped = false;
                reset_alloc_batches(ac->preferred_zoneref->zone);
+               z = ac->preferred_zoneref;
                goto zonelist_scan;
        }
 
@@ -3596,6 +3606,17 @@ retry:
         */
        alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+       /*
+        * Reset the zonelist iterators if memory policies can be ignored.
+        * These allocations are high priority and system rather than user
+        * oriented.
+        */
+       if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) {
+               ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+               ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+                                       ac->high_zoneidx, ac->nodemask);
+       }
+
        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, order,
                                alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
@@ -3604,12 +3625,6 @@ retry:
 
        /* Allocate without watermarks if the context allows */
        if (alloc_flags & ALLOC_NO_WATERMARKS) {
-               /*
-                * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
-                * the allocation is high priority and these type of
-                * allocations are system rather than user orientated
-                */
-               ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
                page = get_page_from_freelist(gfp_mask, order,
                                                ALLOC_NO_WATERMARKS, ac);
                if (page)
@@ -3808,7 +3823,11 @@ retry_cpuset:
        /* Dirty zone balancing only done in the fast path */
        ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
-       /* The preferred zone is used for statistics later */
+       /*
+        * The preferred zone is used for statistics but crucially it is
+        * also used as the starting point for the zonelist iterator. It
+        * may get reset for allocations that ignore memory policies.
+        */
        ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
                                        ac.high_zoneidx, ac.nodemask);
        if (!ac.preferred_zoneref) {
index 792b56da13d8564f4f6fbab68dcf46297bd13c30..fedeba88c9cb82acdbc5bcf5c6efab63279607d2 100644 (file)
@@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
        for (i = 0; i < (1 << order); i++) {
                page_ext = lookup_page_ext(page + i);
+               if (unlikely(!page_ext))
+                       continue;
                __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
        }
 }
@@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
+
        struct stack_trace trace = {
                .nr_entries = 0,
                .max_entries = ARRAY_SIZE(page_ext->trace_entries),
@@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
                .skip = 3,
        };
 
+       if (unlikely(!page_ext))
+               return;
+
        save_stack_trace(&trace);
 
        page_ext->order = order;
@@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
 
        page_ext->last_migrate_reason = reason;
 }
@@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 gfp_t __get_page_owner_gfp(struct page *page)
 {
        struct page_ext *page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               /*
+                * The caller just returns 0 if there is no valid gfp,
+                * so return 0 here too.
+                */
+               return 0;
 
        return page_ext->gfp_mask;
 }
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
        struct page_ext *new_ext = lookup_page_ext(newpage);
        int i;
 
+       if (unlikely(!old_ext || !new_ext))
+               return;
+
        new_ext->order = old_ext->order;
        new_ext->gfp_mask = old_ext->gfp_mask;
        new_ext->nr_entries = old_ext->nr_entries;
@@ -190,8 +207,15 @@ void __dump_page_owner(struct page *page)
                .nr_entries = page_ext->nr_entries,
                .entries = &page_ext->trace_entries[0],
        };
-       gfp_t gfp_mask = page_ext->gfp_mask;
-       int mt = gfpflags_to_migratetype(gfp_mask);
+       gfp_t gfp_mask;
+       int mt;
+
+       if (unlikely(!page_ext)) {
+               pr_alert("There is not page extension available.\n");
+               return;
+       }
+       gfp_mask = page_ext->gfp_mask;
+       mt = gfpflags_to_migratetype(gfp_mask);
 
        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not active (free page?)\n");
@@ -251,6 +275,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                }
 
                page_ext = lookup_page_ext(page);
+               if (unlikely(!page_ext))
+                       continue;
 
                /*
                 * Some pages could be missed by concurrent allocation or free,
@@ -317,6 +343,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
                                continue;
 
                        page_ext = lookup_page_ext(page);
+                       if (unlikely(!page_ext))
+                               continue;
 
                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
index 1eae5fad2446b4b7b069fde3d0e5b373418ea7bb..2e647c65916b91b00177837370e210d71c568f5e 100644 (file)
@@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page)
        struct page_ext *page_ext;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page)
        struct page_ext *page_ext;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page)
        struct page_ext *page_ext;
 
        page_ext = lookup_page_ext(page);
-       if (!page_ext)
+       if (unlikely(!page_ext))
                return false;
 
        return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
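
The page_ext consumers patched above (set_page_guard(), the page_owner hooks, the poison helpers, and pagetypeinfo_showmixedcount_print() further down) all gain the same guard: lookup_page_ext() may return NULL when the page_ext storage failed to allocate or a page is seen before the extension machinery is initialised, so callers bail out instead of dereferencing. A hedged, self-contained sketch of the pattern; struct obj_ext and its helpers are made-up stand-ins, not kernel API:

	#include <stdio.h>
	#include <stdlib.h>

	struct obj_ext { unsigned long flags; };

	static struct obj_ext *ext_table;	/* may legitimately be NULL */

	static struct obj_ext *lookup_obj_ext(int idx)
	{
		return ext_table ? &ext_table[idx] : NULL;
	}

	static void set_obj_flag(int idx, unsigned long bit)
	{
		struct obj_ext *ext = lookup_obj_ext(idx);

		if (!ext)	/* extension absent: degrade gracefully */
			return;
		ext->flags |= bit;
	}

	int main(void)
	{
		set_obj_flag(0, 1UL);	/* table not allocated: silent no-op */
		ext_table = calloc(4, sizeof(*ext_table));
		set_obj_flag(0, 1UL);
		printf("flags=%lu\n", ext_table[0].flags);	/* 1 */
		free(ext_table);
		return 0;
	}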
index 0c59684f1ff2ef5cdf3cd7ab688442e5afaf014a..9903830aaebbf00256fbb108891c2e5e09baecac 100644 (file)
@@ -112,7 +112,7 @@ struct pcpu_chunk {
        int                     map_used;       /* # of map entries used before the sentry */
        int                     map_alloc;      /* # of map entries allocated */
        int                     *map;           /* allocation map */
-       struct work_struct      map_extend_work;/* async ->map[] extension */
+       struct list_head        map_extend_list;/* on pcpu_map_extend_chunks */
 
        void                    *data;          /* chunk data */
        int                     first_free;     /* no free below this */
@@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
 static int pcpu_reserved_chunk_limit;
 
 static DEFINE_SPINLOCK(pcpu_lock);     /* all internal data structures */
-static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
+static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
 
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 
+/* chunks which need their map areas extended, protected by pcpu_lock */
+static LIST_HEAD(pcpu_map_extend_chunks);
+
 /*
  * The number of empty populated pages, protected by pcpu_lock.  The
  * reserved chunk doesn't contribute to the count.
@@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
 {
        int margin, new_alloc;
 
+       lockdep_assert_held(&pcpu_lock);
+
        if (is_atomic) {
                margin = 3;
 
                if (chunk->map_alloc <
-                   chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
-                   pcpu_async_enabled)
-                       schedule_work(&chunk->map_extend_work);
+                   chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
+                       if (list_empty(&chunk->map_extend_list)) {
+                               list_add_tail(&chunk->map_extend_list,
+                                             &pcpu_map_extend_chunks);
+                               pcpu_schedule_balance_work();
+                       }
+               }
        } else {
                margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
        }
@@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
        size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
        unsigned long flags;
 
+       lockdep_assert_held(&pcpu_alloc_mutex);
+
        new = pcpu_mem_zalloc(new_size);
        if (!new)
                return -ENOMEM;
@@ -467,20 +478,6 @@ out_unlock:
        return 0;
 }
 
-static void pcpu_map_extend_workfn(struct work_struct *work)
-{
-       struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
-                                               map_extend_work);
-       int new_alloc;
-
-       spin_lock_irq(&pcpu_lock);
-       new_alloc = pcpu_need_to_extend(chunk, false);
-       spin_unlock_irq(&pcpu_lock);
-
-       if (new_alloc)
-               pcpu_extend_area_map(chunk, new_alloc);
-}
-
 /**
  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
  * @chunk: chunk the candidate area belongs to
@@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
        chunk->map_used = 1;
 
        INIT_LIST_HEAD(&chunk->list);
-       INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
+       INIT_LIST_HEAD(&chunk->map_extend_list);
        chunk->free_size = pcpu_unit_size;
        chunk->contig_hint = pcpu_unit_size;
 
@@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                return NULL;
        }
 
+       if (!is_atomic)
+               mutex_lock(&pcpu_alloc_mutex);
+
        spin_lock_irqsave(&pcpu_lock, flags);
 
        /* serve reserved allocations from the reserved chunk if available */
@@ -967,12 +967,9 @@ restart:
        if (is_atomic)
                goto fail;
 
-       mutex_lock(&pcpu_alloc_mutex);
-
        if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
                chunk = pcpu_create_chunk();
                if (!chunk) {
-                       mutex_unlock(&pcpu_alloc_mutex);
                        err = "failed to allocate new chunk";
                        goto fail;
                }
@@ -983,7 +980,6 @@ restart:
                spin_lock_irqsave(&pcpu_lock, flags);
        }
 
-       mutex_unlock(&pcpu_alloc_mutex);
        goto restart;
 
 area_found:
@@ -993,8 +989,6 @@ area_found:
        if (!is_atomic) {
                int page_start, page_end, rs, re;
 
-               mutex_lock(&pcpu_alloc_mutex);
-
                page_start = PFN_DOWN(off);
                page_end = PFN_UP(off + size);
 
@@ -1005,7 +999,6 @@ area_found:
 
                        spin_lock_irqsave(&pcpu_lock, flags);
                        if (ret) {
-                               mutex_unlock(&pcpu_alloc_mutex);
                                pcpu_free_area(chunk, off, &occ_pages);
                                err = "failed to populate";
                                goto fail_unlock;
@@ -1045,6 +1038,8 @@ fail:
                /* see the flag handling in pcpu_balance_workfn() */
                pcpu_atomic_alloc_failed = true;
                pcpu_schedule_balance_work();
+       } else {
+               mutex_unlock(&pcpu_alloc_mutex);
        }
        return NULL;
 }
@@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
                if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
                        continue;
 
+               list_del_init(&chunk->map_extend_list);
                list_move(&chunk->list, &to_free);
        }
 
@@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
                pcpu_destroy_chunk(chunk);
        }
 
+       /* service chunks which requested async area map extension */
+       do {
+               int new_alloc = 0;
+
+               spin_lock_irq(&pcpu_lock);
+
+               chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
+                                       struct pcpu_chunk, map_extend_list);
+               if (chunk) {
+                       list_del_init(&chunk->map_extend_list);
+                       new_alloc = pcpu_need_to_extend(chunk, false);
+               }
+
+               spin_unlock_irq(&pcpu_lock);
+
+               if (new_alloc)
+                       pcpu_extend_area_map(chunk, new_alloc);
+       } while (chunk);
+
        /*
         * Ensure there are certain number of free populated pages for
         * atomic allocs.  Fill up from the most packed so that atomic
@@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         */
        schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
        INIT_LIST_HEAD(&schunk->list);
-       INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
+       INIT_LIST_HEAD(&schunk->map_extend_list);
        schunk->base_addr = base_addr;
        schunk->map = smap;
        schunk->map_alloc = ARRAY_SIZE(smap);
@@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
        if (dyn_size) {
                dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
                INIT_LIST_HEAD(&dchunk->list);
-               INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
+               INIT_LIST_HEAD(&dchunk->map_extend_list);
                dchunk->base_addr = base_addr;
                dchunk->map = dmap;
                dchunk->map_alloc = ARRAY_SIZE(dmap);
index 0ea5d9071b32b967d012f36e600a2ee75acd8f3d..701b93fea2a0677cd8a58e646360747bf5626c5a 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1084,23 +1084,20 @@ EXPORT_SYMBOL_GPL(page_mkclean);
  * page_move_anon_rmap - move a page to our anon_vma
  * @page:      the page to move to our anon_vma
  * @vma:       the vma the page belongs to
- * @address:   the user virtual address mapped
  *
  * When a page belongs exclusively to one process after a COW event,
  * that page can be moved into the anon_vma that belongs to just that
  * process, so the rmap code will not search the parent or sibling
  * processes.
  */
-void page_move_anon_rmap(struct page *page,
-       struct vm_area_struct *vma, unsigned long address)
+void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
 
+       page = compound_head(page);
+
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_VMA(!anon_vma, vma);
-       if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
-               address &= HPAGE_PMD_MASK;
-       VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        /*
@@ -1427,7 +1424,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        goto out;
        }
 
-       pte = page_check_address(page, mm, address, &ptl, 0);
+       pte = page_check_address(page, mm, address, &ptl,
+                                PageTransCompound(page));
        if (!pte)
                goto out;
 
index a36144909b2840213c087f3eb32fbd10d7c880ee..171dee7a131f6959fe444405f280374be7eb25d0 100644 (file)
@@ -2225,9 +2225,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                        error = shmem_getpage(inode, index, &page, SGP_FALLOC);
                if (error) {
                        /* Remove the !PageUptodate pages we added */
-                       shmem_undo_range(inode,
-                               (loff_t)start << PAGE_SHIFT,
-                               (loff_t)index << PAGE_SHIFT, true);
+                       if (index > start) {
+                               shmem_undo_range(inode,
+                                   (loff_t)start << PAGE_SHIFT,
+                                   ((loff_t)index << PAGE_SHIFT) - 1, true);
+                       }
                        goto undone;
                }
 
index a65dad7fdcd12495a51eabd91fc76ed96edb0576..82317abb03edc7aa2c89e0a20032fc57a532a723 100644 (file)
@@ -526,8 +526,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
                goto out_unlock;
 
        cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
-       cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
-                              css->id, memcg_name_buf);
+       cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
+                              css->serial_nr, memcg_name_buf);
        if (!cache_name)
                goto out_unlock;
 
index 95916142fc46d3a80a6ddfa54218e1de641f6045..90530ff8ed161864de461f208b8c511c369a167a 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -242,7 +242,7 @@ void rotate_reclaimable_page(struct page *page)
                get_page(page);
                local_irq_save(flags);
                pvec = this_cpu_ptr(&lru_rotate_pvecs);
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
@@ -296,7 +296,7 @@ void activate_page(struct page *page)
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 
                get_page(page);
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                put_cpu_var(activate_page_pvecs);
        }
@@ -391,9 +391,8 @@ static void __lru_cache_add(struct page *page)
        struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
        get_page(page);
-       if (!pagevec_space(pvec))
+       if (!pagevec_add(pvec, page) || PageCompound(page))
                __pagevec_lru_add(pvec);
-       pagevec_add(pvec, page);
        put_cpu_var(lru_add_pvec);
 }
 
@@ -628,7 +627,7 @@ void deactivate_file_page(struct page *page)
        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
 
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
                put_cpu_var(lru_deactivate_file_pvecs);
        }
@@ -648,7 +647,7 @@ void deactivate_page(struct page *page)
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 
                get_page(page);
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
                put_cpu_var(lru_deactivate_pvecs);
        }
@@ -667,6 +666,24 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
+/*
+ * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
+ * workqueue, aiding in getting memory freed.
+ */
+static struct workqueue_struct *lru_add_drain_wq;
+
+static int __init lru_init(void)
+{
+       lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
+
+       if (WARN(!lru_add_drain_wq,
+               "Failed to create workqueue lru_add_drain_wq"))
+               return -ENOMEM;
+
+       return 0;
+}
+early_initcall(lru_init);
+
 void lru_add_drain_all(void)
 {
        static DEFINE_MUTEX(lock);
@@ -686,7 +703,7 @@ void lru_add_drain_all(void)
                    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
-                       schedule_work_on(cpu, work);
+                       queue_work_on(cpu, lru_add_drain_wq, work);
                        cpumask_set_cpu(cpu, &has_work);
                }
        }
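
Every swap.c hunk above applies one rule: pagevec_add() returns the slots left after adding, so the per-CPU pagevec is drained when it fills (return value 0) or immediately when the page is compound, which keeps huge pages from lingering on per-CPU vectors; __lru_cache_add() is likewise reordered from check-then-add to add-then-drain. A hedged sketch of the batching pattern, with VEC_SIZE and the types as stand-ins for the kernel pagevec:

	#include <stdio.h>
	#include <stdbool.h>

	#define VEC_SIZE 14

	struct vec {
		int nr;
		int items[VEC_SIZE];
	};

	/* mirrors pagevec_add(): returns the space left after adding */
	static int vec_add(struct vec *v, int item)
	{
		v->items[v->nr++] = item;
		return VEC_SIZE - v->nr;
	}

	static void vec_flush(struct vec *v)
	{
		printf("flushing %d items\n", v->nr);
		v->nr = 0;
	}

	static void cache_add(struct vec *v, int item, bool compound)
	{
		if (!vec_add(v, item) || compound)
			vec_flush(v);
	}

	int main(void)
	{
		struct vec v = { 0 };
		int i;

		for (i = 0; i < 30; i++)
			cache_add(&v, i, i == 5);	/* item 5 acts like a THP */
		return 0;
	}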
index 0d457e7db8d6df5e3bd185ac2b2333830bccf7b4..c99463ac02fb56d270fdc4ea7ab749e1f58fe666 100644 (file)
@@ -252,7 +252,10 @@ static inline void free_swap_cache(struct page *page)
 void free_page_and_swap_cache(struct page *page)
 {
        free_swap_cache(page);
-       put_page(page);
+       if (is_huge_zero_page(page))
+               put_huge_zero_page();
+       else
+               put_page(page);
 }
 
 /*
index cf7ad1a53be03b866ecee6325c47abef278d70fc..e11475cdeb7adb66194d8fd985fcbe155be806f3 100644 (file)
@@ -1105,7 +1105,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
  */
 void vm_unmap_ram(const void *mem, unsigned int count)
 {
-       unsigned long size = count << PAGE_SHIFT;
+       unsigned long size = (unsigned long)count << PAGE_SHIFT;
        unsigned long addr = (unsigned long)mem;
 
        BUG_ON(!addr);
@@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(vm_unmap_ram);
  */
 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 {
-       unsigned long size = count << PAGE_SHIFT;
+       unsigned long size = (unsigned long)count << PAGE_SHIFT;
        unsigned long addr;
        void *mem;
 
@@ -1574,14 +1574,15 @@ void *vmap(struct page **pages, unsigned int count,
                unsigned long flags, pgprot_t prot)
 {
        struct vm_struct *area;
+       unsigned long size;             /* In bytes */
 
        might_sleep();
 
        if (count > totalram_pages)
                return NULL;
 
-       area = get_vm_area_caller((count << PAGE_SHIFT), flags,
-                                       __builtin_return_address(0));
+       size = (unsigned long)count << PAGE_SHIFT;
+       area = get_vm_area_caller(size, flags, __builtin_return_address(0));
        if (!area)
                return NULL;
 
index 77e42ef388c2a832169d4bcace8a505c45d95516..cb2a67bb41581de147427e2289190beea33f69da 100644 (file)
@@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                continue;
 
                        page_ext = lookup_page_ext(page);
+                       if (unlikely(!page_ext))
+                               continue;
 
                        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;
index 8a75f8d2916af99a1efb6101666038dcd4309c04..577277546d985db41aee898a3c69b1b0488d289b 100644 (file)
@@ -491,7 +491,7 @@ static int __init workingset_init(void)
        max_order = fls_long(totalram_pages - 1);
        if (max_order > timestamp_bits)
                bucket_order = max_order - timestamp_bits;
-       printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+       pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
               timestamp_bits, max_order, bucket_order);
 
        ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
index 34917d55d311222d4255b4d2e1185e01fa173d73..8f9e89ca1d312152711b23580cacf006407dff8b 100644 (file)
@@ -412,7 +412,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                /* HEADLESS page stored */
                bud = HEADLESS;
        } else {
-               bud = (handle - zhdr->first_num) & BUDDY_MASK;
+               bud = handle_to_buddy(handle);
 
                switch (bud) {
                case FIRST:
@@ -572,15 +572,19 @@ next:
                        pool->pages_nr--;
                        spin_unlock(&pool->lock);
                        return 0;
-               } else if (zhdr->first_chunks != 0 &&
-                          zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) {
-                       /* Full, add to buddied list */
-                       list_add(&zhdr->buddy, &pool->buddied);
-               } else if (!test_bit(PAGE_HEADLESS, &page->private)) {
-                       z3fold_compact_page(zhdr);
-                       /* add to unbuddied list */
-                       freechunks = num_free_chunks(zhdr);
-                       list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
+               }  else if (!test_bit(PAGE_HEADLESS, &page->private)) {
+                       if (zhdr->first_chunks != 0 &&
+                           zhdr->last_chunks != 0 &&
+                           zhdr->middle_chunks != 0) {
+                               /* Full, add to buddied list */
+                               list_add(&zhdr->buddy, &pool->buddied);
+                       } else {
+                               z3fold_compact_page(zhdr);
+                               /* add to unbuddied list */
+                               freechunks = num_free_chunks(zhdr);
+                               list_add(&zhdr->buddy,
+                                        &pool->unbuddied[freechunks]);
+                       }
                }
 
                /* add to beginning of LRU */
index a1e273af6fc81660dfd5ad088cb5b0ee336d650d..82a116ba590eb5d289a76f73ca09f1cab9b4fdf5 100644 (file)
@@ -290,6 +290,10 @@ static void vlan_sync_address(struct net_device *dev,
        if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
                return;
 
+       /* vlan continues to inherit address of lower device */
+       if (vlan_dev_inherit_address(vlandev, dev))
+               goto out;
+
        /* vlan address was different from the old address and is equal to
         * the new address */
        if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
@@ -302,6 +306,7 @@ static void vlan_sync_address(struct net_device *dev,
            !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
                dev_uc_add(dev, vlandev->dev_addr);
 
+out:
        ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
 }
 
index 9d010a09ab9880a6eb15cee0524b46be53d76587..cc15579780669a613165336387a3f8eabc669dec 100644 (file)
@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
 void vlan_setup(struct net_device *dev);
 int register_vlan_dev(struct net_device *dev);
 void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+bool vlan_dev_inherit_address(struct net_device *dev,
+                             struct net_device *real_dev);
 
 static inline u32 vlan_get_ingress_priority(struct net_device *dev,
                                            u16 vlan_tci)
index e7e62570bdb869f64e7198c53357e18702165bf1..516b0e73263c7cac1db1546d05eb6a1d6f642ab5 100644 (file)
@@ -146,10 +146,12 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 
 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
-       /* TODO: gotta make sure the underlying layer can handle it,
-        * maybe an IFF_VLAN_CAPABLE flag for devices?
-        */
-       if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+       unsigned int max_mtu = real_dev->mtu;
+
+       if (netif_reduces_vlan_mtu(real_dev))
+               max_mtu -= VLAN_HLEN;
+       if (max_mtu < new_mtu)
                return -ERANGE;
 
        dev->mtu = new_mtu;
@@ -245,6 +247,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
        strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
 }
 
+bool vlan_dev_inherit_address(struct net_device *dev,
+                             struct net_device *real_dev)
+{
+       if (dev->addr_assign_type != NET_ADDR_STOLEN)
+               return false;
+
+       ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+       call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+       return true;
+}
+
 static int vlan_dev_open(struct net_device *dev)
 {
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -255,7 +268,8 @@ static int vlan_dev_open(struct net_device *dev)
            !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                return -ENETDOWN;
 
-       if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
+       if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+           !vlan_dev_inherit_address(dev, real_dev)) {
                err = dev_uc_add(real_dev, dev->dev_addr);
                if (err < 0)
                        goto out;
@@ -560,8 +574,10 @@ static int vlan_dev_init(struct net_device *dev)
        /* ipv6 shared card related stuff */
        dev->dev_id = real_dev->dev_id;
 
-       if (is_zero_ether_addr(dev->dev_addr))
-               eth_hw_addr_inherit(dev, real_dev);
+       if (is_zero_ether_addr(dev->dev_addr)) {
+               ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+               dev->addr_assign_type = NET_ADDR_STOLEN;
+       }
        if (is_zero_ether_addr(dev->broadcast))
                memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
index c92b52f37d38de143022f172881dd03f076b0194..1270207f3d7c9dd6fde0e1f7839acfe18a4d82c9 100644 (file)
@@ -118,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
 {
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev;
+       unsigned int max_mtu;
        __be16 proto;
        int err;
 
@@ -144,9 +145,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
        if (err < 0)
                return err;
 
+       max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
+                                                    real_dev->mtu;
        if (!tb[IFLA_MTU])
-               dev->mtu = real_dev->mtu;
-       else if (dev->mtu > real_dev->mtu)
+               dev->mtu = max_mtu;
+       else if (dev->mtu > max_mtu)
                return -EINVAL;
 
        err = vlan_changelink(dev, tb, data);
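
Both MTU paths above now honour netif_reduces_vlan_mtu(): on lower devices whose MTU must also cover the 4-byte 802.1Q tag, the VLAN interface is capped at real_dev->mtu - VLAN_HLEN, so a 1500-byte lower device yields a 1496-byte VLAN MTU. A minimal sketch of the cap; reduces_vlan_mtu here is a stand-in for the kernel predicate:

	#include <stdio.h>
	#include <stdbool.h>

	#define VLAN_HLEN 4

	static unsigned int vlan_max_mtu(unsigned int real_mtu, bool reduces_vlan_mtu)
	{
		return reduces_vlan_mtu ? real_mtu - VLAN_HLEN : real_mtu;
	}

	int main(void)
	{
		printf("plain lower device:      %u\n", vlan_max_mtu(1500, false));	/* 1500 */
		printf("tag-in-MTU lower device: %u\n", vlan_max_mtu(1500, true));	/* 1496 */
		return 0;
	}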
index 4fd6af47383a014b72b3377fc1556c6bff18e304..adb6e3d21b1eb9222f90ab77383e20ea78f9336a 100644 (file)
@@ -124,7 +124,7 @@ as_indicate_complete:
                break;
        case as_addparty:
        case as_dropparty:
-               sk->sk_err_soft = msg->reply;
+               sk->sk_err_soft = -msg->reply;
                                        /* < 0 failure, otherwise ep_ref */
                clear_bit(ATM_VF_WAITING, &vcc->flags);
                break;
index 3fa0a9ee98d19ba5635554362ce23c3f80f0de69..878563a8354d107cca5253b87fb4d434cb23a0ef 100644 (file)
@@ -546,7 +546,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
                schedule();
        }
        finish_wait(sk_sleep(sk), &wait);
-       error = xchg(&sk->sk_err_soft, 0);
+       error = -xchg(&sk->sk_err_soft, 0);
 out:
        release_sock(sk);
        return error;
@@ -573,7 +573,7 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
                error = -EUNATCH;
                goto out;
        }
-       error = xchg(&sk->sk_err_soft, 0);
+       error = -xchg(&sk->sk_err_soft, 0);
 out:
        release_sock(sk);
        return error;
index fbd0acf80b13236bd8c768bc8bf5d69d6a7e7125..2fdebabbfacd14690abaca3c57dfcddf944466a1 100644 (file)
@@ -976,7 +976,8 @@ static int ax25_release(struct socket *sock)
                        release_sock(sk);
                        ax25_disconnect(ax25, 0);
                        lock_sock(sk);
-                       ax25_destroy_socket(ax25);
+                       if (!sock_flag(ax25->sk, SOCK_DESTROY))
+                               ax25_destroy_socket(ax25);
                        break;
 
                case AX25_STATE_3:
index 951cd57bb07df3930410a998b12c468ac413d62b..5237dff6941d81e3b2cd98c47013746c58798e77 100644 (file)
@@ -102,6 +102,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
        switch (ax25->state) {
 
        case AX25_STATE_0:
+       case AX25_STATE_2:
                /* Magic here: If we listen() and a new link dies before it
                   is accepted(), it isn't 'dead' so it doesn't get removed. */
                if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -111,6 +112,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
                                sock_hold(sk);
                                ax25_destroy_socket(ax25);
                                bh_unlock_sock(sk);
+                               /* Ungrab socket and destroy it */
                                sock_put(sk);
                        } else
                                ax25_destroy_socket(ax25);
@@ -213,7 +215,8 @@ void ax25_ds_t1_timeout(ax25_cb *ax25)
        case AX25_STATE_2:
                if (ax25->n2count == ax25->n2) {
                        ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
-                       ax25_disconnect(ax25, ETIMEDOUT);
+                       if (!sock_flag(ax25->sk, SOCK_DESTROY))
+                               ax25_disconnect(ax25, ETIMEDOUT);
                        return;
                } else {
                        ax25->n2count++;
index 004467c9e6e15ada9c6eae056cabff7dd8d5777e..2c0d6ef66f9d581d31b88ecf0875f78b90728a36 100644 (file)
@@ -38,6 +38,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
 
        switch (ax25->state) {
        case AX25_STATE_0:
+       case AX25_STATE_2:
                /* Magic here: If we listen() and a new link dies before it
                   is accepted(), it isn't 'dead' so it doesn't get removed. */
                if (!sk || sock_flag(sk, SOCK_DESTROY) ||
@@ -47,6 +48,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
                                sock_hold(sk);
                                ax25_destroy_socket(ax25);
                                bh_unlock_sock(sk);
+                               /* Ungrab socket and destroy it */
                                sock_put(sk);
                        } else
                                ax25_destroy_socket(ax25);
@@ -144,7 +146,8 @@ void ax25_std_t1timer_expiry(ax25_cb *ax25)
        case AX25_STATE_2:
                if (ax25->n2count == ax25->n2) {
                        ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
-                       ax25_disconnect(ax25, ETIMEDOUT);
+                       if (!sock_flag(ax25->sk, SOCK_DESTROY))
+                               ax25_disconnect(ax25, ETIMEDOUT);
                        return;
                } else {
                        ax25->n2count++;
index 3b78e8473a01b4a82e376266b04078e714ce1e26..655a7d4c96e1e26b7390b8c783cebb2a31ca4ca8 100644 (file)
@@ -264,7 +264,8 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
 {
        ax25_clear_queues(ax25);
 
-       ax25_stop_heartbeat(ax25);
+       if (!sock_flag(ax25->sk, SOCK_DESTROY))
+               ax25_stop_heartbeat(ax25);
        ax25_stop_t1timer(ax25);
        ax25_stop_t2timer(ax25);
        ax25_stop_t3timer(ax25);
index 748a9ead7ce50fd65a0a09f2b19eaa035a566327..825a5cdf4382d08ff3b90c7499fd605f940b150b 100644 (file)
@@ -177,10 +177,21 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
 static void batadv_claim_release(struct kref *ref)
 {
        struct batadv_bla_claim *claim;
+       struct batadv_bla_backbone_gw *old_backbone_gw;
 
        claim = container_of(ref, struct batadv_bla_claim, refcount);
 
-       batadv_backbone_gw_put(claim->backbone_gw);
+       spin_lock_bh(&claim->backbone_lock);
+       old_backbone_gw = claim->backbone_gw;
+       claim->backbone_gw = NULL;
+       spin_unlock_bh(&claim->backbone_lock);
+
+       spin_lock_bh(&old_backbone_gw->crc_lock);
+       old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+       spin_unlock_bh(&old_backbone_gw->crc_lock);
+
+       batadv_backbone_gw_put(old_backbone_gw);
+
        kfree_rcu(claim, rcu);
 }
 
@@ -418,9 +429,12 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
                break;
        }
 
-       if (vid & BATADV_VLAN_HAS_TAG)
+       if (vid & BATADV_VLAN_HAS_TAG) {
                skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
                                      vid & VLAN_VID_MASK);
+               if (!skb)
+                       goto out;
+       }
 
        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, soft_iface);
@@ -674,8 +688,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid,
                                 struct batadv_bla_backbone_gw *backbone_gw)
 {
+       struct batadv_bla_backbone_gw *old_backbone_gw;
        struct batadv_bla_claim *claim;
        struct batadv_bla_claim search_claim;
+       bool remove_crc = false;
        int hash_added;
 
        ether_addr_copy(search_claim.addr, mac);
@@ -689,8 +705,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                        return;
 
                ether_addr_copy(claim->addr, mac);
+               spin_lock_init(&claim->backbone_lock);
                claim->vid = vid;
                claim->lasttime = jiffies;
+               kref_get(&backbone_gw->refcount);
                claim->backbone_gw = backbone_gw;
 
                kref_init(&claim->refcount);
@@ -718,15 +736,26 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                           "bla_add_claim(): changing ownership for %pM, vid %d\n",
                           mac, BATADV_PRINT_VID(vid));
 
-               spin_lock_bh(&claim->backbone_gw->crc_lock);
-               claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
-               spin_unlock_bh(&claim->backbone_gw->crc_lock);
-               batadv_backbone_gw_put(claim->backbone_gw);
+               remove_crc = true;
        }
-       /* set (new) backbone gw */
+
+       /* replace backbone_gw atomically and adjust reference counters */
+       spin_lock_bh(&claim->backbone_lock);
+       old_backbone_gw = claim->backbone_gw;
        kref_get(&backbone_gw->refcount);
        claim->backbone_gw = backbone_gw;
+       spin_unlock_bh(&claim->backbone_lock);
 
+       if (remove_crc) {
+               /* remove claim address from old backbone_gw */
+               spin_lock_bh(&old_backbone_gw->crc_lock);
+               old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+               spin_unlock_bh(&old_backbone_gw->crc_lock);
+       }
+
+       batadv_backbone_gw_put(old_backbone_gw);
+
+       /* add claim address to new backbone_gw */
        spin_lock_bh(&backbone_gw->crc_lock);
        backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
        spin_unlock_bh(&backbone_gw->crc_lock);
@@ -736,6 +765,26 @@ claim_free_ref:
        batadv_claim_put(claim);
 }
 
+/**
+ * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ *  claim
+ * @claim: claim whose backbone_gw should be returned
+ *
+ * Return: valid reference to claim::backbone_gw
+ */
+static struct batadv_bla_backbone_gw *
+batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
+{
+       struct batadv_bla_backbone_gw *backbone_gw;
+
+       spin_lock_bh(&claim->backbone_lock);
+       backbone_gw = claim->backbone_gw;
+       kref_get(&backbone_gw->refcount);
+       spin_unlock_bh(&claim->backbone_lock);
+
+       return backbone_gw;
+}
+
 /**
  * batadv_bla_del_claim - delete a claim from the claim hash
  * @bat_priv: the bat priv with all the soft interface information
@@ -760,10 +809,6 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                           batadv_choose_claim, claim);
        batadv_claim_put(claim); /* reference from the hash is gone */
 
-       spin_lock_bh(&claim->backbone_gw->crc_lock);
-       claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
-       spin_unlock_bh(&claim->backbone_gw->crc_lock);
-
        /* don't need the reference from hash_find() anymore */
        batadv_claim_put(claim);
 }
@@ -1216,6 +1261,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    int now)
 {
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_bla_claim *claim;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
@@ -1230,14 +1276,17 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, head, hash_entry) {
+                       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
                        if (now)
                                goto purge_now;
-                       if (!batadv_compare_eth(claim->backbone_gw->orig,
+
+                       if (!batadv_compare_eth(backbone_gw->orig,
                                                primary_if->net_dev->dev_addr))
-                               continue;
+                               goto skip;
+
                        if (!batadv_has_timed_out(claim->lasttime,
                                                  BATADV_BLA_CLAIM_TIMEOUT))
-                               continue;
+                               goto skip;
 
                        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                                   "bla_purge_claims(): %pM, vid %d, time out\n",
@@ -1245,8 +1294,10 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 
 purge_now:
                        batadv_handle_unclaim(bat_priv, primary_if,
-                                             claim->backbone_gw->orig,
+                                             backbone_gw->orig,
                                              claim->addr, claim->vid);
+skip:
+                       batadv_backbone_gw_put(backbone_gw);
                }
                rcu_read_unlock();
        }
@@ -1757,9 +1808,11 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                   unsigned short vid, bool is_bcast)
 {
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
        struct batadv_hard_iface *primary_if;
+       bool own_claim;
        bool ret;
 
        ethhdr = eth_hdr(skb);
@@ -1794,8 +1847,12 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
        }
 
        /* if it is our own claim ... */
-       if (batadv_compare_eth(claim->backbone_gw->orig,
-                              primary_if->net_dev->dev_addr)) {
+       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+       own_claim = batadv_compare_eth(backbone_gw->orig,
+                                      primary_if->net_dev->dev_addr);
+       batadv_backbone_gw_put(backbone_gw);
+
+       if (own_claim) {
                /* ... allow it in any case */
                claim->lasttime = jiffies;
                goto allow;
@@ -1859,7 +1916,9 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
 {
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_hard_iface *primary_if;
+       bool client_roamed;
        bool ret = false;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -1889,8 +1948,12 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                goto allow;
 
        /* check if we are responsible. */
-       if (batadv_compare_eth(claim->backbone_gw->orig,
-                              primary_if->net_dev->dev_addr)) {
+       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+       client_roamed = batadv_compare_eth(backbone_gw->orig,
+                                          primary_if->net_dev->dev_addr);
+       batadv_backbone_gw_put(backbone_gw);
+
+       if (client_roamed) {
                /* if yes, the client has roamed and we have
                 * to unclaim it.
                 */
@@ -1938,6 +2001,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_bla_claim *claim;
        struct batadv_hard_iface *primary_if;
        struct hlist_head *head;
@@ -1962,17 +2026,21 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, head, hash_entry) {
-                       is_own = batadv_compare_eth(claim->backbone_gw->orig,
+                       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+
+                       is_own = batadv_compare_eth(backbone_gw->orig,
                                                    primary_addr);
 
-                       spin_lock_bh(&claim->backbone_gw->crc_lock);
-                       backbone_crc = claim->backbone_gw->crc;
-                       spin_unlock_bh(&claim->backbone_gw->crc_lock);
+                       spin_lock_bh(&backbone_gw->crc_lock);
+                       backbone_crc = backbone_gw->crc;
+                       spin_unlock_bh(&backbone_gw->crc_lock);
                        seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
                                   claim->addr, BATADV_PRINT_VID(claim->vid),
-                                  claim->backbone_gw->orig,
+                                  backbone_gw->orig,
                                   (is_own ? 'x' : ' '),
                                   backbone_crc);
+
+                       batadv_backbone_gw_put(backbone_gw);
                }
                rcu_read_unlock();
        }
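
The bridge-loop-avoidance changes above stop touching claim->backbone_gw directly: readers go through batadv_bla_claim_get_backbone_gw(), which rereads the pointer and takes a kref while holding claim->backbone_lock, so a concurrent ownership change in batadv_bla_add_claim() cannot free the backbone_gw between the read and its use. A hedged, single-threaded sketch of taking a counted reference under the lock that guards the pointer; a plain int stands in for kref and the lock itself is elided:

	#include <stdio.h>

	struct gw {
		int refcount;
	};

	struct claim {
		struct gw *backbone_gw;	/* guarded by backbone_lock in the kernel */
	};

	static struct gw *claim_get_gw(struct claim *c)
	{
		struct gw *gw = c->backbone_gw;	/* read under the lock */

		gw->refcount++;	/* kref_get() before the lock is dropped */
		return gw;
	}

	int main(void)
	{
		struct gw g = { .refcount = 1 };
		struct claim c = { .backbone_gw = &g };
		struct gw *ref = claim_get_gw(&c);

		printf("refcount=%d\n", ref->refcount);	/* 2 */
		return 0;
	}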
index 278800a99c69458c083da635bd39bcd19e788842..aee3b39914717390ed4fe2899a781b6e7625451f 100644 (file)
@@ -1009,9 +1009,12 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                if (!skb_new)
                        goto out;
 
-               if (vid & BATADV_VLAN_HAS_TAG)
+               if (vid & BATADV_VLAN_HAS_TAG) {
                        skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
                                                  vid & VLAN_VID_MASK);
+                       if (!skb_new)
+                               goto out;
+               }
 
                skb_reset_mac_header(skb_new);
                skb_new->protocol = eth_type_trans(skb_new,
@@ -1089,9 +1092,12 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
         */
        skb_reset_mac_header(skb_new);
 
-       if (vid & BATADV_VLAN_HAS_TAG)
+       if (vid & BATADV_VLAN_HAS_TAG) {
                skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
                                          vid & VLAN_VID_MASK);
+               if (!skb_new)
+                       goto out;
+       }
 
        /* To preserve backwards compatibility, the node chooses the outgoing
         * format based on the incoming request packet type. The assumption is
index 7f51bc2c06eb7699492c41f2fe46fe85b26147a2..ab8c4f9738fe2206f878ed9f0dfda8803455ec5b 100644 (file)
@@ -765,6 +765,8 @@ static void batadv_orig_node_release(struct kref *ref)
        struct batadv_neigh_node *neigh_node;
        struct batadv_orig_node *orig_node;
        struct batadv_orig_ifinfo *orig_ifinfo;
+       struct batadv_orig_node_vlan *vlan;
+       struct batadv_orig_ifinfo *last_candidate;
 
        orig_node = container_of(ref, struct batadv_orig_node, refcount);
 
@@ -782,8 +784,21 @@ static void batadv_orig_node_release(struct kref *ref)
                hlist_del_rcu(&orig_ifinfo->list);
                batadv_orig_ifinfo_put(orig_ifinfo);
        }
+
+       last_candidate = orig_node->last_bonding_candidate;
+       orig_node->last_bonding_candidate = NULL;
        spin_unlock_bh(&orig_node->neigh_list_lock);
 
+       if (last_candidate)
+               batadv_orig_ifinfo_put(last_candidate);
+
+       spin_lock_bh(&orig_node->vlan_list_lock);
+       hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
+               hlist_del_rcu(&vlan->list);
+               batadv_orig_node_vlan_put(vlan);
+       }
+       spin_unlock_bh(&orig_node->vlan_list_lock);
+
        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
 
index e3857ed4057f55d2f90ff72ed7cb8a14532d6fe2..bfac086b4d015053f88dee70e63bebd1ae9b3c56 100644 (file)
@@ -374,6 +374,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
                if (skb_cow(skb, ETH_HLEN) < 0)
                        goto out;
 
+               ethhdr = eth_hdr(skb);
                icmph = (struct batadv_icmp_header *)skb->data;
                icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
                if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
@@ -454,6 +455,29 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
        return 0;
 }
 
+/**
+ * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * @orig_node: originator node whose bonding candidates should be replaced
+ * @new_candidate: new bonding candidate or NULL
+ */
+static void
+batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
+                           struct batadv_orig_ifinfo *new_candidate)
+{
+       struct batadv_orig_ifinfo *old_candidate;
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+       old_candidate = orig_node->last_bonding_candidate;
+
+       if (new_candidate)
+               kref_get(&new_candidate->refcount);
+       orig_node->last_bonding_candidate = new_candidate;
+       spin_unlock_bh(&orig_node->neigh_list_lock);
+
+       if (old_candidate)
+               batadv_orig_ifinfo_put(old_candidate);
+}
+
 /**
  * batadv_find_router - find a suitable router for this originator
  * @bat_priv: the bat priv with all the soft interface information
@@ -561,10 +585,6 @@ next:
        }
        rcu_read_unlock();
 
-       /* last_bonding_candidate is reset below, remove the old reference. */
-       if (orig_node->last_bonding_candidate)
-               batadv_orig_ifinfo_put(orig_node->last_bonding_candidate);
-
        /* After finding candidates, handle the three cases:
         * 1) there is a next candidate, use that
         * 2) there is no next candidate, use the first of the list
@@ -573,21 +593,28 @@ next:
        if (next_candidate) {
                batadv_neigh_node_put(router);
 
-               /* remove references to first candidate, we don't need it. */
-               if (first_candidate) {
-                       batadv_neigh_node_put(first_candidate_router);
-                       batadv_orig_ifinfo_put(first_candidate);
-               }
+               kref_get(&next_candidate_router->refcount);
                router = next_candidate_router;
-               orig_node->last_bonding_candidate = next_candidate;
+               batadv_last_bonding_replace(orig_node, next_candidate);
        } else if (first_candidate) {
                batadv_neigh_node_put(router);
 
-               /* refcounting has already been done in the loop above. */
+               kref_get(&first_candidate_router->refcount);
                router = first_candidate_router;
-               orig_node->last_bonding_candidate = first_candidate;
+               batadv_last_bonding_replace(orig_node, first_candidate);
        } else {
-               orig_node->last_bonding_candidate = NULL;
+               batadv_last_bonding_replace(orig_node, NULL);
+       }
+
+       /* cleanup of candidates */
+       if (first_candidate) {
+               batadv_neigh_node_put(first_candidate_router);
+               batadv_orig_ifinfo_put(first_candidate);
+       }
+
+       if (next_candidate) {
+               batadv_neigh_node_put(next_candidate_router);
+               batadv_orig_ifinfo_put(next_candidate);
        }
 
        return router;
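
batadv_last_bonding_replace() concentrates the pointer swap in one place: take neigh_list_lock, acquire a reference on the new candidate, exchange the pointer, and only after the unlock release the old reference, so the release path never runs under the lock. A pthread-based userspace sketch of that shape (names invented for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ifinfo {
        atomic_int refcount;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ifinfo *last_candidate;   /* protected by list_lock */

static void ifinfo_put(struct ifinfo *info)
{
        if (info && atomic_fetch_sub(&info->refcount, 1) == 1)
                free(info);
}

static void last_candidate_replace(struct ifinfo *new_candidate)
{
        struct ifinfo *old;

        pthread_mutex_lock(&list_lock);
        old = last_candidate;
        if (new_candidate)
                atomic_fetch_add(&new_candidate->refcount, 1);
        last_candidate = new_candidate;
        pthread_mutex_unlock(&list_lock);

        ifinfo_put(old);        /* the release runs outside the lock */
}

int main(void)
{
        struct ifinfo *info = calloc(1, sizeof(*info));

        if (!info)
                return 1;
        atomic_init(&info->refcount, 1);
        last_candidate_replace(info);   /* pointer now holds a reference */
        ifinfo_put(info);               /* drop ours */
        last_candidate_replace(NULL);   /* clears the pointer, frees info */
        return 0;
}
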
index f2f125684ed9c199e1c4f7f7b3b6109b3b9a51c2..010397650fa5b7c2c4e4912dc680440dbaf7e21d 100644 (file)
@@ -424,8 +424,8 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
        struct batadv_orig_node *orig_node;
 
        orig_node = batadv_gw_get_selected_orig(bat_priv);
-       return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
-                                      orig_node, vid);
+       return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
+                                      BATADV_P_DATA, orig_node, vid);
 }
 
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
index 343d2c90439928cfd986654de265eeb10ca9a10a..287a3879ed7e38dfd0f0d3f79e2cb2580ea84276 100644 (file)
@@ -1033,7 +1033,9 @@ void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
 static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
                                          struct list_head *head)
 {
+       struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_hard_iface *hard_iface;
+       struct batadv_softif_vlan *vlan;
 
        list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface == soft_iface)
@@ -1041,6 +1043,13 @@ static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
                                                        BATADV_IF_CLEANUP_KEEP);
        }
 
+       /* destroy the "untagged" VLAN */
+       vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+       if (vlan) {
+               batadv_softif_destroy_vlan(bat_priv, vlan);
+               batadv_softif_vlan_put(vlan);
+       }
+
        batadv_sysfs_del_meshif(soft_iface);
        unregister_netdevice_queue(soft_iface, head);
 }
index feaf492b01ca011b221fbfb74bba295da794ac15..57ec87f37050d98faefc37e989024a572d26b421 100644 (file)
@@ -650,8 +650,10 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
 
        /* increase the refcounter of the related vlan */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
-       if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
-                addr, BATADV_PRINT_VID(vid))) {
+       if (!vlan) {
+               net_ratelimited_function(batadv_info, soft_iface,
+                                        "adding TT local entry %pM to non-existent VLAN %d\n",
+                                        addr, BATADV_PRINT_VID(vid));
                kfree(tt_local);
                tt_local = NULL;
                goto out;
@@ -691,7 +693,6 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
        if (unlikely(hash_added != 0)) {
                /* remove the reference for the hash */
                batadv_tt_local_entry_put(tt_local);
-               batadv_softif_vlan_put(vlan);
                goto out;
        }
 
@@ -2269,6 +2270,29 @@ static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
        return crc;
 }
 
+/**
+ * batadv_tt_req_node_release - free tt_req_node entry
+ * @ref: kref pointer of the tt_req_node entry
+ */
+static void batadv_tt_req_node_release(struct kref *ref)
+{
+       struct batadv_tt_req_node *tt_req_node;
+
+       tt_req_node = container_of(ref, struct batadv_tt_req_node, refcount);
+
+       kfree(tt_req_node);
+}
+
+/**
+ * batadv_tt_req_node_put - decrement the tt_req_node refcounter and
+ *  possibly release it
+ * @tt_req_node: tt_req_node to be freed
+ */
+static void batadv_tt_req_node_put(struct batadv_tt_req_node *tt_req_node)
+{
+       kref_put(&tt_req_node->refcount, batadv_tt_req_node_release);
+}
+
 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_req_node *node;
@@ -2278,7 +2302,7 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 
        hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
                hlist_del_init(&node->list);
-               kfree(node);
+               batadv_tt_req_node_put(node);
        }
 
        spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2315,7 +2339,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
                if (batadv_has_timed_out(node->issued_at,
                                         BATADV_TT_REQUEST_TIMEOUT)) {
                        hlist_del_init(&node->list);
-                       kfree(node);
+                       batadv_tt_req_node_put(node);
                }
        }
        spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2347,9 +2371,11 @@ batadv_tt_req_node_new(struct batadv_priv *bat_priv,
        if (!tt_req_node)
                goto unlock;
 
+       kref_init(&tt_req_node->refcount);
        ether_addr_copy(tt_req_node->addr, orig_node->orig);
        tt_req_node->issued_at = jiffies;
 
+       kref_get(&tt_req_node->refcount);
        hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
        spin_unlock_bh(&bat_priv->tt.req_list_lock);
@@ -2613,13 +2639,19 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv,
 out:
        if (primary_if)
                batadv_hardif_put(primary_if);
+
        if (ret && tt_req_node) {
                spin_lock_bh(&bat_priv->tt.req_list_lock);
-               /* hlist_del_init() verifies tt_req_node still is in the list */
-               hlist_del_init(&tt_req_node->list);
+               if (!hlist_unhashed(&tt_req_node->list)) {
+                       hlist_del_init(&tt_req_node->list);
+                       batadv_tt_req_node_put(tt_req_node);
+               }
                spin_unlock_bh(&bat_priv->tt.req_list_lock);
-               kfree(tt_req_node);
        }
+
+       if (tt_req_node)
+               batadv_tt_req_node_put(tt_req_node);
+
        kfree(tvlv_tt_data);
        return ret;
 }
@@ -3055,7 +3087,7 @@ static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
                if (!batadv_compare_eth(node->addr, resp_src))
                        continue;
                hlist_del_init(&node->list);
-               kfree(node);
+               batadv_tt_req_node_put(node);
        }
 
        spin_unlock_bh(&bat_priv->tt.req_list_lock);
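
The tt_req_node conversion follows the usual two-reference rule for refcounted list entries: kref_init() creates the reference handed back to the caller, an extra kref_get() covers the list, every hlist_del_init() is paired with a put, and the caller drops its own reference when done. A compact userspace sketch of that counting, with a singly linked list standing in for the hlist and illustrative names throughout:

#include <stdatomic.h>
#include <stdlib.h>

struct req_node {
        atomic_int refcount;
        struct req_node *next;          /* stand-in for the hlist */
};

static struct req_node *req_list;

static void node_put(struct req_node *n)
{
        if (atomic_fetch_sub(&n->refcount, 1) == 1)
                free(n);
}

static struct req_node *node_new(void)
{
        struct req_node *n = calloc(1, sizeof(*n));

        if (!n)
                return NULL;
        atomic_init(&n->refcount, 1);           /* caller's reference */
        atomic_fetch_add(&n->refcount, 1);      /* list's reference */
        n->next = req_list;
        req_list = n;
        return n;
}

static void list_purge(void)
{
        while (req_list) {
                struct req_node *n = req_list;

                req_list = n->next;
                node_put(n);            /* drop the list's reference */
        }
}

int main(void)
{
        struct req_node *n = node_new();

        if (!n)
                return 1;
        list_purge();   /* node stays alive: caller still holds one ref */
        node_put(n);    /* now it is freed */
        return 0;
}
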
index 6a577f4f8ba77ce1b2b923c55deada8a050802e8..74d865a4df464f03a54f1d7f9f87ad5a095ad088 100644 (file)
@@ -330,7 +330,9 @@ struct batadv_orig_node {
        DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
        u32 last_bcast_seqno;
        struct hlist_head neigh_list;
-       /* neigh_list_lock protects: neigh_list and router */
+       /* neigh_list_lock protects: neigh_list, ifinfo_list,
+        * last_bonding_candidate and router
+        */
        spinlock_t neigh_list_lock;
        struct hlist_node hash_entry;
        struct batadv_priv *bat_priv;
@@ -1042,6 +1044,7 @@ struct batadv_bla_backbone_gw {
  * @addr: mac address of claimed non-mesh client
  * @vid: vlan id this client was detected on
  * @backbone_gw: pointer to backbone gw claiming this client
+ * @backbone_lock: lock protecting backbone_gw pointer
  * @lasttime: last time we heard of claim (locals only)
  * @hash_entry: hlist node for batadv_priv_bla::claim_hash
  * @refcount: number of contexts the object is used
@@ -1051,6 +1054,7 @@ struct batadv_bla_claim {
        u8 addr[ETH_ALEN];
        unsigned short vid;
        struct batadv_bla_backbone_gw *backbone_gw;
+       spinlock_t backbone_lock; /* protects backbone_gw */
        unsigned long lasttime;
        struct hlist_node hash_entry;
        struct rcu_head rcu;
@@ -1137,11 +1141,13 @@ struct batadv_tt_change_node {
  * struct batadv_tt_req_node - data to keep track of the tt requests in flight
  * @addr: mac address of the originator this request was sent to
  * @issued_at: timestamp used for purging stale tt requests
+ * @refcount: number of contexts the object is used by
  * @list: list node for batadv_priv_tt::req_list
  */
 struct batadv_tt_req_node {
        u8 addr[ETH_ALEN];
        unsigned long issued_at;
+       struct kref refcount;
        struct hlist_node list;
 };
 
index dcea4f4c62b3b43d701fb68a74a6aa1a1e0aa8cf..c18080ad408572f53df75e18e2b56f714f784edb 100644 (file)
@@ -279,6 +279,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
         * change from under us.
         */
        list_for_each_entry(v, &vg->vlan_list, vlist) {
+               if (!br_vlan_should_use(v))
+                       continue;
                f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
                if (f && f->is_local && !f->dst)
                        fdb_delete_local(br, NULL, f);
index 16079772222860977264133fa49bb0cf33b9b0d5..43d2cd862bc260f81c3ea51c1b3106984db45623 100644 (file)
@@ -213,8 +213,7 @@ drop:
 }
 EXPORT_SYMBOL_GPL(br_handle_frame_finish);
 
-/* note: already called with rcu_read_lock */
-static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static void __br_handle_local_finish(struct sk_buff *skb)
 {
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
        u16 vid = 0;
@@ -222,6 +221,14 @@ static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_bu
        /* check if vlan is allowed, to avoid spoofing */
        if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
                br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
+}
+
+/* note: already called with rcu_read_lock */
+static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+       struct net_bridge_port *p = br_port_get_rcu(skb->dev);
+
+       __br_handle_local_finish(skb);
 
        BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
        br_pass_frame_up(skb);
@@ -274,7 +281,9 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                        if (p->br->stp_enabled == BR_NO_STP ||
                            fwd_mask & (1u << dest[5]))
                                goto forward;
-                       break;
+                       *pskb = skb;
+                       __br_handle_local_finish(skb);
+                       return RX_HANDLER_PASS;
 
                case 0x01:      /* IEEE MAC (Pause) */
                        goto drop;
index 6852f3c7009c2b1cc2cbfa9e08b6b15104b4d12b..43844144c9c4f17a11252e3b1cc1ef4162d7ddb7 100644 (file)
@@ -464,8 +464,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
                               &ip6h->saddr)) {
                kfree_skb(skb);
+               br->has_ipv6_addr = 0;
                return NULL;
        }
+
+       br->has_ipv6_addr = 1;
        ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
        hopopt = (u8 *)(ip6h + 1);
@@ -1745,6 +1748,7 @@ void br_multicast_init(struct net_bridge *br)
        br->ip6_other_query.delay_time = 0;
        br->ip6_querier.port = NULL;
 #endif
+       br->has_ipv6_addr = 1;
 
        spin_lock_init(&br->multicast_lock);
        setup_timer(&br->multicast_router_timer,
index 2d25979273a6f57378da645460d9d6c2a0d91e5c..77e7f69bf80d4ca8e31e09b5b07230bca1abf170 100644 (file)
@@ -700,7 +700,7 @@ static int
 br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                  int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
-       unsigned int mtu = ip_skb_dst_mtu(skb);
+       unsigned int mtu = ip_skb_dst_mtu(sk, skb);
        struct iphdr *iph = ip_hdr(skb);
 
        if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
index a5343c7232bfbf37ca95580a70a740950354e691..85e89f693589d1a7e712d9165cdac8147e0bf02c 100644 (file)
@@ -1273,7 +1273,7 @@ static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev,
                struct bridge_vlan_xstats vxi;
                struct br_vlan_stats stats;
 
-               if (vl_idx++ < *prividx)
+               if (++vl_idx < *prividx)
                        continue;
                memset(&vxi, 0, sizeof(vxi));
                vxi.vid = v->vid;
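
The one-character change from vl_idx++ to ++vl_idx fixes dump resumption: the saved prividx and the skip test must agree on whether the current entry has been counted, otherwise a continued dump skips or repeats an entry at the boundary. A runnable sketch of the resume logic (plain arrays and printf standing in for netlink, purely illustrative):

#include <stdio.h>

#define N_ENTRIES 10
#define BUDGET    4     /* entries that fit in one "skb" */

/* Dump entries, resuming after the index saved in *prividx.
 * Returns 1 when finished, 0 when the caller must call again.
 */
static int dump(const int *entries, int *prividx)
{
        int vl_idx = 0, sent = 0;

        for (int i = 0; i < N_ENTRIES; i++) {
                /* pre-increment: vl_idx now counts this entry */
                if (++vl_idx < *prividx)
                        continue;       /* already sent in a prior call */
                if (sent == BUDGET) {
                        *prividx = vl_idx;      /* resume here next time */
                        return 0;
                }
                printf("entry %d\n", entries[i]);
                sent++;
        }
        *prividx = 0;
        return 1;
}

int main(void)
{
        int entries[N_ENTRIES], prividx = 0;

        for (int i = 0; i < N_ENTRIES; i++)
                entries[i] = i;
        while (!dump(entries, &prividx))
                ;       /* each entry prints exactly once */
        return 0;
}
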
index c7fb5d7a7218c2c286160874fc30278108083ca6..52edecf3c294cde218e79e90bf86c7595578b062 100644 (file)
@@ -314,6 +314,7 @@ struct net_bridge
        u8                              multicast_disabled:1;
        u8                              multicast_querier:1;
        u8                              multicast_query_use_ifaddr:1;
+       u8                              has_ipv6_addr:1;
 
        u32                             hash_elasticity;
        u32                             hash_max;
@@ -588,10 +589,22 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
 
 static inline bool
 __br_multicast_querier_exists(struct net_bridge *br,
-                             struct bridge_mcast_other_query *querier)
+                               struct bridge_mcast_other_query *querier,
+                               const bool is_ipv6)
 {
+       bool own_querier_enabled;
+
+       if (br->multicast_querier) {
+               if (is_ipv6 && !br->has_ipv6_addr)
+                       own_querier_enabled = false;
+               else
+                       own_querier_enabled = true;
+       } else {
+               own_querier_enabled = false;
+       }
+
        return time_is_before_jiffies(querier->delay_time) &&
-              (br->multicast_querier || timer_pending(&querier->timer));
+              (own_querier_enabled || timer_pending(&querier->timer));
 }
 
 static inline bool br_multicast_querier_exists(struct net_bridge *br,
@@ -599,10 +612,12 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
 {
        switch (eth->h_proto) {
        case (htons(ETH_P_IP)):
-               return __br_multicast_querier_exists(br, &br->ip4_other_query);
+               return __br_multicast_querier_exists(br,
+                       &br->ip4_other_query, false);
 #if IS_ENABLED(CONFIG_IPV6)
        case (htons(ETH_P_IPV6)):
-               return __br_multicast_querier_exists(br, &br->ip6_other_query);
+               return __br_multicast_querier_exists(br,
+                       &br->ip6_other_query, true);
 #endif
        default:
                return false;
index 0160d7d09a1e975b3cb8311d202f38462401efbf..89469592076cff8d9c8450564d23b5fb10a46c44 100644 (file)
@@ -1276,9 +1276,9 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc,
                                    const struct ceph_osd_request_target *t,
                                    struct ceph_pg_pool_info *pi)
 {
-       bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-       bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-                      ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+       bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+       bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+                      ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                       __pool_full(pi);
 
        WARN_ON(pi->id != t->base_oloc.pool);
@@ -1303,8 +1303,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
        bool force_resend = false;
        bool need_check_tiering = false;
        bool need_resend = false;
-       bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap,
-                                            CEPH_OSDMAP_SORTBITWISE);
+       bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
        enum calc_target_result ct_res;
        int ret;
 
@@ -1540,9 +1539,9 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
         */
        msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
 
-       dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__,
-            req, req->r_t.target_oid.name_len, req->r_t.target_oid.name,
-            req->r_t.target_oid.name_len, msg->front.iov_len, data_len);
+       dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
+            req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
+            msg->front.iov_len, data_len);
 }
 
 /*
@@ -1590,9 +1589,9 @@ static void maybe_request_map(struct ceph_osd_client *osdc)
        verify_osdc_locked(osdc);
        WARN_ON(!osdc->osdmap->epoch);
 
-       if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
-           ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
-           ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+       if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
+           ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
+           ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
                dout("%s osdc %p continuous\n", __func__, osdc);
                continuous = true;
        } else {
@@ -1629,19 +1628,19 @@ again:
        }
 
        if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
-           ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+           ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
                dout("req %p pausewr\n", req);
                req->r_t.paused = true;
                maybe_request_map(osdc);
        } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
-                  ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+                  ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
                dout("req %p pauserd\n", req);
                req->r_t.paused = true;
                maybe_request_map(osdc);
        } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
                   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
                                     CEPH_OSD_FLAG_FULL_FORCE)) &&
-                  (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+                  (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                    pool_full(osdc, req->r_t.base_oloc.pool))) {
                dout("req %p full/pool_full\n", req);
                pr_warn_ratelimited("FULL or reached pool quota\n");
@@ -2280,7 +2279,7 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq)
        struct ceph_osd_request *req = lreq->ping_req;
        struct ceph_osd_req_op *op = &req->r_ops[0];
 
-       if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+       if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
                dout("%s PAUSERD\n", __func__);
                return;
        }
@@ -2893,6 +2892,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
                        dout("req %p tid %llu cb\n", req, req->r_tid);
                        __complete_request(req);
                }
+               if (m.flags & CEPH_OSD_FLAG_ONDISK)
+                       complete_all(&req->r_safe_completion);
+               ceph_osdc_put_request(req);
        } else {
                if (req->r_unsafe_callback) {
                        dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
@@ -2901,10 +2903,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
                        WARN_ON(1);
                }
        }
-       if (m.flags & CEPH_OSD_FLAG_ONDISK)
-               complete_all(&req->r_safe_completion);
 
-       ceph_osdc_put_request(req);
        return;
 
 fail_request:
@@ -3050,7 +3049,7 @@ static int handle_one_map(struct ceph_osd_client *osdc,
        bool skipped_map = false;
        bool was_full;
 
-       was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+       was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
        set_pool_was_full(osdc);
 
        if (incremental)
@@ -3088,7 +3087,7 @@ static int handle_one_map(struct ceph_osd_client *osdc,
                osdc->osdmap = newmap;
        }
 
-       was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+       was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
        scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
                      need_resend, need_resend_linger);
 
@@ -3174,9 +3173,9 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
        if (ceph_check_fsid(osdc->client, &fsid) < 0)
                goto bad;
 
-       was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-       was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-                     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+       was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+       was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+                     ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                      have_pool_full(osdc);
 
        /* incremental maps */
@@ -3238,9 +3237,9 @@ done:
         * we find out when we are no longer full and stop returning
         * ENOSPC.
         */
-       pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
-       pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
-                 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+       pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
+       pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
+                 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                  have_pool_full(osdc);
        if (was_pauserd || was_pausewr || pauserd || pausewr)
                maybe_request_map(osdc);
index cde52e94732f36f7a4738a9c0f56296640560494..7e480bf75bcf5bfd69bd89cdccf29fc082a15e16 100644 (file)
@@ -1260,6 +1260,115 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
        return map;
 }
 
+/*
+ * Encoding order is (new_up_client, new_state, new_weight).  We need to
+ * apply them in (new_weight, new_state, new_up_client) order, because
+ * an incremental map may look like, e.g.:
+ *
+ *     new_up_client: { osd=6, addr=... } # set osd_state and addr
+ *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
+ */
+static int decode_new_up_state_weight(void **p, void *end,
+                                     struct ceph_osdmap *map)
+{
+       void *new_up_client;
+       void *new_state;
+       void *new_weight_end;
+       u32 len;
+
+       new_up_client = *p;
+       ceph_decode_32_safe(p, end, len, e_inval);
+       len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
+       ceph_decode_need(p, end, len, e_inval);
+       *p += len;
+
+       new_state = *p;
+       ceph_decode_32_safe(p, end, len, e_inval);
+       len *= sizeof(u32) + sizeof(u8);
+       ceph_decode_need(p, end, len, e_inval);
+       *p += len;
+
+       /* new_weight */
+       ceph_decode_32_safe(p, end, len, e_inval);
+       while (len--) {
+               s32 osd;
+               u32 w;
+
+               ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
+               osd = ceph_decode_32(p);
+               w = ceph_decode_32(p);
+               BUG_ON(osd >= map->max_osd);
+               pr_info("osd%d weight 0x%x %s\n", osd, w,
+                    w == CEPH_OSD_IN ? "(in)" :
+                    (w == CEPH_OSD_OUT ? "(out)" : ""));
+               map->osd_weight[osd] = w;
+
+               /*
+                * If we are marking in, set the EXISTS bit and clear the
+                * AUTOOUT and NEW bits.
+                */
+               if (w) {
+                       map->osd_state[osd] |= CEPH_OSD_EXISTS;
+                       map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
+                                                CEPH_OSD_NEW);
+               }
+       }
+       new_weight_end = *p;
+
+       /* new_state (up/down) */
+       *p = new_state;
+       len = ceph_decode_32(p);
+       while (len--) {
+               s32 osd;
+               u8 xorstate;
+               int ret;
+
+               osd = ceph_decode_32(p);
+               xorstate = ceph_decode_8(p);
+               if (xorstate == 0)
+                       xorstate = CEPH_OSD_UP;
+               BUG_ON(osd >= map->max_osd);
+               if ((map->osd_state[osd] & CEPH_OSD_UP) &&
+                   (xorstate & CEPH_OSD_UP))
+                       pr_info("osd%d down\n", osd);
+               if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+                   (xorstate & CEPH_OSD_EXISTS)) {
+                       pr_info("osd%d does not exist\n", osd);
+                       map->osd_weight[osd] = CEPH_OSD_IN;
+                       ret = set_primary_affinity(map, osd,
+                                                  CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
+                       if (ret)
+                               return ret;
+                       memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
+                       map->osd_state[osd] = 0;
+               } else {
+                       map->osd_state[osd] ^= xorstate;
+               }
+       }
+
+       /* new_up_client */
+       *p = new_up_client;
+       len = ceph_decode_32(p);
+       while (len--) {
+               s32 osd;
+               struct ceph_entity_addr addr;
+
+               osd = ceph_decode_32(p);
+               ceph_decode_copy(p, &addr, sizeof(addr));
+               ceph_decode_addr(&addr);
+               BUG_ON(osd >= map->max_osd);
+               pr_info("osd%d up\n", osd);
+               map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
+               map->osd_addr[osd] = addr;
+       }
+
+       *p = new_weight_end;
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
 /*
  * decode and apply an incremental map update.
  */
@@ -1358,49 +1467,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                        __remove_pg_pool(&map->pg_pools, pi);
        }
 
-       /* new_up */
-       ceph_decode_32_safe(p, end, len, e_inval);
-       while (len--) {
-               u32 osd;
-               struct ceph_entity_addr addr;
-               ceph_decode_32_safe(p, end, osd, e_inval);
-               ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
-               ceph_decode_addr(&addr);
-               pr_info("osd%d up\n", osd);
-               BUG_ON(osd >= map->max_osd);
-               map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
-               map->osd_addr[osd] = addr;
-       }
-
-       /* new_state */
-       ceph_decode_32_safe(p, end, len, e_inval);
-       while (len--) {
-               u32 osd;
-               u8 xorstate;
-               ceph_decode_32_safe(p, end, osd, e_inval);
-               xorstate = **(u8 **)p;
-               (*p)++;  /* clean flag */
-               if (xorstate == 0)
-                       xorstate = CEPH_OSD_UP;
-               if (xorstate & CEPH_OSD_UP)
-                       pr_info("osd%d down\n", osd);
-               if (osd < map->max_osd)
-                       map->osd_state[osd] ^= xorstate;
-       }
-
-       /* new_weight */
-       ceph_decode_32_safe(p, end, len, e_inval);
-       while (len--) {
-               u32 osd, off;
-               ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
-               osd = ceph_decode_32(p);
-               off = ceph_decode_32(p);
-               pr_info("osd%d weight 0x%x %s\n", osd, off,
-                    off == CEPH_OSD_IN ? "(in)" :
-                    (off == CEPH_OSD_OUT ? "(out)" : ""));
-               if (osd < map->max_osd)
-                       map->osd_weight[osd] = off;
-       }
+       /* new_up_client, new_state, new_weight */
+       err = decode_new_up_state_weight(p, end, map);
+       if (err)
+               goto bad;
 
        /* new_pg_temp */
        err = decode_new_pg_temp(p, end, map);
@@ -1778,8 +1848,8 @@ int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
        raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
                                       oid->name_len);
 
-       dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len,
-            oid->name, raw_pgid->pool, raw_pgid->seed);
+       dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
+            raw_pgid->pool, raw_pgid->seed);
        return 0;
 }
 EXPORT_SYMBOL(ceph_object_locator_to_pg);
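
decode_new_up_state_weight() resolves the mismatch between encoding order (new_up_client, new_state, new_weight) and required application order (new_weight, new_state, new_up_client) by remembering where each section starts, skipping over them with bounds checks, then rewinding the cursor and replaying the earlier sections. The pointer dance, reduced to a runnable sketch over a toy two-section buffer (the format here is invented, not the real OSDMap encoding):

#include <stdint.h>
#include <stdio.h>

static void apply_a(const uint8_t *data, uint8_t len)
{
        printf("A: %u byte(s), first '%c'\n", len, data[0]);
}

static void apply_b(const uint8_t *data, uint8_t len)
{
        printf("B: %u byte(s), first '%c'\n", len, data[0]);
}

/* Buffer holds [len_a][a...][len_b][b...] but must be applied B first. */
static int decode_b_then_a(const uint8_t **p, const uint8_t *end)
{
        const uint8_t *sec_a, *after;
        uint8_t len;

        sec_a = *p;                     /* remember section A and skip it */
        if (end - *p < 1)
                return -1;
        len = *(*p)++;
        if (end - *p < len)
                return -1;
        *p += len;

        if (end - *p < 1)               /* section B: validate and apply */
                return -1;
        len = *(*p)++;
        if (end - *p < len)
                return -1;
        apply_b(*p, len);
        *p += len;
        after = *p;

        *p = sec_a;                     /* rewind and replay section A; */
        len = *(*p)++;                  /* bounds were checked above */
        apply_a(*p, len);

        *p = after;                     /* leave the cursor past both */
        return 0;
}

int main(void)
{
        const uint8_t buf[] = { 2, 'x', 'y', 1, 'z' };
        const uint8_t *p = buf;

        return decode_b_then_a(&p, buf + sizeof(buf));
}
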
index 5cfd26a0006f07d15ee62a5660e0177745f454cf..1cd2ec046164a659d7e4ad1b1acc24eceb780a6e 100644 (file)
@@ -309,8 +309,8 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
        __scm_destroy(scm);
 }
 
-static int do_set_attach_filter(struct socket *sock, int level, int optname,
-                               char __user *optval, unsigned int optlen)
+/* allocate a 64-bit sock_fprog on the user stack for the duration of the syscall. */
+struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
 {
        struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
        struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
@@ -323,6 +323,19 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
            __get_user(ptr, &fprog32->filter) ||
            __put_user(len, &kfprog->len) ||
            __put_user(compat_ptr(ptr), &kfprog->filter))
+               return NULL;
+
+       return kfprog;
+}
+EXPORT_SYMBOL_GPL(get_compat_bpf_fprog);
+
+static int do_set_attach_filter(struct socket *sock, int level, int optname,
+                               char __user *optval, unsigned int optlen)
+{
+       struct sock_fprog __user *kfprog;
+
+       kfprog = get_compat_bpf_fprog(optval);
+       if (!kfprog)
                return -EFAULT;
 
        return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
@@ -354,7 +367,8 @@ static int do_set_sock_timeout(struct socket *sock, int level,
 static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, unsigned int optlen)
 {
-       if (optname == SO_ATTACH_FILTER)
+       if (optname == SO_ATTACH_FILTER ||
+           optname == SO_ATTACH_REUSEPORT_CBPF)
                return do_set_attach_filter(sock, level, optname,
                                            optval, optlen);
        if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
index 68adb5f52110d85fead496a8a76a7248ae8cefae..e759d90e8cef037ec117a7e70941c3be050e3103 100644 (file)
 #include <net/sock_reuseport.h>
 
 /**
- *     sk_filter - run a packet through a socket filter
+ *     sk_filter_trim_cap - run a packet through a socket filter
  *     @sk: sock associated with &sk_buff
  *     @skb: buffer to filter
+ *     @cap: limit on how short the eBPF program may trim the packet
  *
  * Run the eBPF program and then cut skb->data to the correct size returned by
  * the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
@@ -64,7 +65,7 @@
  * be accepted or -EPERM if the packet should be tossed.
  *
  */
-int sk_filter(struct sock *sk, struct sk_buff *skb)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
 {
        int err;
        struct sk_filter *filter;
@@ -85,14 +86,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
-
-               err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+               err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
        }
        rcu_read_unlock();
 
        return err;
 }
-EXPORT_SYMBOL(sk_filter);
+EXPORT_SYMBOL(sk_filter_trim_cap);
 
 static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
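
sk_filter_trim_cap() lets a protocol impose a floor on how far the filter's verdict may trim the packet: a classic-BPF return value is the number of bytes to keep, and max(cap, pkt_len) guarantees at least cap bytes survive, which is why the DCCP receive paths below pass dh->dccph_doff * 4 so a filter cannot cut the header out from under the stack. The trimming rule in isolation, as a runnable sketch:

#include <stdio.h>

/* Return the length a packet should be trimmed to, given the
 * filter's verdict (0 = drop) and a protocol-imposed floor.
 * Mirrors: err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
 */
static int trimmed_len(unsigned int pkt_len, unsigned int cap)
{
        if (pkt_len == 0)
                return -1;              /* -EPERM: drop the packet */
        return pkt_len > cap ? pkt_len : cap;   /* never below cap */
}

int main(void)
{
        printf("%d\n", trimmed_len(0, 16));     /* -1: filter drops */
        printf("%d\n", trimmed_len(4, 16));     /* 16: floored at cap */
        printf("%d\n", trimmed_len(64, 16));    /* 64: verdict wins */
        return 0;
}
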
@@ -2085,7 +2085,8 @@ static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 }
 
 static bool sk_filter_is_valid_access(int off, int size,
-                                     enum bpf_access_type type)
+                                     enum bpf_access_type type,
+                                     enum bpf_reg_type *reg_type)
 {
        switch (off) {
        case offsetof(struct __sk_buff, tc_classid):
@@ -2108,7 +2109,8 @@ static bool sk_filter_is_valid_access(int off, int size,
 }
 
 static bool tc_cls_act_is_valid_access(int off, int size,
-                                      enum bpf_access_type type)
+                                      enum bpf_access_type type,
+                                      enum bpf_reg_type *reg_type)
 {
        if (type == BPF_WRITE) {
                switch (off) {
@@ -2123,6 +2125,16 @@ static bool tc_cls_act_is_valid_access(int off, int size,
                        return false;
                }
        }
+
+       switch (off) {
+       case offsetof(struct __sk_buff, data):
+               *reg_type = PTR_TO_PACKET;
+               break;
+       case offsetof(struct __sk_buff, data_end):
+               *reg_type = PTR_TO_PACKET_END;
+               break;
+       }
+
        return __is_valid_access(off, size, type);
 }
 
index a669dea146c61b2f7f5b1feaf46512b135ee28f7..61ad43f61c5edbffa48f4983d8bf1a7472a66cdc 100644 (file)
@@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
 }
 EXPORT_SYMBOL(make_flow_keys_digest);
 
+static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
+
+u32 __skb_get_hash_symmetric(struct sk_buff *skb)
+{
+       struct flow_keys keys;
+
+       __flow_hash_secret_init();
+
+       memset(&keys, 0, sizeof(keys));
+       __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
+                          NULL, 0, 0, 0,
+                          FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+       return __flow_hash_from_keys(&keys, hashrnd);
+}
+EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+
 /**
  * __skb_get_hash: calculate a flow hash
  * @skb: sk_buff to calculate flow hash from
@@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
        },
 };
 
+static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
+       {
+               .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+               .offset = offsetof(struct flow_keys, control),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_BASIC,
+               .offset = offsetof(struct flow_keys, basic),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.v4addrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.v6addrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_PORTS,
+               .offset = offsetof(struct flow_keys, ports),
+       },
+};
+
 static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
        {
                .key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void)
        skb_flow_dissector_init(&flow_keys_dissector,
                                flow_keys_dissector_keys,
                                ARRAY_SIZE(flow_keys_dissector_keys));
+       skb_flow_dissector_init(&flow_keys_dissector_symmetric,
+                               flow_keys_dissector_symmetric_keys,
+                               ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
        skb_flow_dissector_init(&flow_keys_buf_dissector,
                                flow_keys_buf_dissector_keys,
                                ARRAY_SIZE(flow_keys_buf_dissector_keys));
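
The symmetric dissector restricts hashing to the control/basic/address/port keys so that, once the key is canonicalised, both directions of a flow map to the same bucket. One common way to obtain such a direction-independent hash is to order the address and port pairs before mixing; a toy sketch of that idea (the mixer below is illustrative only, the kernel uses jhash with a random seed):

#include <stdint.h>
#include <stdio.h>

struct tuple {
        uint32_t saddr, daddr;
        uint16_t sport, dport;
};

/* Canonicalise so (a -> b) and (b -> a) produce the same key, then mix. */
static uint32_t symmetric_hash(struct tuple t)
{
        if (t.saddr > t.daddr ||
            (t.saddr == t.daddr && t.sport > t.dport)) {
                uint32_t a = t.saddr;
                uint16_t p = t.sport;

                t.saddr = t.daddr;
                t.daddr = a;
                t.sport = t.dport;
                t.dport = p;
        }
        return (t.saddr * 2654435761u) ^ (t.daddr * 40503u) ^
               (((uint32_t)t.sport << 16) | t.dport);
}

int main(void)
{
        struct tuple fwd = { 0x0a000001, 0x0a000002, 12345, 80 };
        struct tuple rev = { 0x0a000002, 0x0a000001, 80, 12345 };

        /* prints the same value twice */
        printf("%u %u\n", symmetric_hash(fwd), symmetric_hash(rev));
        return 0;
}
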
index f96ee8b9478d8a752e78e1ca39c2351f5863f054..be873e4e312572516a2ab47cdac6053efd3459af 100644 (file)
@@ -47,6 +47,7 @@ nla_put_failure:
  * @xstats_type: TLV type for backward compatibility xstats TLV
  * @lock: statistics lock
  * @d: dumping handle
+ * @padattr: padding attribute
  *
  * Initializes the dumping handle, grabs the statistics lock and appends
  * an empty TLV header to the socket buffer for use as a container for all
@@ -87,6 +88,7 @@ EXPORT_SYMBOL(gnet_stats_start_copy_compat);
  * @type: TLV type for top level statistic TLV
  * @lock: statistics lock
  * @d: dumping handle
+ * @padattr: padding attribute
  *
  * Initializes the dumping handle, grabs the statistics lock and appends
  * an empty TLV header to the socket buffer for use as a container for all
index 941c284868966971fb685e68e71878f5c2185d80..2cab489ae62e657c2ec99cca71b558d9a3b3cf7a 100644 (file)
@@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp)
        spin_lock_irqsave(&bm_pool->lock, flags);
        if (bm_pool->buf_num == bm_pool->size) {
                pr_warn("pool already filled\n");
+               spin_unlock_irqrestore(&bm_pool->lock, flags);
                return bm_pool->buf_num;
        }
 
        if (buf_num + bm_pool->buf_num > bm_pool->size) {
                pr_warn("cannot allocate %d buffers for pool\n",
                        buf_num);
+               spin_unlock_irqrestore(&bm_pool->lock, flags);
                return 0;
        }
 
        if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) {
                pr_warn("Adding %d buffers to the %d current buffers will overflow\n",
                        buf_num,  bm_pool->buf_num);
+               spin_unlock_irqrestore(&bm_pool->lock, flags);
                return 0;
        }
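
All three early returns in hwbm_pool_add() bailed out with bm_pool->lock still held; the fix adds the missing spin_unlock_irqrestore() before each return. An alternative shape that makes such leaks structurally impossible is a single exit label, sketched here with a pthread mutex and invented names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static int pool_add(int cur, int size, int buf_num)
{
        int ret;

        pthread_mutex_lock(&pool_lock);
        if (cur == size) {
                fprintf(stderr, "pool already filled\n");
                ret = cur;
                goto unlock;    /* one exit, lock always dropped */
        }
        if (buf_num + cur > size) {
                fprintf(stderr, "cannot allocate %d buffers\n", buf_num);
                ret = 0;
                goto unlock;
        }
        ret = cur + buf_num;
unlock:
        pthread_mutex_unlock(&pool_lock);
        return ret;
}

int main(void)
{
        printf("%d\n", pool_add(0, 8, 4));      /* 4: buffers added */
        printf("%d\n", pool_add(8, 8, 1));      /* 8: already full */
        return 0;
}
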
 
index 29dd8cc22bbf87a05d555ff16d62e6dee0b536d4..510cd62fcb9963f92591e03b6c4d40502f2f37bf 100644 (file)
@@ -2469,13 +2469,17 @@ int neigh_xmit(int index, struct net_device *dev,
                tbl = neigh_tables[index];
                if (!tbl)
                        goto out;
+               rcu_read_lock_bh();
                neigh = __neigh_lookup_noref(tbl, addr, dev);
                if (!neigh)
                        neigh = __neigh_create(tbl, addr, dev, false);
                err = PTR_ERR(neigh);
-               if (IS_ERR(neigh))
+               if (IS_ERR(neigh)) {
+                       rcu_read_unlock_bh();
                        goto out_kfree_skb;
+               }
                err = neigh->output(neigh, skb);
+               rcu_read_unlock_bh();
        }
        else if (index == NEIGH_LINK_TABLE) {
                err = dev_hard_header(skb, dev, ntohs(skb->protocol),
index 2b3f76fe65f49f8018c63aa44b345262fb40cde7..7a0b616557abb6570dbc86a0a7cf04a50a83214b 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/jiffies.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_net.h>
 
 #include "net-sysfs.h"
 
index 8604ae2459608f36c2666fed3b2db2d06eda98af..8b02df0d354da6ca34d148ff72a333491f4bcd15 100644 (file)
@@ -2245,10 +2245,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
        hrtimer_set_expires(&t.timer, spin_until);
 
        remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
-       if (remaining <= 0) {
-               pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
-               return;
-       }
+       if (remaining <= 0)
+               goto out;
 
        start_time = ktime_get();
        if (remaining < 100000) {
@@ -2273,7 +2271,9 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
        }
 
        pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+out:
        pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
+       destroy_hrtimer_on_stack(&t.timer);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
index f2b77e549c03a771909cd9c87c40ec2b7826cd31..eb12d2161fb2b5f9f26ebb8f534bbd6a673b65b1 100644 (file)
@@ -3015,24 +3015,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
 }
 EXPORT_SYMBOL_GPL(skb_append_pagefrags);
 
-/**
- *     skb_push_rcsum - push skb and update receive checksum
- *     @skb: buffer to update
- *     @len: length of data pulled
- *
- *     This function performs an skb_push on the packet and updates
- *     the CHECKSUM_COMPLETE checksum.  It should be used on
- *     receive path processing instead of skb_push unless you know
- *     that the checksum difference is zero (e.g., a valid IP header)
- *     or you are setting ip_summed to CHECKSUM_NONE.
- */
-static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
-{
-       skb_push(skb, len);
-       skb_postpush_rcsum(skb, skb->data, len);
-       return skb->data;
-}
-
 /**
  *     skb_pull_rcsum - pull skb and update receive checksum
  *     @skb: buffer to update
index 08bf97eceeb3827d0b237d8c01910e5e0a0f5d6a..25dab8b60223e25ee92dae45a226fce2a6bb5a03 100644 (file)
@@ -452,11 +452,12 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+                    const int nested, unsigned int trim_cap)
 {
        int rc = NET_RX_SUCCESS;
 
-       if (sk_filter(sk, skb))
+       if (sk_filter_trim_cap(sk, skb, trim_cap))
                goto discard_and_relse;
 
        skb->dev = NULL;
@@ -492,7 +493,7 @@ discard_and_relse:
        kfree_skb(skb);
        goto out;
 }
-EXPORT_SYMBOL(sk_receive_skb);
+EXPORT_SYMBOL(__sk_receive_skb);
 
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
@@ -1938,6 +1939,10 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
                sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
                sockc->tsflags |= tsflags;
                break;
+       /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
+       case SCM_RIGHTS:
+       case SCM_CREDENTIALS:
+               break;
        default:
                return -EINVAL;
        }
index 5c7e413a3ae407e67565b48a8bd6f43e3b02de4d..345a3aeb8c7e36449a765298cd6512eab8cfef4b 100644 (file)
@@ -462,7 +462,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
        if (IS_ERR(rt)) {
-               __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+               IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
                return NULL;
        }
 
@@ -527,17 +527,19 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
                                                                 rxiph->daddr);
        skb_dst_set(skb, dst_clone(dst));
 
+       local_bh_disable();
        bh_lock_sock(ctl_sk);
        err = ip_build_and_send_pkt(skb, ctl_sk,
                                    rxiph->daddr, rxiph->saddr, NULL);
        bh_unlock_sock(ctl_sk);
 
        if (net_xmit_eval(err) == 0) {
-               DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
-               DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
+               __DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+               __DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
        }
+       local_bh_enable();
 out:
-        dst_release(dst);
+       dst_release(dst);
 }
 
 static void dccp_v4_reqsk_destructor(struct request_sock *req)
@@ -866,7 +868,7 @@ lookup:
                goto discard_and_relse;
        nf_reset(skb);
 
-       return sk_receive_skb(sk, skb, 1);
+       return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
 
 no_dccp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
index d176f4e66369a399f5fe8a440eb864dbac9c7542..3ff137d9471d8420748fd72c8631d67eb57ffb66 100644 (file)
@@ -732,7 +732,7 @@ lookup:
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
 
-       return sk_receive_skb(sk, skb, 1) ? -1 : 0;
+       return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
 
 no_dccp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
index df48034378889eac83c6b262a76a32028202efb4..a796fc7cbc3542972eaa19bbc3d3dc07a428ff1c 100644 (file)
@@ -41,6 +41,7 @@
 #include <net/dn_fib.h>
 #include <net/dn_neigh.h>
 #include <net/dn_dev.h>
+#include <net/nexthop.h>
 
 #define RT_MIN_TABLE 1
 
@@ -150,14 +151,13 @@ static int dn_fib_count_nhs(const struct nlattr *attr)
        struct rtnexthop *nhp = nla_data(attr);
        int nhs = 0, nhlen = nla_len(attr);
 
-       while(nhlen >= (int)sizeof(struct rtnexthop)) {
-               if ((nhlen -= nhp->rtnh_len) < 0)
-                       return 0;
+       while (rtnh_ok(nhp, nhlen)) {
                nhs++;
-               nhp = RTNH_NEXT(nhp);
+               nhp = rtnh_next(nhp, &nhlen);
        }
 
-       return nhs;
+       /* leftover implies invalid nexthop configuration, discard it */
+       return nhlen > 0 ? 0 : nhs;
 }
 
 static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
@@ -167,21 +167,24 @@ static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
        int nhlen = nla_len(attr);
 
        change_nexthops(fi) {
-               int attrlen = nhlen - sizeof(struct rtnexthop);
-               if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
+               int attrlen;
+
+               if (!rtnh_ok(nhp, nhlen))
                        return -EINVAL;
 
                nh->nh_flags  = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
                nh->nh_oif    = nhp->rtnh_ifindex;
                nh->nh_weight = nhp->rtnh_hops + 1;
 
-               if (attrlen) {
+               attrlen = rtnh_attrlen(nhp);
+               if (attrlen > 0) {
                        struct nlattr *gw_attr;
 
                        gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
                        nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
                }
-               nhp = RTNH_NEXT(nhp);
+
+               nhp = rtnh_next(nhp, &nhlen);
        } endfor_nexthops(fi);
 
        return 0;
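
Both DECnet loops now walk the rtnexthop records with rtnh_ok()/rtnh_next(), which verify that each record header and its declared length fit in the remaining attribute space before advancing; the open-coded arithmetic they replace could be driven past the buffer by a zero or oversized rtnh_len. A userspace sketch of the same bounds-checked record walk (the record layout is invented for illustration):

#include <stdint.h>
#include <stdio.h>

struct rec {                    /* stand-in for struct rtnexthop */
        uint16_t len;           /* total record length, header included */
        uint16_t ifindex;
};

static int rec_ok(const struct rec *r, int remaining)
{
        return remaining >= (int)sizeof(*r) &&
               r->len >= sizeof(*r) && (int)r->len <= remaining;
}

static const struct rec *rec_next(const struct rec *r, int *remaining)
{
        *remaining -= r->len;
        return (const struct rec *)((const uint8_t *)r + r->len);
}

static int count_recs(const void *buf, int len)
{
        const struct rec *r = buf;
        int n = 0;

        while (rec_ok(r, len)) {
                printf("ifindex %u\n", r->ifindex);
                n++;
                r = rec_next(r, &len);
        }
        /* leftover bytes mean a malformed record: reject the whole set */
        return len > 0 ? -1 : n;
}

int main(void)
{
        struct rec recs[2] = { { sizeof(struct rec), 1 },
                               { sizeof(struct rec), 2 } };

        printf("%d\n", count_recs(recs, sizeof(recs)));
        return 0;
}
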
index ca207dbf673bc0d203005ea90c7b5f1d787a4840..116187b5c26712a4b62d95e84e35c02406712cb5 100644 (file)
@@ -1289,8 +1289,8 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
                                     nl802154_dev_addr_policy))
                return -EINVAL;
 
-       if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] &&
-           !attrs[NL802154_DEV_ADDR_ATTR_MODE] &&
+       if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
+           !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
            !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
              attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
                return -EINVAL;
index 377424ea17a4ea7a35e21e21238650931b1b6c7d..d39e9e47a26e55ad2b8f775bf9ea9dfb5b12aee5 100644 (file)
@@ -1681,6 +1681,14 @@ static __net_init int inet_init_net(struct net *net)
         */
        net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
        net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
+
+       /* Default values for sysctl-controlled parameters.
+        * We set them here, in case sysctl is not compiled.
+        */
+       net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
+       net->ipv4.sysctl_ip_dynaddr = 0;
+       net->ipv4.sysctl_ip_early_demux = 1;
+
        return 0;
 }
 
index 477937465a202ccdf458a808776ea3e90c853b84..d95631d0924899f17549e69801567bed076491b9 100644 (file)
@@ -23,6 +23,11 @@ struct esp_skb_cb {
        void *tmp;
 };
 
+struct esp_output_extra {
+       __be32 seqhi;
+       u32 esphoff;
+};
+
 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
 
 static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
@@ -35,11 +40,11 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
  *
  * TODO: Use spare space in skb for this where possible.
  */
-static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
 {
        unsigned int len;
 
-       len = seqhilen;
+       len = extralen;
 
        len += crypto_aead_ivsize(aead);
 
@@ -57,15 +62,16 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
        return kmalloc(len, GFP_ATOMIC);
 }
 
-static inline __be32 *esp_tmp_seqhi(void *tmp)
+static inline void *esp_tmp_extra(void *tmp)
 {
-       return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
+       return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
 }
-static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
 {
        return crypto_aead_ivsize(aead) ?
-              PTR_ALIGN((u8 *)tmp + seqhilen,
-                        crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
+              PTR_ALIGN((u8 *)tmp + extralen,
+                        crypto_aead_alignmask(aead) + 1) : tmp + extralen;
 }
 
 static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
@@ -99,7 +105,7 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
 {
        struct ip_esp_hdr *esph = (void *)(skb->data + offset);
        void *tmp = ESP_SKB_CB(skb)->tmp;
-       __be32 *seqhi = esp_tmp_seqhi(tmp);
+       __be32 *seqhi = esp_tmp_extra(tmp);
 
        esph->seq_no = esph->spi;
        esph->spi = *seqhi;
@@ -107,7 +113,11 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
 
 static void esp_output_restore_header(struct sk_buff *skb)
 {
-       esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
+       void *tmp = ESP_SKB_CB(skb)->tmp;
+       struct esp_output_extra *extra = esp_tmp_extra(tmp);
+
+       esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
+                               sizeof(__be32));
 }
 
 static void esp_output_done_esn(struct crypto_async_request *base, int err)
@@ -121,6 +131,7 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
+       struct esp_output_extra *extra;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_request *req;
@@ -137,8 +148,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        int tfclen;
        int nfrags;
        int assoclen;
-       int seqhilen;
-       __be32 *seqhi;
+       int extralen;
        __be64 seqno;
 
        /* skb is pure payload to encrypt */
@@ -166,21 +176,21 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        nfrags = err;
 
        assoclen = sizeof(*esph);
-       seqhilen = 0;
+       extralen = 0;
 
        if (x->props.flags & XFRM_STATE_ESN) {
-               seqhilen += sizeof(__be32);
-               assoclen += seqhilen;
+               extralen += sizeof(*extra);
+               assoclen += sizeof(__be32);
        }
 
-       tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+       tmp = esp_alloc_tmp(aead, nfrags, extralen);
        if (!tmp) {
                err = -ENOMEM;
                goto error;
        }
 
-       seqhi = esp_tmp_seqhi(tmp);
-       iv = esp_tmp_iv(aead, tmp, seqhilen);
+       extra = esp_tmp_extra(tmp);
+       iv = esp_tmp_iv(aead, tmp, extralen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);
 
@@ -247,8 +257,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
         * encryption.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
-               esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
-               *seqhi = esph->spi;
+               extra->esphoff = (unsigned char *)esph -
+                                skb_transport_header(skb);
+               esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
+               extra->seqhi = esph->spi;
                esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                aead_request_set_callback(req, 0, esp_output_done_esn, skb);
        }
@@ -445,7 +457,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
                goto out;
 
        ESP_SKB_CB(skb)->tmp = tmp;
-       seqhi = esp_tmp_seqhi(tmp);
+       seqhi = esp_tmp_extra(tmp);
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);
index d09173bf95005be87d4d0cbcc754147b3c8e62c9..539fa264e67d71148364c9fc0e694c78fd35e69b 100644 (file)
@@ -479,6 +479,9 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                if (!rtnh_ok(rtnh, remaining))
                        return -EINVAL;
 
+               if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+                       return -EINVAL;
+
                nexthop_nh->nh_flags =
                        (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
                nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
@@ -1003,6 +1006,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
                goto err_inval;
 
+       if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+               goto err_inval;
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (cfg->fc_mp) {
                nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
index 4c39f4fd332a3569267c5c1330e692c5f40a49e7..de1d119a4497e7d512cba30043f01bb6609f6f2d 100644 (file)
@@ -62,26 +62,26 @@ EXPORT_SYMBOL_GPL(gre_del_protocol);
 
 /* Fills in tpi and returns header length to be pulled. */
 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-                    bool *csum_err, __be16 proto)
+                    bool *csum_err, __be16 proto, int nhs)
 {
        const struct gre_base_hdr *greh;
        __be32 *options;
        int hdr_len;
 
-       if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+       if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr))))
                return -EINVAL;
 
-       greh = (struct gre_base_hdr *)skb_transport_header(skb);
+       greh = (struct gre_base_hdr *)(skb->data + nhs);
        if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
                return -EINVAL;
 
        tpi->flags = gre_flags_to_tnl_flags(greh->flags);
        hdr_len = gre_calc_hlen(tpi->flags);
 
-       if (!pskb_may_pull(skb, hdr_len))
+       if (!pskb_may_pull(skb, nhs + hdr_len))
                return -EINVAL;
 
-       greh = (struct gre_base_hdr *)skb_transport_header(skb);
+       greh = (struct gre_base_hdr *)(skb->data + nhs);
        tpi->proto = greh->protocol;
 
        options = (__be32 *)(greh + 1);
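
gre_err() runs from the ICMP layer with skb->data still at the outer IP header, so the parser can no longer assume the GRE base header begins at skb_transport_header(); the new nhs argument is the number of network-header bytes to skip, and both pskb_may_pull() bounds include it. A sketch of the offset-aware parse, assuming a flat buffer instead of an skb and omitting the optional checksum/key/sequence fields; only gre_base_hdr mirrors the kernel layout:

    #include <stddef.h>
    #include <stdint.h>

    struct gre_base_hdr {
        uint16_t flags;
        uint16_t protocol;
    };

    /* nhs: bytes of network header in front of GRE -- 0 when the header
     * was already pulled, iph->ihl * 4 from the ICMP error path. */
    static int parse_gre(const uint8_t *data, size_t len, size_t nhs,
                         uint16_t *proto)
    {
        const struct gre_base_hdr *greh;

        if (len < nhs + sizeof(*greh))      /* bound includes the skip */
            return -1;

        greh = (const struct gre_base_hdr *)(data + nhs);
        *proto = greh->protocol;
        return (int)(nhs + sizeof(*greh));  /* header length to pull */
    }
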
index 4d2025f7ec578b7e3f3fd342e82e734d36395068..1d000af7f5617e32e6dd8ec5e034a87fd449ab06 100644 (file)
 #include <net/gre.h>
 #include <net/dst_metadata.h>
 
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/ip6_fib.h>
-#include <net/ip6_route.h>
-#endif
-
 /*
    Problems & solutions
    --------------------
@@ -217,12 +211,14 @@ static void gre_err(struct sk_buff *skb, u32 info)
         * by themselves???
         */
 
+       const struct iphdr *iph = (struct iphdr *)skb->data;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct tnl_ptk_info tpi;
        bool csum_err = false;
 
-       if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) {
+       if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
+                            iph->ihl * 4) < 0) {
                if (!csum_err)          /* ignore csum errors. */
                        return;
        }
@@ -338,7 +334,7 @@ static int gre_rcv(struct sk_buff *skb)
        }
 #endif
 
-       hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP));
+       hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
        if (hdr_len < 0)
                goto drop;
 
@@ -1121,6 +1117,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
 {
        struct nlattr *tb[IFLA_MAX + 1];
        struct net_device *dev;
+       LIST_HEAD(list_kill);
        struct ip_tunnel *t;
        int err;
 
@@ -1136,8 +1133,10 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
        t->collect_md = true;
 
        err = ipgre_newlink(net, dev, tb, NULL);
-       if (err < 0)
-               goto out;
+       if (err < 0) {
+               free_netdev(dev);
+               return ERR_PTR(err);
+       }
 
        /* openvswitch users expect packet sizes to be unrestricted,
         * so set the largest MTU we can.
@@ -1146,9 +1145,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
        if (err)
                goto out;
 
+       err = rtnl_configure_link(dev, NULL);
+       if (err < 0)
+               goto out;
+
        return dev;
 out:
-       free_netdev(dev);
+       ip_tunnel_dellink(dev, &list_kill);
+       unregister_netdevice_many(&list_kill);
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
index 124bf0a663283502deb03397343160d493a378b1..4bd4921639c3e6415f8899896f72fe1564a68c55 100644 (file)
@@ -271,7 +271,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
                return dst_output(net, sk, skb);
        }
 #endif
-       mtu = ip_skb_dst_mtu(skb);
+       mtu = ip_skb_dst_mtu(sk, skb);
        if (skb_is_gso(skb))
                return ip_finish_output_gso(net, sk, skb, mtu);
 
@@ -541,7 +541,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
        iph = ip_hdr(skb);
 
-       mtu = ip_skb_dst_mtu(skb);
+       mtu = ip_skb_dst_mtu(sk, skb);
        if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
                mtu = IPCB(skb)->frag_max_size;
 
index 2ed9dd2b5f2f5a6ea3ea52208e5ec1424a501fc5..1d71c40eaaf35a74b1cec69d3ffdbd80b675aae9 100644 (file)
@@ -127,7 +127,9 @@ __be32 ic_myaddr = NONE;            /* My IP address */
 static __be32 ic_netmask = NONE;       /* Netmask for local subnet */
 __be32 ic_gateway = NONE;      /* Gateway IP address */
 
-__be32 ic_addrservaddr = NONE; /* IP Address of the IP addresses'server */
+#ifdef IPCONFIG_DYNAMIC
+static __be32 ic_addrservaddr = NONE;  /* IP Address of the IP addresses'server */
+#endif
 
 __be32 ic_servaddr = NONE;     /* Boot server IP address */
 
index 21a38e296fe2da2c4a095c94ab204362d9eacc89..5ad48ec7771007a0c01c8210001ecb17ab012863 100644 (file)
@@ -891,8 +891,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
 {
        struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 
-       if (c)
+       if (c) {
+               c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
                c->mfc_un.res.minvif = MAXVIFS;
+       }
        return c;
 }
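
A zeroed cache entry would start with last_assert == 0, which can spuriously rate-limit the very first PIM assert; pre-aging the stamp to jiffies - MFC_ASSERT_THRESH - 1 guarantees the first check fires (ip6mr gets the same fix below). A userspace model of the trick, with time_after() written out the wraparound-safe way the kernel defines it; the threshold value is illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define ASSERT_THRESH 300u   /* illustrative; the kernel uses 3*HZ */

    /* wraparound-safe "a is after b", as the kernel's time_after() */
    static bool time_after(uint32_t a, uint32_t b)
    {
        return (int32_t)(b - a) < 0;
    }

    struct entry { uint32_t last_assert; };

    /* pre-age the stamp so the very first assert is never suppressed */
    static void entry_init(struct entry *e, uint32_t now)
    {
        e->last_assert = now - ASSERT_THRESH - 1;
    }

    static bool may_assert(struct entry *e, uint32_t now)
    {
        if (!time_after(now, e->last_assert + ASSERT_THRESH))
            return false;    /* rate-limited */
        e->last_assert = now;
        return true;
    }
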
 
index bb0419582b8dfed626f04752307ddaf2671ed01c..1cb67de106fee1103aa487af1f889ae6aea0c80c 100644 (file)
@@ -999,10 +999,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
        if (!net->ipv4.sysctl_local_reserved_ports)
                goto err_ports;
 
-       net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
-       net->ipv4.sysctl_ip_dynaddr = 0;
-       net->ipv4.sysctl_ip_early_demux = 1;
-
        return 0;
 
 err_ports:
index d6c8f4cd080001a527f7c137021cc6a3f3604344..42bf89aaf6a5206cf384068a0a50a3130abe2a4e 100644 (file)
@@ -87,7 +87,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 /* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
 
 int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
@@ -3421,6 +3421,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
        return flag;
 }
 
+static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
+                                  u32 *last_oow_ack_time)
+{
+       if (*last_oow_ack_time) {
+               s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+               if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+                       NET_INC_STATS(net, mib_idx);
+                       return true;    /* rate-limited: don't send yet! */
+               }
+       }
+
+       *last_oow_ack_time = tcp_time_stamp;
+
+       return false;   /* not rate-limited: go ahead, send dupack now! */
+}
+
 /* Return true if we're currently rate-limiting out-of-window ACKs and
  * thus shouldn't send a dupack right now. We rate-limit dupacks in
  * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
@@ -3434,21 +3451,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
        /* Data packets without SYNs are not likely part of an ACK loop. */
        if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
            !tcp_hdr(skb)->syn)
-               goto not_rate_limited;
-
-       if (*last_oow_ack_time) {
-               s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
-               if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-                       NET_INC_STATS(net, mib_idx);
-                       return true;    /* rate-limited: don't send yet! */
-               }
-       }
-
-       *last_oow_ack_time = tcp_time_stamp;
+               return false;
 
-not_rate_limited:
-       return false;   /* not rate-limited: go ahead, send dupack now! */
+       return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
 }
 
 /* RFC 5961 7 [ACK Throttling] */
@@ -3458,21 +3463,26 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
        static u32 challenge_timestamp;
        static unsigned int challenge_count;
        struct tcp_sock *tp = tcp_sk(sk);
-       u32 now;
+       u32 count, now;
 
        /* First check our per-socket dupack rate limit. */
-       if (tcp_oow_rate_limited(sock_net(sk), skb,
-                                LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
-                                &tp->last_oow_ack_time))
+       if (__tcp_oow_rate_limited(sock_net(sk),
+                                  LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+                                  &tp->last_oow_ack_time))
                return;
 
-       /* Then check the check host-wide RFC 5961 rate limit. */
+       /* Then check host-wide RFC 5961 rate limit. */
        now = jiffies / HZ;
        if (now != challenge_timestamp) {
+               u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
                challenge_timestamp = now;
-               challenge_count = 0;
+               WRITE_ONCE(challenge_count, half +
+                          prandom_u32_max(sysctl_tcp_challenge_ack_limit));
        }
-       if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+       count = READ_ONCE(challenge_count);
+       if (count > 0) {
+               WRITE_ONCE(challenge_count, count - 1);
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
                tcp_send_ack(sk);
        }
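
The rewritten tcp_send_challenge_ack() first applies the per-socket limit through the new __tcp_oow_rate_limited() helper (skipping the data-packet heuristic that only makes sense for the generic path), then draws the host-wide per-second budget uniformly from [half, half + limit) where half = (limit + 1) / 2, counting it down per ACK, so an off-path attacker can no longer infer the exact counter value by timing when challenge ACKs stop (the side channel of CVE-2016-5696); the limit itself also rises from 100 to 1000. A userspace model of the randomized budget, with prandom_u32_max() approximated by rand() and the WRITE_ONCE/READ_ONCE details omitted:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <time.h>

    static time_t challenge_timestamp;
    static unsigned int challenge_count;

    static bool challenge_ack_allowed(unsigned int limit)
    {
        time_t now = time(NULL);

        if (now != challenge_timestamp) {        /* new 1s window */
            unsigned int half = (limit + 1) / 2;

            challenge_timestamp = now;
            challenge_count = half + (unsigned int)rand() % limit;
        }
        if (challenge_count > 0) {
            challenge_count--;
            return true;                         /* send challenge ACK */
        }
        return false;
    }
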
index 8bd9911fdd163ab739d2dfda34e053b5e8e0bc03..e00e972c4e6a750d4d01bce3c1bc6125eb49f102 100644 (file)
@@ -2751,7 +2751,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        struct sk_buff *hole = NULL;
-       u32 last_lost;
+       u32 max_segs, last_lost;
        int mib_idx;
        int fwd_rexmitting = 0;
 
@@ -2771,6 +2771,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                last_lost = tp->snd_una;
        }
 
+       max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
        tcp_for_write_queue_from(skb, sk) {
                __u8 sacked = TCP_SKB_CB(skb)->sacked;
                int segs;
@@ -2784,6 +2785,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
                if (segs <= 0)
                        return;
+               /* In case tcp_shift_skb_data() has aggregated large skbs,
+                * we need to make sure we do not send overly large TSO packets
+                */
+               segs = min_t(int, segs, max_segs);
 
                if (fwd_rexmitting) {
 begin_fwd:
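
tcp_shift_skb_data() can merge SACKed data into skbs covering far more than one autosized TSO burst, so the retransmit loop now clamps the per-skb segment budget to tcp_tso_autosize(), computed once up front. A toy model of the clamp; the numbers are illustrative:

    #include <stdio.h>

    /* clamp the per-skb retransmit budget as the hunk's min_t() does */
    static int retrans_segs(int cwnd, int in_flight, int max_segs)
    {
        int segs = cwnd - in_flight;   /* raw congestion budget */

        if (segs <= 0)
            return 0;
        return segs < max_segs ? segs : max_segs;
    }

    int main(void)
    {
        /* cwnd 100, 10 in flight, autosize cap 16: send 16, not 90 */
        printf("%d\n", retrans_segs(100, 10, 16));
        return 0;
    }
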
index d56c0559b477cb96ce78c9b9b7dacc3109594f3a..4aed8fc23d328592f8cf267fca70582f62fc6a3e 100644 (file)
@@ -391,9 +391,9 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
        return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
 }
 
-static inline int compute_score(struct sock *sk, struct net *net,
-                               __be32 saddr, unsigned short hnum, __be16 sport,
-                               __be32 daddr, __be16 dport, int dif)
+static int compute_score(struct sock *sk, struct net *net,
+                        __be32 saddr, __be16 sport,
+                        __be32 daddr, unsigned short hnum, int dif)
 {
        int score;
        struct inet_sock *inet;
@@ -434,52 +434,6 @@ static inline int compute_score(struct sock *sk, struct net *net,
        return score;
 }
 
-/*
- * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num)
- */
-static inline int compute_score2(struct sock *sk, struct net *net,
-                                __be32 saddr, __be16 sport,
-                                __be32 daddr, unsigned int hnum, int dif)
-{
-       int score;
-       struct inet_sock *inet;
-
-       if (!net_eq(sock_net(sk), net) ||
-           ipv6_only_sock(sk))
-               return -1;
-
-       inet = inet_sk(sk);
-
-       if (inet->inet_rcv_saddr != daddr ||
-           inet->inet_num != hnum)
-               return -1;
-
-       score = (sk->sk_family == PF_INET) ? 2 : 1;
-
-       if (inet->inet_daddr) {
-               if (inet->inet_daddr != saddr)
-                       return -1;
-               score += 4;
-       }
-
-       if (inet->inet_dport) {
-               if (inet->inet_dport != sport)
-                       return -1;
-               score += 4;
-       }
-
-       if (sk->sk_bound_dev_if) {
-               if (sk->sk_bound_dev_if != dif)
-                       return -1;
-               score += 4;
-       }
-
-       if (sk->sk_incoming_cpu == raw_smp_processor_id())
-               score++;
-
-       return score;
-}
-
 static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
                       const __u16 lport, const __be32 faddr,
                       const __be16 fport)
@@ -492,11 +446,11 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
                              udp_ehash_secret + net_hash_mix(net));
 }
 
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
 static struct sock *udp4_lib_lookup2(struct net *net,
                __be32 saddr, __be16 sport,
                __be32 daddr, unsigned int hnum, int dif,
-               struct udp_hslot *hslot2, unsigned int slot2,
+               struct udp_hslot *hslot2,
                struct sk_buff *skb)
 {
        struct sock *sk, *result;
@@ -506,7 +460,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
        result = NULL;
        badness = 0;
        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-               score = compute_score2(sk, net, saddr, sport,
+               score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
@@ -554,17 +508,22 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 
                result = udp4_lib_lookup2(net, saddr, sport,
                                          daddr, hnum, dif,
-                                         hslot2, slot2, skb);
+                                         hslot2, skb);
                if (!result) {
+                       unsigned int old_slot2 = slot2;
                        hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
                        slot2 = hash2 & udptable->mask;
+                       /* avoid searching the same slot again. */
+                       if (unlikely(slot2 == old_slot2))
+                               return result;
+
                        hslot2 = &udptable->hash2[slot2];
                        if (hslot->count < hslot2->count)
                                goto begin;
 
                        result = udp4_lib_lookup2(net, saddr, sport,
-                                                 htonl(INADDR_ANY), hnum, dif,
-                                                 hslot2, slot2, skb);
+                                                 daddr, hnum, dif,
+                                                 hslot2, skb);
                }
                return result;
        }
@@ -572,8 +531,8 @@ begin:
        result = NULL;
        badness = 0;
        sk_for_each_rcu(sk, &hslot->head) {
-               score = compute_score(sk, net, saddr, hnum, sport,
-                                     daddr, dport, dif);
+               score = compute_score(sk, net, saddr, sport,
+                                     daddr, hnum, dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@ -1618,12 +1577,14 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       if (rcu_access_pointer(sk->sk_filter)) {
-               if (udp_lib_checksum_complete(skb))
+       if (rcu_access_pointer(sk->sk_filter) &&
+           udp_lib_checksum_complete(skb))
                        goto csum_error;
-               if (sk_filter(sk, skb))
-                       goto drop;
-       }
+
+       if (sk_filter(sk, skb))
+               goto drop;
+       if (unlikely(skb->len < sizeof(struct udphdr)))
+               goto drop;
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
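
A socket filter may truncate the skb, so the receive path now checksums the full packet first (when a filter is attached), runs the filter, and then drops anything the filter left shorter than a UDP header before udp_csum_pull_header() would read past the end; udpv6_queue_rcv_skb() gets the same reordering further down. A sketch of the ordering, assuming a buffer/length pair in place of an skb and stub checksum/filter helpers:

    #include <stdbool.h>
    #include <stddef.h>

    #define UDP_HDR_LEN 8

    static bool checksum_ok(const unsigned char *buf, size_t len)
    {
        (void)buf; (void)len;
        return true;                        /* stub */
    }

    static size_t run_filter(unsigned char *buf, size_t len)
    {
        (void)buf;
        return len;                         /* stub: may return less */
    }

    static bool udp_rcv_ok(unsigned char *buf, size_t *len, bool has_filter)
    {
        if (has_filter && !checksum_ok(buf, *len))
            return false;                   /* csum_error */

        *len = run_filter(buf, *len);       /* may shrink the packet */
        if (*len < UDP_HDR_LEN)
            return false;                   /* drop: header gone */

        return true;                        /* safe to pull the header */
    }
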
@@ -1755,8 +1716,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                        return err;
        }
 
-       return skb_checksum_init_zero_check(skb, proto, uh->check,
-                                           inet_compute_pseudo);
+       /* Note, we are only interested in != 0 or == 0, thus the
+        * force to int.
+        */
+       return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+                                                        inet_compute_pseudo);
 }
 
 /*
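
With compute_score2() folded into compute_score(), both passes of the port+address lookup share one scorer: pass one keys the hash on (daddr, hnum), pass two on (INADDR_ANY, hnum) while still scoring candidates against the real daddr, and the new old_slot2 test skips pass two outright when both keys land in the same bucket. A toy model of that control flow, assuming a trivial hash; only the bucket selection from the hunk is modeled:

    #include <stdint.h>

    #define HASH_MASK 255u

    /* illustrative hash; the kernel uses udp4_portaddr_hash() */
    static unsigned int portaddr_hash(uint32_t addr, uint16_t port)
    {
        return (addr ^ port) * 2654435761u;
    }

    /* Search order of __udp4_lib_lookup() after the cleanup; returns the
     * second bucket index, or -1 when pass two can be skipped. */
    static int wildcard_bucket(uint32_t daddr, uint16_t hnum)
    {
        unsigned int slot2 = portaddr_hash(daddr, hnum) & HASH_MASK;
        unsigned int old_slot2 = slot2;

        /* pass 1: the (daddr, hnum) bucket is searched here; assume a miss */

        slot2 = portaddr_hash(0u /* INADDR_ANY */, hnum) & HASH_MASK;
        if (slot2 == old_slot2)
            return -1;              /* same bucket: avoid re-searching */

        /* pass 2: wildcard bucket, candidates still scored vs. daddr */
        return (int)slot2;
    }
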
index 3f8411328de54d2101433f89fea20bef62cefa9a..2343e4f2e0bfce5c97978a06a601fca14b41610a 100644 (file)
@@ -232,6 +232,15 @@ config IPV6_GRE
 
          Saying M here will produce a module called ip6_gre. If unsure, say N.
 
+config IPV6_FOU
+       tristate
+       default NET_FOU && IPV6
+
+config IPV6_FOU_TUNNEL
+       tristate
+       default NET_FOU_IP_TUNNELS && IPV6_FOU
+       select IPV6_TUNNEL
+
 config IPV6_MULTIPLE_TABLES
        bool "IPv6: Multiple Routing Tables"
        select FIB_RULES
index 7ec3129c9ace062476c219eb693ca659b2b565c4..6d8ea099213e5084927833570e326fcf2cef628a 100644 (file)
@@ -42,7 +42,7 @@ obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
 obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
-obj-$(CONFIG_NET_FOU) += fou6.o
+obj-$(CONFIG_IPV6_FOU) += fou6.o
 
 obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o
 obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
index c972d0b52579018b59ed1a99f67f232be9866e65..9ea249b9451e902ecbb01f1fc480049b262e8170 100644 (file)
@@ -69,7 +69,7 @@ int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 }
 EXPORT_SYMBOL(gue6_build_header);
 
-#ifdef CONFIG_NET_FOU_IP_TUNNELS
+#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL)
 
 static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
        .encap_hlen = fou_encap_hlen,
index 4527285fcaa2c2c8134c089b88e0cfeeeddad292..a4fa840769690a271b259aee1630fb061bf0ae92 100644 (file)
@@ -98,7 +98,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        if (!(type & ICMPV6_INFOMSG_MASK))
                if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
-                       ping_err(skb, offset, info);
+                       ping_err(skb, offset, ntohl(info));
 }
 
 static int icmpv6_rcv(struct sk_buff *skb);
index b2025bf3da4af20a19b61de59808d3ca91eead51..c0cbcb259f5a9acf9157d4626c61f532d8077855 100644 (file)
@@ -78,9 +78,12 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
         * we accept a checksum of zero here. When we find the socket
         * for the UDP packet we'll check if that socket allows zero checksum
         * for IPv6 (set by socket option).
+        *
+        * Note, we are only interested in != 0 or == 0, thus the
+        * force to int.
         */
-       return skb_checksum_init_zero_check(skb, proto, uh->check,
-                                          ip6_compute_pseudo);
+       return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
+                                                        ip6_compute_pseudo);
 }
 EXPORT_SYMBOL(udp6_csum_init);
 
index 1bcef2369d64e6f1325dcab50c14601e6ca5a40a..771be1fa41764aa8ea3b570a058ee84b109903b9 100644 (file)
@@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
                }
        }
 
+       free_percpu(non_pcpu_rt->rt6i_pcpu);
        non_pcpu_rt->rt6i_pcpu = NULL;
 }
 
index af503f518278b21f274afe355acbe6815492d15b..776d145113e138872f45d97e7f66ff0416762d85 100644 (file)
@@ -468,7 +468,7 @@ static int gre_rcv(struct sk_buff *skb)
        bool csum_err = false;
        int hdr_len;
 
-       hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6));
+       hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
        if (hdr_len < 0)
                goto drop;
 
@@ -712,6 +712,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
        fl6->daddr = p->raddr;
        fl6->flowi6_oif = p->link;
        fl6->flowlabel = 0;
+       fl6->flowi6_proto = IPPROTO_GRE;
 
        if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
                fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
@@ -1027,6 +1028,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
 
        dev->hard_header_len = LL_MAX_HEADER + t_hlen;
        dev->mtu = ETH_DATA_LEN - t_hlen;
+       if (dev->type == ARPHRD_ETHER)
+               dev->mtu -= ETH_HLEN;
        if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                dev->mtu -= 8;
 
@@ -1253,6 +1256,8 @@ static int ip6gre_tap_init(struct net_device *dev)
        if (ret)
                return ret;
 
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
        tunnel = netdev_priv(dev);
 
        ip6gre_tnl_link_config(tunnel, 1);
@@ -1286,6 +1291,7 @@ static void ip6gre_tap_setup(struct net_device *dev)
 
        dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 }
 
 static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
index cbf127ae7c676650cc626cbf12cd61b6b570ea43..635b8d340cdbbbce62a173ed4016b6c5c81fc6b9 100644 (file)
@@ -1071,17 +1071,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                         const struct in6_addr *final_dst)
 {
        struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
-       int err;
 
        dst = ip6_sk_dst_check(sk, dst, fl6);
+       if (!dst)
+               dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
 
-       err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
-       if (err)
-               return ERR_PTR(err);
-       if (final_dst)
-               fl6->daddr = *final_dst;
-
-       return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
+       return dst;
 }
 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
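
The rewritten helper reduces to cache-or-lookup: ip6_sk_dst_check() either validates the socket's cached dst against the flow or returns NULL, and ip6_dst_lookup_flow() already applies final_dst and the xfrm route lookup internally, so the duplicated tail that previously ran on every call is gone. The shape of the pattern, with stub helpers standing in for the two kernel functions:

    #include <stddef.h>

    struct dst { int id; };

    static struct dst cached_dst = { 1 };
    static struct dst fresh_dst  = { 2 };

    /* stub for ip6_sk_dst_check(): cached entry, or NULL if stale */
    static struct dst *dst_cache_check(int flow_key)
    {
        return flow_key == 1 ? &cached_dst : NULL;
    }

    /* stub for ip6_dst_lookup_flow(): full lookup incl. final_dst/xfrm */
    static struct dst *dst_full_lookup(int flow_key)
    {
        (void)flow_key;
        return &fresh_dst;
    }

    static struct dst *sk_dst_lookup(int flow_key)
    {
        struct dst *dst = dst_cache_check(flow_key);

        if (!dst)
            dst = dst_full_lookup(flow_key);
        return dst;
    }
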
 
index f2e2013f834621fe2598d9dd9b452b83cd4f9018..487ef3bc7bbcde7f3509b76b3a68aee9d2ab7e8b 100644 (file)
@@ -1074,6 +1074,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
        struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
        if (!c)
                return NULL;
+       c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
        c->mfc_un.res.minvif = MAXMIFS;
        return c;
 }
index 6989c70ae29f1541e0216d36d58ee142934a83c2..4a84b5ad9ecbb74b29ef509d00fbda60868833cd 100644 (file)
@@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
        fl6.daddr = *gw;
        fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
                        (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+       fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error) {
                dst_release(dst);
index 969913da494fdf1d80ce674c3b6c421fdab18d3d..520b7884d0c2b55ab6f5fe4b0e0c86fcad8aab08 100644 (file)
@@ -1782,7 +1782,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
        };
        struct fib6_table *table;
        struct rt6_info *rt;
-       int flags = 0;
+       int flags = RT6_LOOKUP_F_IFACE;
 
        table = fib6_get_table(net, cfg->fc_table);
        if (!table)
index 0a5a255277e562ecb11ab703d964143f21bf85cd..0619ac70836d4ea5ed4e43a1f5c3ac1f870979c4 100644 (file)
@@ -560,13 +560,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->parms.link, 0, IPPROTO_IPV6, 0);
+                                t->parms.link, 0, iph->protocol, 0);
                err = 0;
                goto out;
        }
        if (type == ICMP_REDIRECT) {
                ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
-                             IPPROTO_IPV6, 0);
+                             iph->protocol, 0);
                err = 0;
                goto out;
        }
index 79e33e02f11accfd5a6c8cf2751df0a8793ab016..2255d2bf5f6bdb532c51eff6792b3024b3eeefa1 100644 (file)
@@ -738,7 +738,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
                                 u32 ack, u32 win, u32 tsval, u32 tsecr,
                                 int oif, struct tcp_md5sig_key *key, int rst,
-                                u8 tclass, u32 label)
+                                u8 tclass, __be32 label)
 {
        const struct tcphdr *th = tcp_hdr(skb);
        struct tcphdr *t1;
@@ -911,7 +911,7 @@ out:
 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
                            u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key, u8 tclass,
-                           u32 label)
+                           __be32 label)
 {
        tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
                             tclass, label);
@@ -1721,7 +1721,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
        destp = ntohs(inet->inet_dport);
        srcp  = ntohs(inet->inet_sport);
 
-       if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
+       if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
index 2da1896af93496cc58762c67b4cd4b4f42924901..acc09705618b4cccd622122f58ce4ab1639f22d1 100644 (file)
@@ -115,11 +115,10 @@ static void udp_v6_rehash(struct sock *sk)
        udp_lib_rehash(sk, new_hash);
 }
 
-static inline int compute_score(struct sock *sk, struct net *net,
-                               unsigned short hnum,
-                               const struct in6_addr *saddr, __be16 sport,
-                               const struct in6_addr *daddr, __be16 dport,
-                               int dif)
+static int compute_score(struct sock *sk, struct net *net,
+                        const struct in6_addr *saddr, __be16 sport,
+                        const struct in6_addr *daddr, unsigned short hnum,
+                        int dif)
 {
        int score;
        struct inet_sock *inet;
@@ -162,54 +161,11 @@ static inline int compute_score(struct sock *sk, struct net *net,
        return score;
 }
 
-static inline int compute_score2(struct sock *sk, struct net *net,
-                                const struct in6_addr *saddr, __be16 sport,
-                                const struct in6_addr *daddr,
-                                unsigned short hnum, int dif)
-{
-       int score;
-       struct inet_sock *inet;
-
-       if (!net_eq(sock_net(sk), net) ||
-           udp_sk(sk)->udp_port_hash != hnum ||
-           sk->sk_family != PF_INET6)
-               return -1;
-
-       if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
-               return -1;
-
-       score = 0;
-       inet = inet_sk(sk);
-
-       if (inet->inet_dport) {
-               if (inet->inet_dport != sport)
-                       return -1;
-               score++;
-       }
-
-       if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
-               if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
-                       return -1;
-               score++;
-       }
-
-       if (sk->sk_bound_dev_if) {
-               if (sk->sk_bound_dev_if != dif)
-                       return -1;
-               score++;
-       }
-
-       if (sk->sk_incoming_cpu == raw_smp_processor_id())
-               score++;
-
-       return score;
-}
-
-/* called with read_rcu_lock() */
+/* called with rcu_read_lock() */
 static struct sock *udp6_lib_lookup2(struct net *net,
                const struct in6_addr *saddr, __be16 sport,
                const struct in6_addr *daddr, unsigned int hnum, int dif,
-               struct udp_hslot *hslot2, unsigned int slot2,
+               struct udp_hslot *hslot2,
                struct sk_buff *skb)
 {
        struct sock *sk, *result;
@@ -219,7 +175,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
        result = NULL;
        badness = -1;
        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
-               score = compute_score2(sk, net, saddr, sport,
+               score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
@@ -268,17 +224,22 @@ struct sock *__udp6_lib_lookup(struct net *net,
 
                result = udp6_lib_lookup2(net, saddr, sport,
                                          daddr, hnum, dif,
-                                         hslot2, slot2, skb);
+                                         hslot2, skb);
                if (!result) {
+                       unsigned int old_slot2 = slot2;
                        hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
                        slot2 = hash2 & udptable->mask;
+                       /* avoid searching the same slot again. */
+                       if (unlikely(slot2 == old_slot2))
+                               return result;
+
                        hslot2 = &udptable->hash2[slot2];
                        if (hslot->count < hslot2->count)
                                goto begin;
 
                        result = udp6_lib_lookup2(net, saddr, sport,
-                                                 &in6addr_any, hnum, dif,
-                                                 hslot2, slot2, skb);
+                                                 daddr, hnum, dif,
+                                                 hslot2, skb);
                }
                return result;
        }
@@ -286,7 +247,7 @@ begin:
        result = NULL;
        badness = -1;
        sk_for_each_rcu(sk, &hslot->head) {
-               score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
+               score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@ -653,12 +614,14 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                }
        }
 
-       if (rcu_access_pointer(sk->sk_filter)) {
-               if (udp_lib_checksum_complete(skb))
-                       goto csum_error;
-               if (sk_filter(sk, skb))
-                       goto drop;
-       }
+       if (rcu_access_pointer(sk->sk_filter) &&
+           udp_lib_checksum_complete(skb))
+               goto csum_error;
+
+       if (sk_filter(sk, skb))
+               goto drop;
+       if (unlikely(skb->len < sizeof(struct udphdr)))
+               goto drop;
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
index 738008726cc6f74710382ea1771a5a6118aa7a94..fda7f4715c58fa4f457f4410e1e3998819a1e77e 100644 (file)
@@ -241,6 +241,7 @@ static const struct file_operations kcm_seq_fops = {
        .open           = kcm_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
+       .release        = seq_release_net,
 };
 
 static struct kcm_seq_muxinfo kcm_seq_muxinfo = {
index 6edfa99803148815e383eb13ce6a9c1eb098058d..1e40dacaa137f00df3df79c0faf4eb4de65e86c2 100644 (file)
@@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
        tunnel->encap = encap;
        if (encap == L2TP_ENCAPTYPE_UDP) {
-               struct udp_tunnel_sock_cfg udp_cfg;
+               struct udp_tunnel_sock_cfg udp_cfg = { };
 
                udp_cfg.sk_user_data = tunnel;
                udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
index c6f5df1bed121e2b701c50542bddb6e403b1e1d8..6c54e03fe9c1751217e246f31d6ca1f322762539 100644 (file)
@@ -128,6 +128,7 @@ static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
  */
 static int l2tp_ip6_recv(struct sk_buff *skb)
 {
+       struct net *net = dev_net(skb->dev);
        struct sock *sk;
        u32 session_id;
        u32 tunnel_id;
@@ -154,7 +155,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
        }
 
        /* Ok, this is a data packet. Lookup the session. */
-       session = l2tp_session_find(&init_net, NULL, session_id);
+       session = l2tp_session_find(net, NULL, session_id);
        if (session == NULL)
                goto discard;
 
@@ -188,14 +189,14 @@ pass_up:
                goto discard;
 
        tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
-       tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
+       tunnel = l2tp_tunnel_find(net, tunnel_id);
        if (tunnel != NULL)
                sk = tunnel->sock;
        else {
                struct ipv6hdr *iph = ipv6_hdr(skb);
 
                read_lock_bh(&l2tp_ip6_lock);
-               sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
+               sk = __l2tp_ip6_bind_lookup(net, &iph->daddr,
                                            0, tunnel_id);
                read_unlock_bh(&l2tp_ip6_lock);
        }
@@ -263,6 +264,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
+       struct net *net = sock_net(sk);
        __be32 v4addr = 0;
        int addr_type;
        int err;
@@ -286,7 +288,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        err = -EADDRINUSE;
        read_lock_bh(&l2tp_ip6_lock);
-       if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
+       if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr,
                                   sk->sk_bound_dev_if, addr->l2tp_conn_id))
                goto out_in_use;
        read_unlock_bh(&l2tp_ip6_lock);
@@ -456,7 +458,7 @@ static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
        return 0;
 
 drop:
-       IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
+       IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
        kfree_skb(skb);
        return -1;
 }
index 5dba899131b35bbe83835fc9e897f7e2f2332744..182470847fcf739e5906cbbc3be44f97fe4a5f8f 100644 (file)
@@ -444,10 +444,9 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
                break;
 
        case LAPB_FRMR:
-               lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n",
+               lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n",
                         lapb->dev, frame->pf,
-                        skb->data[0], skb->data[1], skb->data[2],
-                        skb->data[3], skb->data[4]);
+                        skb->data);
                lapb_establish_data_link(lapb);
                lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev);
                lapb_requeue_frames(lapb);
index ba4d015bd1a67f0d3f1ede17ff7989f18de5414f..482c94d9d958a6c32885626c6ed86ec59e2c9c79 100644 (file)
@@ -148,9 +148,7 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
                }
        }
 
-       lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n",
-                lapb->dev, lapb->state,
-                skb->data[0], skb->data[1], skb->data[2]);
+       lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data);
 
        if (!lapb_data_transmit(lapb, skb))
                kfree_skb(skb);
index 9d0a426eccbb0296f5b25f12c7ddf4d79820f37f..3c1914df641f28d9a7298b30e6a2f14b50ed9092 100644 (file)
@@ -113,9 +113,7 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
 {
        frame->type = LAPB_ILLEGAL;
 
-       lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n",
-                lapb->dev, lapb->state,
-                skb->data[0], skb->data[1], skb->data[2]);
+       lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data);
 
        /* We always need to look at 2 bytes, sometimes we need
         * to look at 3 and those cases are handled below.
@@ -284,10 +282,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
                dptr++;
                *dptr++ = lapb->frmr_type;
 
-               lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n",
+               lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n",
                         lapb->dev, lapb->state,
-                        skb->data[1], skb->data[2], skb->data[3],
-                        skb->data[4], skb->data[5]);
+                        &skb->data[1]);
        } else {
                dptr    = skb_put(skb, 4);
                *dptr++ = LAPB_FRMR;
@@ -299,9 +296,8 @@ void lapb_transmit_frmr(struct lapb_cb *lapb)
                dptr++;
                *dptr++ = lapb->frmr_type;
 
-               lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n",
-                        lapb->dev, lapb->state, skb->data[1],
-                        skb->data[2], skb->data[3]);
+               lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n",
+                        lapb->dev, lapb->state, &skb->data[1]);
        }
 
        lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE);
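
The lapb hunks swap open-coded "%02X %02X ..." byte dumps for the kernel's %Nph printf extension, which hex-dumps N bytes (up to 64) from a single pointer argument; "%3ph" on {0x01, 0x3f, 0x75} prints "01 3f 75". A rough userspace equivalent, for illustration only:

    #include <stdio.h>

    /* userspace stand-in for the kernel's "%Nph" hex-dump specifier */
    static void hex_dump(const unsigned char *p, int n)
    {
        for (int i = 0; i < n; i++)
            printf(i ? " %02x" : "%02x", p[i]);
        putchar('\n');
    }

    int main(void)
    {
        unsigned char frame[] = { 0x01, 0x3f, 0x75 };

        hex_dump(frame, 3);   /* prints: 01 3f 75 */
        return 0;
    }
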
index 4c6404e1ad6e683da649f507829c19b232fcd417..6a1603bcdcedbccd99627d09d2b0367b361e1454 100644 (file)
@@ -148,19 +148,26 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
 void mesh_sta_cleanup(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       u32 changed;
+       u32 changed = 0;
 
        /*
         * maybe userspace handles peer allocation and peering, but in either
         * case the beacon is still generated by the kernel and we might need
         * an update.
         */
-       changed = mesh_accept_plinks_update(sdata);
+       if (sdata->u.mesh.user_mpm &&
+           sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+               changed |= mesh_plink_dec_estab_count(sdata);
+       changed |= mesh_accept_plinks_update(sdata);
        if (!sdata->u.mesh.user_mpm) {
                changed |= mesh_plink_deactivate(sta);
                del_timer_sync(&sta->mesh->plink_timer);
        }
 
+       /* make sure no readers can access nexthop sta from here on */
+       mesh_path_flush_by_nexthop(sta);
+       synchronize_net();
+
        if (changed)
                ieee80211_mbss_info_change_notify(sdata, changed);
 }
index c8b8ccc370eb70c75ab43a8e013e60c1f280debd..78b0ef32ddddd6768377bf49da5acdfabf7e762f 100644 (file)
@@ -280,7 +280,7 @@ struct ieee80211_fast_tx {
        u8 sa_offs, da_offs, pn_offs;
        u8 band;
        u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
-              sizeof(rfc1042_header)];
+              sizeof(rfc1042_header)] __aligned(2);
 
        struct rcu_head rcu_head;
 };
index 2cb3c626cd4307a51e06b9f0bf5c7b92cdb961a9..096a45103f14cbbaccc07b7574b9fcfc7e55ff92 100644 (file)
@@ -762,7 +762,7 @@ static int expire_quiescent_template(struct netns_ipvs *ipvs,
  *     If available, return 1, otherwise invalidate this connection
  *     template and return 0.
  */
-int ip_vs_check_template(struct ip_vs_conn *ct)
+int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest)
 {
        struct ip_vs_dest *dest = ct->dest;
        struct netns_ipvs *ipvs = ct->ipvs;
@@ -772,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
         */
        if ((dest == NULL) ||
            !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
-           expire_quiescent_template(ipvs, dest)) {
+           expire_quiescent_template(ipvs, dest) ||
+           (cdest && (dest != cdest))) {
                IP_VS_DBG_BUF(9, "check_template: dest not available for "
                              "protocol %s s:%s:%d v:%s:%d "
                              "-> d:%s:%d\n",
index 1207f20d24e4a25050363e2d6b9e30cc6aaf03b5..2c1b498a7a271df0f6e3ffa86407b89ba103e9d9 100644 (file)
@@ -321,7 +321,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
 
        /* Check if a template already exists */
        ct = ip_vs_ct_in_get(&param);
-       if (!ct || !ip_vs_check_template(ct)) {
+       if (!ct || !ip_vs_check_template(ct, NULL)) {
                struct ip_vs_scheduler *sched;
 
                /*
@@ -1154,7 +1154,8 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
                                                  vport, &param) < 0)
                        return NULL;
                ct = ip_vs_ct_in_get(&param);
-               if (!ct) {
+               /* check if template exists and points to the same dest */
+               if (!ct || !ip_vs_check_template(ct, dest)) {
                        ct = ip_vs_conn_new(&param, dest->af, daddr, dport,
                                            IP_VS_CONN_F_TEMPLATE, dest, 0);
                        if (!ct) {
index 803001a45aa16e6b5a372ab385dba8e9c09bd2f0..1b07578bedf336c53e3b6072c8c3324f7f18081b 100644 (file)
@@ -1545,7 +1545,8 @@ error:
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
+static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+                                       int ifindex)
 {
        /* multicast addr */
        union ipvs_sockaddr mcast_addr;
@@ -1566,6 +1567,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
                set_sock_size(sock->sk, 0, result);
 
        get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+       sock->sk->sk_bound_dev_if = ifindex;
        result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
        if (result < 0) {
                pr_err("Error binding to the multicast addr\n");
@@ -1868,7 +1870,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
                if (state == IP_VS_STATE_MASTER)
                        sock = make_send_sock(ipvs, id);
                else
-                       sock = make_receive_sock(ipvs, id);
+                       sock = make_receive_sock(ipvs, id, dev->ifindex);
                if (IS_ERR(sock)) {
                        result = PTR_ERR(sock);
                        goto outtinfo;
index db2312eeb2a47c44db0f0ac5a529a10a0a8f8d2f..9f530adad10d54a9277a9fbfdee4236055e3f99a 100644 (file)
@@ -646,6 +646,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
 
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->allow_clash &&
+           !nfct_nat(ct) &&
            !nf_ct_is_dying(ct) &&
            atomic_inc_not_zero(&ct->ct_general.use)) {
                nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
@@ -1544,6 +1545,8 @@ void nf_conntrack_cleanup_end(void)
        nf_conntrack_tstamp_fini();
        nf_conntrack_acct_fini();
        nf_conntrack_expect_fini();
+
+       kmem_cache_destroy(nf_conntrack_cachep);
 }
 
 /*
@@ -1599,8 +1602,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
        unsigned int nr_slots, i;
        size_t sz;
 
+       if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+               return NULL;
+
        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+
+       if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+               return NULL;
+
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
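
nf_ct_alloc_hashtable() sizes the table from a caller-supplied slot count, so the hunk adds two wrap checks: one before rounding the request up to a whole number of per-page slots, one after, before the byte-size multiply. A userspace model of the guards; PAGE_SIZE and the bucket type are stand-ins:

    #include <limits.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096u

    typedef void *bucket_t;   /* stand-in for struct hlist_nulls_head */

    /* returns the byte size to allocate, or 0 on overflow */
    static size_t hash_size(unsigned int *sizep)
    {
        unsigned int per_page = PAGE_SIZE / sizeof(bucket_t);
        unsigned int nr_slots;

        /* guard 1: request already too large to size safely */
        if (*sizep > UINT_MAX / sizeof(bucket_t))
            return 0;

        /* roundup(*sizep, per_page), as the kernel macro computes it */
        nr_slots = *sizep = (*sizep + per_page - 1) / per_page * per_page;

        /* guard 2: nr_slots * sizeof(bucket) must not wrap either */
        if (nr_slots > UINT_MAX / sizeof(bucket_t))
            return 0;

        return (size_t)nr_slots * sizeof(bucket_t);
    }
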
index 883c691ec8d05724d8a63734bba9a3a30ae06eb2..19efeba02abb0e8db4d4b9b0f7827d2d3808c69b 100644 (file)
@@ -632,6 +632,7 @@ static int __init nf_conntrack_ftp_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %d port: %d\n",
                                       ftp[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_ftp_fini();
                                return ret;
                        }
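
nf_conntrack_ftp_fini() unregisters ports_c helpers, so on a failure at slot i the init path must first shrink ports_c to i, or cleanup would unregister helpers that were never registered; the identical one-line fix recurs below for the irc, sane, sip and tftp helpers. A userspace model of the rollback invariant; all names here are illustrative:

    #include <stdio.h>

    #define NPORTS 8

    static unsigned int ports_c;   /* number of successfully set-up slots */

    static int register_port(int i) { return i == 5 ? -1 : 0; /* stub: fail at 5 */ }
    static void unregister_port(int i) { printf("unregister %d\n", i); }

    static void helpers_fini(void)
    {
        for (unsigned int i = 0; i < ports_c; i++)
            unregister_port(i);
    }

    static int helpers_init(void)
    {
        ports_c = NPORTS;              /* assume all will register */
        for (int i = 0; i < NPORTS; i++) {
            if (register_port(i) < 0) {
                ports_c = i;           /* shrink to what succeeded */
                helpers_fini();
                return -1;
            }
        }
        return 0;
    }
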
index f703adb7e5f7166ca47380aedd8ad0074e361ee4..196cb39649e1ec186108acfc417ae2d7bf8a0bb2 100644 (file)
@@ -361,9 +361,10 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_log);
 
 int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 {
-       int ret = 0;
-       struct nf_conntrack_helper *cur;
+       struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
        unsigned int h = helper_hash(&me->tuple);
+       struct nf_conntrack_helper *cur;
+       int ret = 0;
 
        BUG_ON(me->expect_policy == NULL);
        BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
@@ -371,9 +372,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 
        mutex_lock(&nf_ct_helper_mutex);
        hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) {
-               if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
-                   cur->tuple.src.l3num == me->tuple.src.l3num &&
-                   cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+               if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) {
                        ret = -EEXIST;
                        goto out;
                }
index 8b6da27196005f61d2269d8af92124096c85047e..f97ac61d2536aac5d0c73d6e86b1a6df36bc60fd 100644 (file)
@@ -271,6 +271,7 @@ static int __init nf_conntrack_irc_init(void)
                if (ret) {
                        pr_err("failed to register helper for pf: %u port: %u\n",
                               irc[i].tuple.src.l3num, ports[i]);
+                       ports_c = i;
                        nf_conntrack_irc_fini();
                        return ret;
                }
index 7523a575f6d16ded0c5724156cfe280598c5c505..3fcbaab83b3d34c728c2e55f55746156e78d2d89 100644 (file)
@@ -223,6 +223,7 @@ static int __init nf_conntrack_sane_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %d port: %d\n",
                                       sane[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_sane_fini();
                                return ret;
                        }
index 3e06402739e0acfd946fc8de04116aef0feea4ad..f72ba5587588fdaabb02232cbb33a0500ee2d422 100644 (file)
@@ -1669,6 +1669,7 @@ static int __init nf_conntrack_sip_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %u port: %u\n",
                                       sip[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_sip_fini();
                                return ret;
                        }
index f87e84ebcec3ac78cc4b941ec8828fab8c623f55..c026c472ea80d8dae3b57e6486c62d56903bca4e 100644 (file)
@@ -487,8 +487,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
        { }
 };
 
-#define NET_NF_CONNTRACK_MAX 2089
-
 static struct ctl_table nf_ct_netfilter_table[] = {
        {
                .procname       = "nf_conntrack_max",
index 36f9640664615bee6a640e11f4f18d1459952a81..2e65b5430fba6d09fa060aef44099360c033096c 100644 (file)
@@ -142,6 +142,7 @@ static int __init nf_conntrack_tftp_init(void)
                        if (ret) {
                                pr_err("failed to register helper for pf: %u port: %u\n",
                                       tftp[i][j].tuple.src.l3num, ports[i]);
+                               ports_c = i;
                                nf_conntrack_tftp_fini();
                                return ret;
                        }
index 5baa8e24e6ac1b512250c676cf0caf115f0b17f9..b19ad20a705ca7845ad21a5d272accba190f3f38 100644 (file)
  * Once the queue is registered it must reinject all packets it
  * receives, no matter what.
  */
-static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
 
 /* return EBUSY when somebody else is registered, return EEXIST if the
  * same handler is registered, return 0 in case of success. */
-void nf_register_queue_handler(const struct nf_queue_handler *qh)
+void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
 {
        /* should never happen, we only have one queueing backend in kernel */
-       WARN_ON(rcu_access_pointer(queue_handler));
-       rcu_assign_pointer(queue_handler, qh);
+       WARN_ON(rcu_access_pointer(net->nf.queue_handler));
+       rcu_assign_pointer(net->nf.queue_handler, qh);
 }
 EXPORT_SYMBOL(nf_register_queue_handler);
 
 /* The caller must flush their queue before this */
-void nf_unregister_queue_handler(void)
+void nf_unregister_queue_handler(struct net *net)
 {
-       RCU_INIT_POINTER(queue_handler, NULL);
-       synchronize_rcu();
+       RCU_INIT_POINTER(net->nf.queue_handler, NULL);
 }
 EXPORT_SYMBOL(nf_unregister_queue_handler);
 
@@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
        const struct nf_queue_handler *qh;
 
        rcu_read_lock();
-       qh = rcu_dereference(queue_handler);
+       qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net, ops);
        rcu_read_unlock();
@@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb,
        struct nf_queue_entry *entry = NULL;
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;
+       struct net *net = state->net;
 
        /* QUEUE == DROP if no one is waiting, to be safe. */
-       qh = rcu_dereference(queue_handler);
+       qh = rcu_dereference(net->nf.queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
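
The queue-handler pointer moves from a file-static global into struct net, making registration and lookup per network namespace, and nf_unregister_queue_handler() drops its synchronize_rcu() because the only backend now batches one grace period per net-exit batch (see the nfnetlink_queue hunks below). A toy model of the per-namespace slot, using C11 atomics to stand in for rcu_assign_pointer()/rcu_dereference(); grace periods are not modeled:

    #include <stdatomic.h>
    #include <stddef.h>

    struct queue_handler { int (*outfn)(void); };

    struct toy_net {
        _Atomic(const struct queue_handler *) queue_handler;
    };

    static void register_handler(struct toy_net *net,
                                 const struct queue_handler *qh)
    {
        /* release pairs with the acquire in lookup, like rcu_assign_pointer */
        atomic_store_explicit(&net->queue_handler, qh, memory_order_release);
    }

    static void unregister_handler(struct toy_net *net)
    {
        atomic_store_explicit(&net->queue_handler, NULL, memory_order_relaxed);
        /* grace period deferred to batched teardown, as in the patch */
    }

    static const struct queue_handler *lookup_handler(struct toy_net *net)
    {
        return atomic_load_explicit(&net->queue_handler, memory_order_acquire);
    }
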
index 4d292b933b5c5c83d205f844f1a876ebefca708c..cf7c74599cbe5e6b800b584bee75d60f62d84a73 100644 (file)
@@ -1724,9 +1724,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
 
        err = nf_tables_newexpr(ctx, &info, expr);
        if (err < 0)
-               goto err2;
+               goto err3;
 
        return expr;
+err3:
+       kfree(expr);
 err2:
        module_put(info.ops->type->owner);
 err1:
@@ -2647,6 +2649,8 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
        /* Only accept unspec with dump */
        if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
                return -EAFNOSUPPORT;
+       if (!nla[NFTA_SET_TABLE])
+               return -EINVAL;
 
        set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
        if (IS_ERR(set))
@@ -2944,24 +2948,20 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                 * jumps are already validated for that chain.
                 */
                list_for_each_entry(i, &set->bindings, list) {
-                       if (binding->flags & NFT_SET_MAP &&
+                       if (i->flags & NFT_SET_MAP &&
                            i->chain == binding->chain)
                                goto bind;
                }
 
+               iter.genmask    = nft_genmask_next(ctx->net);
                iter.skip       = 0;
                iter.count      = 0;
                iter.err        = 0;
                iter.fn         = nf_tables_bind_check_setelem;
 
                set->ops->walk(ctx, set, &iter);
-               if (iter.err < 0) {
-                       /* Destroy anonymous sets if binding fails */
-                       if (set->flags & NFT_SET_ANONYMOUS)
-                               nf_tables_set_destroy(ctx, set);
-
+               if (iter.err < 0)
                        return iter.err;
-               }
        }
 bind:
        binding->chain = ctx->chain;
@@ -3190,12 +3190,13 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
        if (nest == NULL)
                goto nla_put_failure;
 
-       args.cb         = cb;
-       args.skb        = skb;
-       args.iter.skip  = cb->args[0];
-       args.iter.count = 0;
-       args.iter.err   = 0;
-       args.iter.fn    = nf_tables_dump_setelem;
+       args.cb                 = cb;
+       args.skb                = skb;
+       args.iter.genmask       = nft_genmask_cur(ctx.net);
+       args.iter.skip          = cb->args[0];
+       args.iter.count         = 0;
+       args.iter.err           = 0;
+       args.iter.fn            = nf_tables_dump_setelem;
        set->ops->walk(&ctx, set, &args.iter);
 
        nla_nest_end(skb, nest);
@@ -4282,6 +4283,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                            binding->chain != chain)
                                continue;
 
+                       iter.genmask    = nft_genmask_next(ctx->net);
                        iter.skip       = 0;
                        iter.count      = 0;
                        iter.err        = 0;
index e9f8dffcc244573e0fe209a9f03318f2bc53968d..fb8b5892b5ffa8ec6b0cf35792991b9125f082eb 100644 (file)
@@ -143,7 +143,7 @@ next_rule:
        list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
 
                /* This rule is not active, skip. */
-               if (unlikely(rule->genmask & (1 << gencursor)))
+               if (unlikely(rule->genmask & gencursor))
                        continue;
 
                rulenum++;
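
nftables keeps a two-bit generation mask per object, where a set bit means inactive in that generation; the cached gencursor is now the ready-made bit rather than a bit index, saving the per-rule shift, and (in the nf_tables_api.c hunks above) set walks take their genmask from the iterator, so bind-time loop checks validate the next generation while dumps read the current one. A toy model of the scheme:

    #include <stdbool.h>
    #include <stdint.h>

    static unsigned int genbit;                 /* current generation: 0 or 1 */

    static unsigned int genmask_cur(void)  { return 1u << genbit; }
    static unsigned int genmask_next(void) { return 1u << !genbit; }

    /* matches the hunk's "rule->genmask & gencursor" skip test */
    static bool active(uint8_t objmask, unsigned int genmask)
    {
        return !(objmask & genmask);
    }
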
index aa93877ab6e2629bc31791135ace6ef474abd35e..5d36a0926b4a4859304fdd1808b428c87a2ee8c4 100644 (file)
@@ -557,7 +557,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 
        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
-               struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
+               struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);
 
                ts.sec = cpu_to_be64(kts.tv_sec);
                ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);
@@ -1482,21 +1482,29 @@ static int __net_init nfnl_queue_net_init(struct net *net)
                         net->nf.proc_netfilter, &nfqnl_file_ops))
                return -ENOMEM;
 #endif
+       nf_register_queue_handler(net, &nfqh);
        return 0;
 }
 
 static void __net_exit nfnl_queue_net_exit(struct net *net)
 {
+       nf_unregister_queue_handler(net);
 #ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
 #endif
 }
 
+static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
+{
+       synchronize_rcu();
+}
+
 static struct pernet_operations nfnl_queue_net_ops = {
-       .init   = nfnl_queue_net_init,
-       .exit   = nfnl_queue_net_exit,
-       .id     = &nfnl_queue_net_id,
-       .size   = sizeof(struct nfnl_queue_net),
+       .init           = nfnl_queue_net_init,
+       .exit           = nfnl_queue_net_exit,
+       .exit_batch     = nfnl_queue_net_exit_batch,
+       .id             = &nfnl_queue_net_id,
+       .size           = sizeof(struct nfnl_queue_net),
 };
 
 static int __init nfnetlink_queue_init(void)
@@ -1517,7 +1525,6 @@ static int __init nfnetlink_queue_init(void)
        }
 
        register_netdevice_notifier(&nfqnl_dev_notifier);
-       nf_register_queue_handler(&nfqh);
        return status;
 
 cleanup_netlink_notifier:
@@ -1529,7 +1536,6 @@ out:
 
 static void __exit nfnetlink_queue_fini(void)
 {
-       nf_unregister_queue_handler();
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
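
For context on the nfnetlink_queue hunks above: the queue handler registration moves from module init/exit into the pernet .init/.exit hooks so that each network namespace gets its own handler, and the new .exit_batch callback pays for a single synchronize_rcu() per batch of dying namespaces instead of one grace period each. A hypothetical module skeleton of the same shape (all names invented; this only compiles against kernel headers):

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>

static unsigned int demo_net_id;

struct demo_net { int dummy; };

static int __net_init demo_net_init(struct net *net)
{
        /* per-namespace setup, e.g. registering a handler for @net */
        return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
        /* per-namespace teardown: unregister the handler for @net */
}

static void __net_exit demo_net_exit_batch(struct list_head *net_exit_list)
{
        /* one RCU grace period covering every namespace in the batch */
        synchronize_rcu();
}

static struct pernet_operations demo_net_ops = {
        .init           = demo_net_init,
        .exit           = demo_net_exit,
        .exit_batch     = demo_net_exit_batch,
        .id             = &demo_net_id,
        .size           = sizeof(struct demo_net),
};

static int __init demo_init(void)
{
        return register_pernet_subsys(&demo_net_ops);
}

static void __exit demo_exit(void)
{
        unregister_pernet_subsys(&demo_net_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");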
index 137e308d5b24c0865336da92e97686d8fef0b9d1..81fbb450783e59932ecc1c67147fc0ca89b475d3 100644 (file)
@@ -54,7 +54,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
        const struct nf_conn_help *help;
        const struct nf_conntrack_tuple *tuple;
        const struct nf_conntrack_helper *helper;
-       long diff;
        unsigned int state;
 
        ct = nf_ct_get(pkt->skb, &ctinfo);
@@ -94,10 +93,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                return;
 #endif
        case NFT_CT_EXPIRATION:
-               diff = (long)jiffies - (long)ct->timeout.expires;
-               if (diff < 0)
-                       diff = 0;
-               *dest = jiffies_to_msecs(diff);
+               *dest = jiffies_to_msecs(nf_ct_expires(ct));
                return;
        case NFT_CT_HELPER:
                if (ct->master == NULL)
index 6fa016564f90cbf821a4ae45ac580b2e4e7c43ec..f39c53a159eba6cbe9586599b04147e8247d9523 100644 (file)
@@ -189,7 +189,6 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
        struct nft_hash_elem *he;
        struct rhashtable_iter hti;
        struct nft_set_elem elem;
-       u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
        int err;
 
        err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL);
@@ -218,7 +217,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
                        goto cont;
                if (nft_set_elem_expired(&he->ext))
                        goto cont;
-               if (!nft_set_elem_active(&he->ext, genmask))
+               if (!nft_set_elem_active(&he->ext, iter->genmask))
                        goto cont;
 
                elem.priv = he;
index 16c50b0dd426840f79bda0bb3ebbd28ecb2845e5..f4bad9dc15c48b0d8635632dc50179457b5c49c1 100644 (file)
@@ -227,7 +227,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
                        skb->pkt_type = value;
                break;
        case NFT_META_NFTRACE:
-               skb->nf_trace = 1;
+               skb->nf_trace = !!value;
                break;
        default:
                WARN_ON(1);
index f762094af7c1ca7e7684448381e7e49ee430045e..7201d57b5a932ebbb8d7fe72c691d5e69ff99784 100644 (file)
@@ -211,7 +211,6 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
        struct nft_rbtree_elem *rbe;
        struct nft_set_elem elem;
        struct rb_node *node;
-       u8 genmask = nft_genmask_cur(read_pnet(&set->pnet));
 
        spin_lock_bh(&nft_rbtree_lock);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
@@ -219,7 +218,7 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
                if (iter->count < iter->skip)
                        goto cont;
-               if (!nft_set_elem_active(&rbe->ext, genmask))
+               if (!nft_set_elem_active(&rbe->ext, iter->genmask))
                        goto cont;
 
                elem.priv = rbe;
index c69c892231d7b6b8699f20913847f64f1a758a42..2675d580c490b3de07562ce9174a0b4f69d55035 100644 (file)
@@ -612,7 +612,7 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems,
                return -EINVAL;
 
        if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
-           target_offset + sizeof(struct compat_xt_standard_target) != next_offset)
+           COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
                return -EINVAL;
 
        /* compat_xt_entry match has less strict alignment requirements,
@@ -694,7 +694,7 @@ int xt_check_entry_offsets(const void *base,
                return -EINVAL;
 
        if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
-           target_offset + sizeof(struct xt_standard_target) != next_offset)
+           XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
                return -EINVAL;
 
        return xt_check_entry_match(elems, base + target_offset,
index 879185fe183fd0ffa2bf037725faf0a8eb5f166a..9a3eb7a0ebf4bde84f1f323aac88ca0cbc8a8f80 100644 (file)
@@ -137,11 +137,23 @@ static bool is_flow_key_valid(const struct sw_flow_key *key)
        return !!key->eth.type;
 }
 
+static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
+                            __be16 ethertype)
+{
+       if (skb->ip_summed == CHECKSUM_COMPLETE) {
+               __be16 diff[] = { ~(hdr->h_proto), ethertype };
+
+               skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+                                       ~skb->csum);
+       }
+
+       hdr->h_proto = ethertype;
+}
+
 static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_mpls *mpls)
 {
        __be32 *new_mpls_lse;
-       struct ethhdr *hdr;
 
        /* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
        if (skb->encapsulation)
@@ -160,9 +172,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 
        skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);
 
-       hdr = eth_hdr(skb);
-       hdr->h_proto = mpls->mpls_ethertype;
-
+       update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
        if (!skb->inner_protocol)
                skb_set_inner_protocol(skb, skb->protocol);
        skb->protocol = mpls->mpls_ethertype;
@@ -193,7 +203,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
         * field correctly in the presence of VLAN tags.
         */
        hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
-       hdr->h_proto = ethertype;
+       update_ethertype(skb, hdr, ethertype);
        if (eth_p_mpls(skb->protocol))
                skb->protocol = ethertype;
 
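
The update_ethertype() helper introduced above exists because, with CHECKSUM_COMPLETE, skb->csum holds the ones-complement sum of the whole packet: rewriting the 16-bit h_proto in place silently invalidates it unless the old value is folded out and the new one folded in. A minimal userspace sketch of that incremental update (RFC 1624), with made-up values:

#include <stdint.h>
#include <stdio.h>

/* fold a 32-bit ones-complement sum into 16 bits */
static uint16_t csum_fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* incremental update per RFC 1624: new = ~(~old + ~m + m') */
static uint16_t csum_update16(uint16_t old_csum, uint16_t old_val,
                              uint16_t new_val)
{
        uint32_t sum = (uint16_t)~old_csum;

        sum += (uint16_t)~old_val;
        sum += new_val;
        return ~csum_fold(sum);
}

int main(void)
{
        /* checksum over two words {0x0800, 0x1234}, computed naively */
        uint16_t csum = ~csum_fold(0x0800 + 0x1234);
        /* rewrite 0x0800 (IPv4) to 0x8847 (MPLS) incrementally ... */
        uint16_t upd  = csum_update16(csum, 0x0800, 0x8847);
        /* ... and recompute from scratch for comparison */
        uint16_t ref  = ~csum_fold(0x8847 + 0x1234);

        printf("incremental=%#06x recomputed=%#06x\n", upd, ref);
        return 0;
}

The two results agree, which is the property the __be16 diff[] trick in the hunk relies on.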
index 3d5feede962dc584408e9b5a79e4b723f5308319..d84312584ee46a5fa82040c1802b98897f4b5aef 100644 (file)
@@ -818,8 +818,18 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                 */
                state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
                __ovs_ct_update_key(key, state, &info->zone, exp->master);
-       } else
-               return __ovs_ct_lookup(net, key, info, skb);
+       } else {
+               struct nf_conn *ct;
+               int err;
+
+               err = __ovs_ct_lookup(net, key, info, skb);
+               if (err)
+                       return err;
+
+               ct = (struct nf_conn *)skb->nfct;
+               if (ct)
+                       nf_ct_deliver_cached_events(ct);
+       }
 
        return 0;
 }
index 4040eb92d9c9dc3b1ce0ba41227b6b1e9bd32eff..b43c4015b2f79678bb5a2edffdd4e57cf9bdb880 100644 (file)
@@ -93,6 +93,7 @@
 #include <net/inet_common.h>
 #endif
 #include <linux/bpf.h>
+#include <net/compat.h>
 
 #include "internal.h"
 
@@ -1340,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
                                      struct sk_buff *skb,
                                      unsigned int num)
 {
-       return reciprocal_scale(skb_get_hash(skb), num);
+       return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
 }
 
 static unsigned int fanout_demux_lb(struct packet_fanout *f,
@@ -1926,13 +1927,11 @@ retry:
                goto out_unlock;
        }
 
-       sockc.tsflags = 0;
+       sockc.tsflags = sk->sk_tsflags;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
-               if (unlikely(err)) {
-                       err = -EINVAL;
+               if (unlikely(err))
                        goto out_unlock;
-               }
        }
 
        skb->protocol = proto;
@@ -2677,7 +2676,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
        }
 
-       sockc.tsflags = 0;
+       sockc.tsflags = po->sk.sk_tsflags;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(&po->sk, msg, &sockc);
                if (unlikely(err))
@@ -2880,7 +2879,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        if (unlikely(!(dev->flags & IFF_UP)))
                goto out_unlock;
 
-       sockc.tsflags = 0;
+       sockc.tsflags = sk->sk_tsflags;
        sockc.mark = sk->sk_mark;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
@@ -3940,6 +3939,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 }
 
 
+#ifdef CONFIG_COMPAT
+static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
+                                   char __user *optval, unsigned int optlen)
+{
+       struct packet_sock *po = pkt_sk(sock->sk);
+
+       if (level != SOL_PACKET)
+               return -ENOPROTOOPT;
+
+       if (optname == PACKET_FANOUT_DATA &&
+           po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
+               optval = (char __user *)get_compat_bpf_fprog(optval);
+               if (!optval)
+                       return -EFAULT;
+               optlen = sizeof(struct sock_fprog);
+       }
+
+       return packet_setsockopt(sock, level, optname, optval, optlen);
+}
+#endif
+
 static int packet_notifier(struct notifier_block *this,
                           unsigned long msg, void *ptr)
 {
@@ -4416,6 +4436,9 @@ static const struct proto_ops packet_ops = {
        .shutdown =     sock_no_shutdown,
        .setsockopt =   packet_setsockopt,
        .getsockopt =   packet_getsockopt,
+#ifdef CONFIG_COMPAT
+       .compat_setsockopt = compat_packet_setsockopt,
+#endif
        .sendmsg =      packet_sendmsg,
        .recvmsg =      packet_recvmsg,
        .mmap =         packet_mmap,
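
Background for the compat_setsockopt hook above: struct sock_fprog ends in a user pointer, so a 32-bit task hands the 64-bit kernel an 8-byte structure where the native layout is 16 bytes with the pointer at a different offset; PACKET_FANOUT_DATA payloads from compat tasks must therefore be translated before the common code can reuse them. A userspace sketch of the layout mismatch (struct names invented; sizes and offsets are for a typical LP64 target):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct native_fprog {                  /* what the 64-bit kernel expects */
        uint16_t len;
        const void *filter;            /* 8-byte user pointer */
};

struct compat_fprog {                  /* what a 32-bit task actually passed */
        uint16_t len;
        uint32_t filter;               /* 4-byte user pointer */
};

int main(void)
{
        printf("native: size=%zu ptr@%zu, compat: size=%zu ptr@%zu\n",
               sizeof(struct native_fprog),
               offsetof(struct native_fprog, filter),
               sizeof(struct compat_fprog),
               offsetof(struct compat_fprog, filter));
        return 0;
}

A blind copy_from_user() of the native layout would read past the compat structure and misinterpret the pointer, which is why the hook rewrites optval and optlen first.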
index 310cabce23111cfaa45af97adef28f4d41d1bbda..7c2a65a6af5c6ab96eeef220c854ba01e846e35c 100644 (file)
@@ -111,7 +111,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
                }
        }
 
-       if (conn->c_version < RDS_PROTOCOL(3,1)) {
+       if (conn->c_version < RDS_PROTOCOL(3, 1)) {
                printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
                       " no longer supported\n",
                       &conn->c_faddr,
index 6b12b68541ae96fb8be76e72cb8d0e6f8c89abee..814173b466d9a80b21da91c5e38055221e9abcf8 100644 (file)
@@ -95,8 +95,9 @@ out:
  */
 static void rds_loop_inc_free(struct rds_incoming *inc)
 {
-        struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
-        rds_message_put(rm);
+       struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
+
+       rds_message_put(rm);
 }
 
 /* we need to at least give the thread something to succeed */
index 80256b08eac0388d702f5981f00703d2118f5cce..387df5f32e49b9084363dc3da3ff62aa252e5507 100644 (file)
@@ -74,6 +74,7 @@ enum {
        RDS_CONN_CONNECTING,
        RDS_CONN_DISCONNECTING,
        RDS_CONN_UP,
+       RDS_CONN_RESETTING,
        RDS_CONN_ERROR,
 };
 
@@ -813,6 +814,7 @@ void rds_connect_worker(struct work_struct *);
 void rds_shutdown_worker(struct work_struct *);
 void rds_send_worker(struct work_struct *);
 void rds_recv_worker(struct work_struct *);
+void rds_connect_path_complete(struct rds_connection *conn, int curr);
 void rds_connect_complete(struct rds_connection *conn);
 
 /* transport.c */
index c0be1ecd11c99ce57360687fa2b34f312c0cf0d6..8413f6c99e13519d7cbfa1ec6ce3877a0059caa7 100644 (file)
@@ -561,5 +561,7 @@ void rds_inc_info_copy(struct rds_incoming *inc,
                minfo.fport = inc->i_hdr.h_dport;
        }
 
+       minfo.flags = 0;
+
        rds_info_copy(iter, &minfo, sizeof(minfo));
 }
index c9cdb358ea885e3e356cc675b579f1313ed94ff9..b1962f8e30f7bfc9fa77fb9bb413c42c81018e57 100644 (file)
@@ -99,6 +99,7 @@ void rds_send_reset(struct rds_connection *conn)
        list_splice_init(&conn->c_retrans, &conn->c_send_queue);
        spin_unlock_irqrestore(&conn->c_lock, flags);
 }
+EXPORT_SYMBOL_GPL(rds_send_reset);
 
 static int acquire_in_xmit(struct rds_connection *conn)
 {
index c173f69e1479bfaf643b9e5c69f4c9a151b18c67..e381bbcd9cc1c37d93eb19647b2a2262d8d2bae9 100644 (file)
@@ -102,7 +102,8 @@ int rds_sysctl_init(void)
        rds_sysctl_reconnect_min = msecs_to_jiffies(1);
        rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
 
-       rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table);
+       rds_sysctl_reg_table =
+               register_net_sysctl(&init_net, "net/rds", rds_sysctl_rds_table);
        if (!rds_sysctl_reg_table)
                return -ENOMEM;
        return 0;
index 86187dad14403100ff6199ee79beed5262de818f..c8a7b4c90190cafe8dc6fefe3e0a1935c558c357 100644 (file)
@@ -126,9 +126,81 @@ void rds_tcp_restore_callbacks(struct socket *sock,
 }
 
 /*
- * This is the only path that sets tc->t_sock.  Send and receive trust that
- * it is set.  The RDS_CONN_UP bit protects those paths from being
- * called while it isn't set.
+ * rds_tcp_reset_callbacks() switches the connection to the new sock
+ * and releases the existing tc->t_sock.
+ *
+ * The only functions that set tc->t_sock are rds_tcp_set_callbacks
+ * and rds_tcp_reset_callbacks.  Send and receive trust that
+ * it is set.  The absence of the RDS_CONN_UP bit protects those
+ * paths from being called while it isn't set.
+ */
+void rds_tcp_reset_callbacks(struct socket *sock,
+                            struct rds_connection *conn)
+{
+       struct rds_tcp_connection *tc = conn->c_transport_data;
+       struct socket *osock = tc->t_sock;
+
+       if (!osock)
+               goto newsock;
+
+       /* Need to resolve a duelling SYN between peers.
+        * We have an outstanding SYN to this peer, which may
+        * potentially have transitioned to the RDS_CONN_UP state,
+        * so we must quiesce any send threads before resetting
+        * c_transport_data. We quiesce these threads by setting
+        * c_state to something other than RDS_CONN_UP, and then
+        * waiting for any existing threads in rds_send_xmit to
+        * complete release_in_xmit(). (Subsequent threads entering
+        * rds_send_xmit() will bail on !rds_conn_up().)
+        *
+        * However an incoming syn-ack at this point would end up
+        * marking the conn as RDS_CONN_UP, and would again permit
+        * rds_send_xmit() threads through, so ideally we would
+        * synchronize on RDS_CONN_UP after lock_sock(), but cannot
+        * do that: waiting on !RDS_IN_XMIT after lock_sock() may
+        * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
+        * would not get set. As a result, we set c_state to
+        * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
+        * cannot mark rds_conn_path_up() in the window before lock_sock().
+        */
+       atomic_set(&conn->c_state, RDS_CONN_RESETTING);
+       wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
+       lock_sock(osock->sk);
+       /* reset receive side state for rds_tcp_data_recv() for osock  */
+       if (tc->t_tinc) {
+               rds_inc_put(&tc->t_tinc->ti_inc);
+               tc->t_tinc = NULL;
+       }
+       tc->t_tinc_hdr_rem = sizeof(struct rds_header);
+       tc->t_tinc_data_rem = 0;
+       tc->t_sock = NULL;
+
+       write_lock_bh(&osock->sk->sk_callback_lock);
+
+       osock->sk->sk_user_data = NULL;
+       osock->sk->sk_data_ready = tc->t_orig_data_ready;
+       osock->sk->sk_write_space = tc->t_orig_write_space;
+       osock->sk->sk_state_change = tc->t_orig_state_change;
+       write_unlock_bh(&osock->sk->sk_callback_lock);
+       release_sock(osock->sk);
+       sock_release(osock);
+newsock:
+       rds_send_reset(conn);
+       lock_sock(sock->sk);
+       write_lock_bh(&sock->sk->sk_callback_lock);
+       tc->t_sock = sock;
+       sock->sk->sk_user_data = conn;
+       sock->sk->sk_data_ready = rds_tcp_data_ready;
+       sock->sk->sk_write_space = rds_tcp_write_space;
+       sock->sk->sk_state_change = rds_tcp_state_change;
+
+       write_unlock_bh(&sock->sk->sk_callback_lock);
+       release_sock(sock->sk);
+}
+
+/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
+ * above rds_tcp_reset_callbacks for notes about synchronization
+ * with the data path.
  */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
 {
@@ -544,7 +616,7 @@ static int rds_tcp_init(void)
 
        ret = rds_tcp_recv_init();
        if (ret)
-               goto out_slab;
+               goto out_pernet;
 
        ret = rds_trans_register(&rds_tcp_transport);
        if (ret)
@@ -556,8 +628,9 @@ static int rds_tcp_init(void)
 
 out_recv:
        rds_tcp_recv_exit();
-out_slab:
+out_pernet:
        unregister_pernet_subsys(&rds_tcp_net_ops);
+out_slab:
        kmem_cache_destroy(rds_tcp_conn_slab);
 out:
        return ret;
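
The long comment in rds_tcp_reset_callbacks() above describes a quiesce protocol: move c_state away from RDS_CONN_UP so that new senders bail out, then wait for RDS_IN_XMIT to drain before swapping sockets and callbacks. A minimal pthread model of that idea, with a mutex and condition variable standing in for the kernel's atomics and waitqueue (all names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum state { CONN_UP, CONN_RESETTING };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static enum state c_state = CONN_UP;
static bool in_xmit;

static bool try_begin_xmit(void)        /* models the rds_send_xmit() entry check */
{
        bool ok;

        pthread_mutex_lock(&lock);
        ok = (c_state == CONN_UP && !in_xmit);
        if (ok)
                in_xmit = true;
        pthread_mutex_unlock(&lock);
        return ok;
}

static void end_xmit(void)              /* models release_in_xmit() */
{
        pthread_mutex_lock(&lock);
        in_xmit = false;
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
}

static void quiesce_for_reset(void)     /* heart of the reset above */
{
        pthread_mutex_lock(&lock);
        c_state = CONN_RESETTING;       /* new senders now bail out */
        while (in_xmit)                 /* drain the current sender */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        puts("quiesced: safe to swap socket and callbacks");
}

int main(void)
{
        if (try_begin_xmit())
                end_xmit();
        quiesce_for_reset();
        return 0;
}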
index 41c228300525c029df02472b609ada1a71cea98e..7940babf6c718617c729247ac348b2b6ed68a53a 100644 (file)
@@ -50,6 +50,7 @@ struct rds_tcp_statistics {
 void rds_tcp_tune(struct socket *sock);
 void rds_tcp_nonagle(struct socket *sock);
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
+void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn);
 void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
@@ -82,7 +83,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 void rds_tcp_xmit_prepare(struct rds_connection *conn);
 void rds_tcp_xmit_complete(struct rds_connection *conn);
 int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
-                unsigned int hdr_off, unsigned int sg, unsigned int off);
+                unsigned int hdr_off, unsigned int sg, unsigned int off);
 void rds_tcp_write_space(struct sock *sk);
 
 /* tcp_stats.c */
index fb82e0a0bf89361e005a23c371f0e95dfa4970bc..f6e95d60db54b488e9e67375bf96b6cbda82d788 100644 (file)
@@ -54,19 +54,19 @@ void rds_tcp_state_change(struct sock *sk)
 
        rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
 
-       switch(sk->sk_state) {
-               /* ignore connecting sockets as they make progress */
-               case TCP_SYN_SENT:
-               case TCP_SYN_RECV:
-                       break;
-               case TCP_ESTABLISHED:
-                       rds_connect_complete(conn);
-                       break;
-               case TCP_CLOSE_WAIT:
-               case TCP_CLOSE:
-                       rds_conn_drop(conn);
-               default:
-                       break;
+       switch (sk->sk_state) {
+       /* ignore connecting sockets as they make progress */
+       case TCP_SYN_SENT:
+       case TCP_SYN_RECV:
+               break;
+       case TCP_ESTABLISHED:
+               rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+               break;
+       case TCP_CLOSE_WAIT:
+       case TCP_CLOSE:
+               rds_conn_drop(conn);
+       default:
+               break;
        }
 out:
        read_unlock_bh(&sk->sk_callback_lock);
index 4bf4befe5066a0084a7430cc39f9fd0c08a9a20e..245542ca4718dd69f6f3e87903136d8307e4f055 100644 (file)
@@ -78,7 +78,6 @@ int rds_tcp_accept_one(struct socket *sock)
        struct inet_sock *inet;
        struct rds_tcp_connection *rs_tcp = NULL;
        int conn_state;
-       struct sock *nsk;
 
        if (!sock) /* module unload or netns delete in progress */
                return -ENETUNREACH;
@@ -136,26 +135,21 @@ int rds_tcp_accept_one(struct socket *sock)
                    !conn->c_outgoing) {
                        goto rst_nsk;
                } else {
-                       atomic_set(&conn->c_state, RDS_CONN_CONNECTING);
-                       wait_event(conn->c_waitq,
-                                  !test_bit(RDS_IN_XMIT, &conn->c_flags));
-                       rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+                       rds_tcp_reset_callbacks(new_sock, conn);
                        conn->c_outgoing = 0;
+                       /* rds_connect_path_complete() marks RDS_CONN_UP */
+                       rds_connect_path_complete(conn, RDS_CONN_RESETTING);
                }
+       } else {
+               rds_tcp_set_callbacks(new_sock, conn);
+               rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
        }
-       rds_tcp_set_callbacks(new_sock, conn);
-       rds_connect_complete(conn); /* marks RDS_CONN_UP */
        new_sock = NULL;
        ret = 0;
        goto out;
 rst_nsk:
        /* reset the newly returned accept sock and bail */
-       nsk = new_sock->sk;
-       rds_tcp_stats_inc(s_tcp_listen_closed_stale);
-       nsk->sk_user_data = NULL;
-       nsk->sk_prot->disconnect(nsk, 0);
-       tcp_done(nsk);
-       new_sock = NULL;
+       kernel_sock_shutdown(new_sock, SHUT_RDWR);
        ret = 0;
 out:
        if (rs_tcp)
index c3196f9d070aaf9429947969120662f974ae44a7..6e6a7111a03406ae9a430c3c5efe6f56fe08a8da 100644 (file)
@@ -171,7 +171,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
        while (left) {
                if (!tinc) {
                        tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
-                                               arg->gfp);
+                                               arg->gfp);
                        if (!tinc) {
                                desc->error = -ENOMEM;
                                goto out;
index 22d0f2020a79f834d3a2a6536253e16c690baa41..618be69c9c3b7668c040584b1c4b40a8058f5e38 100644 (file)
@@ -66,19 +66,19 @@ void rds_tcp_xmit_complete(struct rds_connection *conn)
 static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
 {
        struct kvec vec = {
-                .iov_base = data,
-                .iov_len = len,
+               .iov_base = data,
+               .iov_len = len,
+       };
+       struct msghdr msg = {
+               .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
        };
-        struct msghdr msg = {
-                .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
-        };
 
        return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
 }
 
 /* the core send_sem serializes this with other xmit and shutdown */
 int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
-                unsigned int hdr_off, unsigned int sg, unsigned int off)
+                unsigned int hdr_off, unsigned int sg, unsigned int off)
 {
        struct rds_tcp_connection *tc = conn->c_transport_data;
        int done = 0;
@@ -196,7 +196,7 @@ void rds_tcp_write_space(struct sock *sk)
        tc->t_last_seen_una = rds_tcp_snd_una(tc);
        rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+       if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
index 454aa6d23327b2a302bcfc72b6b3d857656e33ed..4a323045719b5c0aa0407916a772fe65aa900b0b 100644 (file)
@@ -71,9 +71,9 @@
 struct workqueue_struct *rds_wq;
 EXPORT_SYMBOL_GPL(rds_wq);
 
-void rds_connect_complete(struct rds_connection *conn)
+void rds_connect_path_complete(struct rds_connection *conn, int curr)
 {
-       if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) {
+       if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) {
                printk(KERN_WARNING "%s: Cannot transition to state UP, "
                                "current state is %d\n",
                                __func__,
@@ -90,6 +90,12 @@ void rds_connect_complete(struct rds_connection *conn)
        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
        queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 }
+EXPORT_SYMBOL_GPL(rds_connect_path_complete);
+
+void rds_connect_complete(struct rds_connection *conn)
+{
+       rds_connect_path_complete(conn, RDS_CONN_CONNECTING);
+}
 EXPORT_SYMBOL_GPL(rds_connect_complete);
 
 /*
index f3afd1d60d3c7a7880993554c6375754fb4c6ff8..2ffd3e30c6434e62333ac4eefdc1b8fea50dfbe1 100644 (file)
@@ -140,8 +140,7 @@ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
        rds_info_iter_unmap(iter);
        down_read(&rds_trans_sem);
 
-       for (i = 0; i < RDS_TRANS_COUNT; i++)
-       {
+       for (i = 0; i < RDS_TRANS_COUNT; i++) {
                trans = transports[i];
                if (!trans || !trans->stats_info_copy)
                        continue;
index 79c4abcfa6b4ee86fbd92358f5585f88b74319f5..0a6394754e81db5469906b92f4b40046444bdb42 100644 (file)
@@ -164,7 +164,8 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
                rose_frames_acked(sk, nr);
                if (ns == rose->vr) {
                        rose_start_idletimer(sk);
-                       if (sock_queue_rcv_skb(sk, skb) == 0) {
+                       if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+                           __sock_queue_rcv_skb(sk, skb) == 0) {
                                rose->vr = (rose->vr + 1) % ROSE_MODULUS;
                                queued = 1;
                        } else {
index 6b726a046a7d47995ecc281f67979073fe02c102..bab56ed649ba034847375de135b7b17357f0e532 100644 (file)
@@ -1162,9 +1162,7 @@ static int rxkad_init(void)
        /* pin the cipher we need so that the crypto layer doesn't invoke
         * keventd to go get it */
        rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(rxkad_ci))
-               return PTR_ERR(rxkad_ci);
-       return 0;
+       return PTR_ERR_OR_ZERO(rxkad_ci);
 }
 
 /*
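
PTR_ERR_OR_ZERO() in the rxkad hunk collapses the usual IS_ERR()/PTR_ERR() pair into one expression. A userspace sketch of the ERR_PTR convention behind it, assuming the kernel's rule that the top 4095 values of the address space encode negative errnos:

#include <stdio.h>

#define MAX_ERRNO 4095

static long is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err_or_zero(const void *ptr)
{
        return is_err(ptr) ? (long)ptr : 0;
}

int main(void)
{
        void *ok  = &ok;                 /* an ordinary pointer */
        void *bad = (void *)(long)-12;   /* models ERR_PTR(-ENOMEM) */

        printf("ok=%ld bad=%ld\n", ptr_err_or_zero(ok), ptr_err_or_zero(bad));
        return 0;
}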
index 336774a535c3959f4f25b05d1732c014d0d1763c..c7a0b0d481c08ab7533d273e57c1897c2760d1d0 100644 (file)
@@ -1118,7 +1118,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
                nla_nest_end(skb, nest);
                ret = skb->len;
        } else
-               nla_nest_cancel(skb, nest);
+               nlmsg_trim(skb, b);
 
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        if (NETLINK_CB(cb->skb).portid && ret)
index 658046dfe02d7210501ee9f0324158b50f6e7858..ea4a2fef1b71c506a89ca668c115db367aaa79a6 100644 (file)
@@ -106,9 +106,9 @@ int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
 }
 EXPORT_SYMBOL_GPL(ife_get_meta_u16);
 
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
 {
-       mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL);
+       mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
        if (!mi->metaval)
                return -ENOMEM;
 
@@ -116,9 +116,9 @@ int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval)
 }
 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
 
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval)
+int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
 {
-       mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL);
+       mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
        if (!mi->metaval)
                return -ENOMEM;
 
@@ -240,10 +240,10 @@ static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for an existing action
 */
 static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
-                               void *val, int len)
+                               void *val, int len, bool exists)
 {
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;
@@ -251,11 +251,13 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
        if (!ops) {
                ret = -ENOENT;
 #ifdef CONFIG_MODULES
-               spin_unlock_bh(&ife->tcf_lock);
+               if (exists)
+                       spin_unlock_bh(&ife->tcf_lock);
                rtnl_unlock();
                request_module("ifemeta%u", metaid);
                rtnl_lock();
-               spin_lock_bh(&ife->tcf_lock);
+               if (exists)
+                       spin_lock_bh(&ife->tcf_lock);
                ops = find_ife_oplist(metaid);
 #endif
        }
@@ -272,10 +274,10 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock
+ * under ife->tcf_lock for an existing action
 */
 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
-                       int len)
+                       int len, bool atomic)
 {
        struct tcf_meta_info *mi = NULL;
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
@@ -284,7 +286,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
        if (!ops)
                return -ENOENT;
 
-       mi = kzalloc(sizeof(*mi), GFP_KERNEL);
+       mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
        if (!mi) {
                /* put back what find_ife_oplist took */
                module_put(ops->owner);
@@ -294,7 +296,7 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
        mi->metaid = metaid;
        mi->ops = ops;
        if (len > 0) {
-               ret = ops->alloc(mi, metaval);
+               ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
                if (ret != 0) {
                        kfree(mi);
                        module_put(ops->owner);
@@ -313,11 +315,13 @@ static int use_all_metadata(struct tcf_ife_info *ife)
        int rc = 0;
        int installed = 0;
 
+       read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
-               rc = add_metainfo(ife, o->metaid, NULL, 0);
+               rc = add_metainfo(ife, o->metaid, NULL, 0, true);
                if (rc == 0)
                        installed += 1;
        }
+       read_unlock(&ife_mod_lock);
 
        if (installed)
                return 0;
@@ -385,8 +389,9 @@ static void tcf_ife_cleanup(struct tc_action *a, int bind)
        spin_unlock_bh(&ife->tcf_lock);
 }
 
-/* under ife->tcf_lock */
-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
+/* under ife->tcf_lock for an existing action */
+static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+                            bool exists)
 {
        int len = 0;
        int rc = 0;
@@ -398,11 +403,11 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb)
                        val = nla_data(tb[i]);
                        len = nla_len(tb[i]);
 
-                       rc = load_metaops_and_vet(ife, i, val, len);
+                       rc = load_metaops_and_vet(ife, i, val, len, exists);
                        if (rc != 0)
                                return rc;
 
-                       rc = add_metainfo(ife, i, val, len);
+                       rc = add_metainfo(ife, i, val, len, exists);
                        if (rc)
                                return rc;
                }
@@ -474,7 +479,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        saddr = nla_data(tb[TCA_IFE_SMAC]);
        }
 
-       spin_lock_bh(&ife->tcf_lock);
+       if (exists)
+               spin_lock_bh(&ife->tcf_lock);
        ife->tcf_action = parm->action;
 
        if (parm->flags & IFE_ENCODE) {
@@ -504,11 +510,12 @@ metadata_parse_err:
                        if (ret == ACT_P_CREATED)
                                _tcf_ife_cleanup(a, bind);
 
-                       spin_unlock_bh(&ife->tcf_lock);
+                       if (exists)
+                               spin_unlock_bh(&ife->tcf_lock);
                        return err;
                }
 
-               err = populate_metalist(ife, tb2);
+               err = populate_metalist(ife, tb2, exists);
                if (err)
                        goto metadata_parse_err;
 
@@ -523,12 +530,14 @@ metadata_parse_err:
                        if (ret == ACT_P_CREATED)
                                _tcf_ife_cleanup(a, bind);
 
-                       spin_unlock_bh(&ife->tcf_lock);
+                       if (exists)
+                               spin_unlock_bh(&ife->tcf_lock);
                        return err;
                }
        }
 
-       spin_unlock_bh(&ife->tcf_lock);
+       if (exists)
+               spin_unlock_bh(&ife->tcf_lock);
 
        if (ret == ACT_P_CREATED)
                tcf_hash_insert(tn, a);
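
The common thread in the act_ife hunks above: when the action already exists, ife->tcf_lock may be held across these calls, so sleeping GFP_KERNEL allocations are illegal and the allocation context has to be chosen by the caller. A hypothetical kernel-style helper showing the shape of that API change (names invented; compiles only against kernel headers):

#include <linux/slab.h>

struct demo_meta {
        void *metaval;
};

/* the caller, who knows whether a spinlock is held, picks the context */
static int demo_alloc_meta(struct demo_meta *mi, const void *src,
                           size_t len, gfp_t gfp)
{
        mi->metaval = kmemdup(src, len, gfp);
        return mi->metaval ? 0 : -ENOMEM;
}

/* process context:           demo_alloc_meta(mi, src, len, GFP_KERNEL);
 * under a spinlock (atomic): demo_alloc_meta(mi, src, len, GFP_ATOMIC);
 */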
index 9f002ada7074ccdb648433b443870562962c8694..d4bd19ee5822d4849cbf6c473d3245fb2cbff094 100644 (file)
@@ -121,10 +121,13 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
        }
 
        td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
-       if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
+       if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+               if (exists)
+                       tcf_hash_release(a, bind);
                return -EINVAL;
+       }
 
-       if (!tcf_hash_check(tn, index, a, bind)) {
+       if (!exists) {
                ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
                                      false);
                if (ret)
index 128942bc9e42e82ed11dad14448b69cd6858e541..1f5bd6ccbd2c6162f328aa209e2140bad99f1e3f 100644 (file)
@@ -181,7 +181,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
        if (!(at & AT_EGRESS)) {
                if (m->tcfm_ok_push)
-                       skb_push(skb2, skb->mac_len);
+                       skb_push_rcsum(skb2, skb->mac_len);
        }
 
        /* mirror is always swallowed */
index 330f14e302e884fa65fc1eea895b8ea4a123d783..c557789765dc6fed833a27b7d0353e3bda0a477f 100644 (file)
@@ -38,7 +38,7 @@ struct tcf_police {
        bool                    peak_present;
 };
 #define to_police(pc)  \
-       container_of(pc, struct tcf_police, common)
+       container_of(pc->priv, struct tcf_police, common)
 
 #define POL_TAB_MASK     15
 
@@ -119,14 +119,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
                                 struct nlattr *est, struct tc_action *a,
                                 int ovr, int bind)
 {
-       unsigned int h;
        int ret = 0, err;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
        struct tc_police *parm;
        struct tcf_police *police;
        struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
        struct tc_action_net *tn = net_generic(net, police_net_id);
-       struct tcf_hashinfo *hinfo = tn->hinfo;
        int size;
 
        if (nla == NULL)
@@ -145,7 +143,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
 
        if (parm->index) {
                if (tcf_hash_search(tn, a, parm->index)) {
-                       police = to_police(a->priv);
+                       police = to_police(a);
                        if (bind) {
                                police->tcf_bindcnt += 1;
                                police->tcf_refcnt += 1;
@@ -156,16 +154,15 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
                        /* not replacing */
                        return -EEXIST;
                }
+       } else {
+               ret = tcf_hash_create(tn, parm->index, NULL, a,
+                                     sizeof(*police), bind, false);
+               if (ret)
+                       return ret;
+               ret = ACT_P_CREATED;
        }
 
-       police = kzalloc(sizeof(*police), GFP_KERNEL);
-       if (police == NULL)
-               return -ENOMEM;
-       ret = ACT_P_CREATED;
-       police->tcf_refcnt = 1;
-       spin_lock_init(&police->tcf_lock);
-       if (bind)
-               police->tcf_bindcnt = 1;
+       police = to_police(a);
 override:
        if (parm->rate.rate) {
                err = -ENOMEM;
@@ -237,14 +234,8 @@ override:
                return ret;
 
        police->tcfp_t_c = ktime_get_ns();
-       police->tcf_index = parm->index ? parm->index :
-               tcf_hash_new_index(tn);
-       h = tcf_hash(police->tcf_index, POL_TAB_MASK);
-       spin_lock_bh(&hinfo->lock);
-       hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
-       spin_unlock_bh(&hinfo->lock);
+       tcf_hash_insert(tn, a);
 
-       a->priv = police;
        return ret;
 
 failure_unlock:
@@ -253,7 +244,7 @@ failure:
        qdisc_put_rtab(P_tab);
        qdisc_put_rtab(R_tab);
        if (ret == ACT_P_CREATED)
-               kfree(police);
+               tcf_hash_cleanup(a, est);
        return err;
 }
 
@@ -268,6 +259,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
        spin_lock(&police->tcf_lock);
 
        bstats_update(&police->tcf_bstats, skb);
+       tcf_lastuse_update(&police->tcf_tm);
 
        if (police->tcfp_ewma_rate &&
            police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
@@ -327,6 +319,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
                .refcnt = police->tcf_refcnt - ref,
                .bindcnt = police->tcf_bindcnt - bind,
        };
+       struct tcf_t t;
 
        if (police->rate_present)
                psched_ratecfg_getrate(&opt.rate, &police->rate);
@@ -340,6 +333,13 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        if (police->tcfp_ewma_rate &&
            nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
                goto nla_put_failure;
+
+       t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
+       t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
+       t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
+       if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
+               goto nla_put_failure;
+
        return skb->len;
 
 nla_put_failure:
index 730aacafc22d8638ccebce757cc5b6f8968bd39f..b3b7978f418214104d5d360aa4ea6154b36fc038 100644 (file)
@@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie)
        struct tc_cls_flower_offload offload = {0};
        struct tc_to_netdev tc;
 
-       if (!tc_should_offload(dev, 0))
+       if (!tc_should_offload(dev, tp, 0))
                return;
 
        offload.command = TC_CLSFLOWER_DESTROY;
@@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp,
        struct tc_cls_flower_offload offload = {0};
        struct tc_to_netdev tc;
 
-       if (!tc_should_offload(dev, flags))
+       if (!tc_should_offload(dev, tp, flags))
                return;
 
        offload.command = TC_CLSFLOWER_REPLACE;
@@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
        struct tc_cls_flower_offload offload = {0};
        struct tc_to_netdev tc;
 
-       if (!tc_should_offload(dev, 0))
+       if (!tc_should_offload(dev, tp, 0))
                return;
 
        offload.command = TC_CLSFLOWER_STATS;
index 079b43b3c5d24dc5eaf7bc5e7b7d0d4d0c672ca2..ffe593efe930680d4ec1a92b619256f12cb9ecda 100644 (file)
@@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, 0)) {
+       if (tc_should_offload(dev, tp, 0)) {
                offload.cls_u32->command = TC_CLSU32_DELETE_KNODE;
                offload.cls_u32->knode.handle = handle;
                dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
@@ -457,20 +457,21 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp,
        struct tc_to_netdev offload;
        int err;
 
+       if (!tc_should_offload(dev, tp, flags))
+               return tc_skip_sw(flags) ? -EINVAL : 0;
+
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, flags)) {
-               offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
-               offload.cls_u32->hnode.divisor = h->divisor;
-               offload.cls_u32->hnode.handle = h->handle;
-               offload.cls_u32->hnode.prio = h->prio;
+       offload.cls_u32->command = TC_CLSU32_NEW_HNODE;
+       offload.cls_u32->hnode.divisor = h->divisor;
+       offload.cls_u32->hnode.handle = h->handle;
+       offload.cls_u32->hnode.prio = h->prio;
 
-               err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                                   tp->protocol, &offload);
-               if (tc_skip_sw(flags))
-                       return err;
-       }
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                           tp->protocol, &offload);
+       if (tc_skip_sw(flags))
+               return err;
 
        return 0;
 }
@@ -484,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, 0)) {
+       if (tc_should_offload(dev, tp, 0)) {
                offload.cls_u32->command = TC_CLSU32_DELETE_HNODE;
                offload.cls_u32->hnode.divisor = h->divisor;
                offload.cls_u32->hnode.handle = h->handle;
@@ -507,27 +508,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp,
        offload.type = TC_SETUP_CLSU32;
        offload.cls_u32 = &u32_offload;
 
-       if (tc_should_offload(dev, flags)) {
-               offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
-               offload.cls_u32->knode.handle = n->handle;
-               offload.cls_u32->knode.fshift = n->fshift;
+       if (!tc_should_offload(dev, tp, flags))
+               return tc_skip_sw(flags) ? -EINVAL : 0;
+
+       offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE;
+       offload.cls_u32->knode.handle = n->handle;
+       offload.cls_u32->knode.fshift = n->fshift;
 #ifdef CONFIG_CLS_U32_MARK
-               offload.cls_u32->knode.val = n->val;
-               offload.cls_u32->knode.mask = n->mask;
+       offload.cls_u32->knode.val = n->val;
+       offload.cls_u32->knode.mask = n->mask;
 #else
-               offload.cls_u32->knode.val = 0;
-               offload.cls_u32->knode.mask = 0;
+       offload.cls_u32->knode.val = 0;
+       offload.cls_u32->knode.mask = 0;
 #endif
-               offload.cls_u32->knode.sel = &n->sel;
-               offload.cls_u32->knode.exts = &n->exts;
-               if (n->ht_down)
-                       offload.cls_u32->knode.link_handle = n->ht_down->handle;
-
-               err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                                   tp->protocol, &offload);
-               if (tc_skip_sw(flags))
-                       return err;
-       }
+       offload.cls_u32->knode.sel = &n->sel;
+       offload.cls_u32->knode.exts = &n->exts;
+       if (n->ht_down)
+               offload.cls_u32->knode.link_handle = n->ht_down->handle;
+
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                           tp->protocol, &offload);
+       if (tc_skip_sw(flags))
+               return err;
 
        return 0;
 }
@@ -863,7 +865,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        if (tb[TCA_U32_FLAGS]) {
                flags = nla_get_u32(tb[TCA_U32_FLAGS]);
                if (!tc_flags_valid(flags))
-                       return err;
+                       return -EINVAL;
        }
 
        n = (struct tc_u_knode *)*arg;
@@ -921,11 +923,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                ht->divisor = divisor;
                ht->handle = handle;
                ht->prio = tp->prio;
+
+               err = u32_replace_hw_hnode(tp, ht, flags);
+               if (err) {
+                       kfree(ht);
+                       return err;
+               }
+
                RCU_INIT_POINTER(ht->next, tp_c->hlist);
                rcu_assign_pointer(tp_c->hlist, ht);
                *arg = (unsigned long)ht;
 
-               u32_replace_hw_hnode(tp, ht, flags);
                return 0;
        }
 
index 64f71a2155f3ae8199123c0ee635fc9fa420c35b..ddf047df5361b8c0d05fead97ee1b5ccdeee4113 100644 (file)
@@ -607,6 +607,10 @@ void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool thr
        if (throttle)
                qdisc_throttled(wd->qdisc);
 
+       if (wd->last_expires == expires)
+               return;
+
+       wd->last_expires = expires;
        hrtimer_start(&wd->timer,
                      ns_to_ktime(expires),
                      HRTIMER_MODE_ABS_PINNED);
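
For context on this change and the htb hunk further below that uses it: re-arming an hrtimer with an unchanged deadline is pure overhead under load, so qdisc_watchdog_schedule_ns() now remembers the last programmed expiry and returns early when it repeats. A userspace model of the dedup (a counter stands in for hrtimer_start()):

#include <stdint.h>
#include <stdio.h>

struct watchdog {
        uint64_t last_expires;
        unsigned int rearms;
};

static void watchdog_schedule(struct watchdog *wd, uint64_t expires)
{
        if (wd->last_expires == expires)
                return;                 /* timer already armed for then */
        wd->last_expires = expires;
        wd->rearms++;                   /* stands in for hrtimer_start() */
}

int main(void)
{
        struct watchdog wd = { 0 };

        for (int i = 0; i < 1000; i++)
                watchdog_schedule(&wd, 5000000);   /* same deadline */
        watchdog_schedule(&wd, 6000000);
        printf("re-arms: %u\n", wd.rearms);        /* 2, not 1001 */
        return 0;
}

The cached last_expires naturally has to be invalidated whenever the watchdog is cancelled, or a later schedule for the same deadline would be wrongly skipped.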
index a63e879e89758fe954ab778fe776ddf6fc4e3536..bf8af2c43c2ce8b988b26f58d04074d367f987a6 100644 (file)
@@ -375,6 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                cl->deficit = cl->quantum;
        }
 
+       qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return err;
 }
@@ -407,6 +408,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 
                        bstats_update(&cl->bstats, skb);
                        qdisc_bstats_update(sch, skb);
+                       qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }
@@ -428,6 +430,7 @@ static unsigned int drr_drop(struct Qdisc *sch)
                if (cl->qdisc->ops->drop) {
                        len = cl->qdisc->ops->drop(cl->qdisc);
                        if (len > 0) {
+                               sch->qstats.backlog -= len;
                                sch->q.qlen--;
                                if (cl->qdisc->q.qlen == 0)
                                        list_del(&cl->alist);
@@ -463,6 +466,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
                        qdisc_reset(cl->qdisc);
                }
        }
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
 
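
The sch_drr hunks above, and the prio and hfsc ones below, all restore the same invariant: sch->qstats.backlog must move in lockstep with sch->q.qlen on enqueue, dequeue, drop and reset, or the byte counts reported by tc -s go stale or negative. A toy model of that bookkeeping:

#include <stdio.h>

struct qstats { unsigned int qlen, backlog; };

static void enqueue(struct qstats *q, unsigned int len)
{
        q->backlog += len;
        q->qlen++;
}

static void dequeue(struct qstats *q, unsigned int len)
{
        q->backlog -= len;
        q->qlen--;
}

static void reset(struct qstats *q)
{
        q->backlog = 0;         /* the piece the reset paths were missing */
        q->qlen = 0;
}

int main(void)
{
        struct qstats q = { 0, 0 };

        enqueue(&q, 1500);
        enqueue(&q, 100);
        dequeue(&q, 1500);
        printf("qlen=%u backlog=%u\n", q.qlen, q.backlog);
        reset(&q);
        return 0;
}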
index 2177eac0a61ed00c6c60655f577e0dd816fd2c08..2e4bd2c0a50c497fbc6cbfe5338cab981559b491 100644 (file)
@@ -37,14 +37,18 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
+       unsigned int prev_backlog;
+
        if (likely(skb_queue_len(&sch->q) < sch->limit))
                return qdisc_enqueue_tail(skb, sch);
 
+       prev_backlog = sch->qstats.backlog;
        /* queue full, remove one skb to fulfill the limit */
        __qdisc_queue_drop_head(sch, &sch->q);
        qdisc_qstats_drop(sch);
        qdisc_enqueue_tail(skb, sch);
 
+       qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
        return NET_XMIT_CN;
 }
 
index 6883a89715625fc7874780ef962e51dc7773a35b..da250b2e06ae3b2cc054ff1d31e29649a74f6e13 100644 (file)
@@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int uninitialized_var(ret);
+       unsigned int pkt_len;
        bool memory_limited;
 
        idx = fq_codel_classify(skb, sch, &ret);
@@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        prev_backlog = sch->qstats.backlog;
        prev_qlen = sch->q.qlen;
 
+       /* save this packet's length, as the skb may be dropped by fq_codel_drop() */
+       pkt_len = qdisc_pkt_len(skb);
        /* fq_codel_drop() is quite expensive, as it performs a linear search
         * in q->backlogs[] to find a fat flow.
         * So instead of dropping a single packet, drop half of its backlog
@@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         */
        ret = fq_codel_drop(sch, q->drop_batch_size);
 
-       q->drop_overlimit += prev_qlen - sch->q.qlen;
+       prev_qlen -= sch->q.qlen;
+       prev_backlog -= sch->qstats.backlog;
+       q->drop_overlimit += prev_qlen;
        if (memory_limited)
-               q->drop_overmemory += prev_qlen - sch->q.qlen;
-       /* As we dropped packet(s), better let upper stack know this */
-       qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
-                                 prev_backlog - sch->qstats.backlog);
+               q->drop_overmemory += prev_qlen;
 
-       return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS;
+       /* As we dropped packet(s), better let the upper stack know this.
+        * If we dropped a packet for this flow, return NET_XMIT_CN,
+        * but in this case, our parents won't increase their backlogs.
+        */
+       if (ret == idx) {
+               qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
+                                         prev_backlog - pkt_len);
+               return NET_XMIT_CN;
+       }
+       qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
+       return NET_XMIT_SUCCESS;
 }
 
 /* This is the specific function called from codel_dequeue()
@@ -649,7 +661,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                qs.backlog = q->backlogs[idx];
                qs.drops = flow->dropped;
        }
-       if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
+       if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
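
The subtle part of the fq_codel hunk above: when the packet just enqueued is itself among the victims (ret == idx), the function returns NET_XMIT_CN, and a parent qdisc seeing NET_XMIT_CN never accounted for that packet in the first place, so the backlog reduction reported upward must exclude it. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned int dropped_pkts = 5, dropped_bytes = 7000;
        unsigned int new_pkt_len = 1500;
        int new_pkt_dropped = 1;        /* models ret == idx */

        unsigned int report_pkts  = dropped_pkts;
        unsigned int report_bytes = dropped_bytes;

        if (new_pkt_dropped) {
                report_pkts  -= 1;              /* the parent never saw it */
                report_bytes -= new_pkt_len;
        }
        printf("tell parents: -%u pkts, -%u bytes\n",
               report_pkts, report_bytes);
        return 0;
}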
index 269dd71b3828c03867c5dbbe7b041ad4babcf1f1..f9e0e9c03d0a3fba3daf7fb420297ad7e859dccf 100644 (file)
@@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
        q->gso_skb = skb;
        q->qstats.requeues++;
+       qdisc_qstats_backlog_inc(q, skb);
        q->q.qlen++;    /* it's still part of the queue */
        __netif_schedule(q);
 
@@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
+                       qdisc_qstats_backlog_dec(q, skb);
                        q->q.qlen--;
                } else
                        skb = NULL;
index d783d7cc33487f284677e107f5083588f559f742..1ac9f9f03fe31fdf8d63eccfad0c7a13939183c6 100644 (file)
@@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
        q->eligible = RB_ROOT;
        INIT_LIST_HEAD(&q->droplist);
        qdisc_watchdog_cancel(&q->watchdog);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
 
@@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
        struct hfsc_sched *q = qdisc_priv(sch);
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_hfsc_qopt qopt;
-       struct hfsc_class *cl;
-       unsigned int i;
-
-       sch->qstats.backlog = 0;
-       for (i = 0; i < q->clhash.hashsize; i++) {
-               hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
-                       sch->qstats.backlog += cl->qdisc->qstats.backlog;
-       }
 
        qopt.defcls = q->defcls;
        if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
@@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (cl->qdisc->q.qlen == 1)
                set_active(cl, qdisc_pkt_len(skb));
 
+       qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
 
        return NET_XMIT_SUCCESS;
@@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch)
 
        qdisc_unthrottled(sch);
        qdisc_bstats_update(sch, skb);
+       qdisc_qstats_backlog_dec(sch, skb);
        sch->q.qlen--;
 
        return skb;
@@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch)
                        }
                        cl->qstats.drops++;
                        qdisc_qstats_drop(sch);
+                       sch->qstats.backlog -= len;
                        sch->q.qlen--;
                        return len;
                }
index f6bf5818ed4d265feafe9fd44319371fbc5c1d26..052f84d6cc236176b64adbd2c2c3c7f38b18cb43 100644 (file)
@@ -928,17 +928,10 @@ ok:
                }
        }
        qdisc_qstats_overlimit(sch);
-       if (likely(next_event > q->now)) {
-               if (!test_bit(__QDISC_STATE_DEACTIVATED,
-                             &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
-                       ktime_t time = ns_to_ktime(next_event);
-                       qdisc_throttled(q->watchdog.qdisc);
-                       hrtimer_start(&q->watchdog.timer, time,
-                                     HRTIMER_MODE_ABS_PINNED);
-               }
-       } else {
+       if (likely(next_event > q->now))
+               qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
+       else
                schedule_work(&q->work);
-       }
 fin:
        return skb;
 }
@@ -1014,7 +1007,9 @@ static void htb_work_func(struct work_struct *work)
        struct htb_sched *q = container_of(work, struct htb_sched, work);
        struct Qdisc *sch = q->watchdog.qdisc;
 
+       rcu_read_lock();
        __netif_schedule(qdisc_root(sch));
+       rcu_read_unlock();
 }
 
 static int htb_init(struct Qdisc *sch, struct nlattr *opt)
@@ -1145,8 +1140,10 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 
        if (!cl->level && cl->un.leaf.q)
                qlen = cl->un.leaf.q->q.qlen;
-       cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
-       cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
+       cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
+                                   INT_MIN, INT_MAX);
+       cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
+                                    INT_MIN, INT_MAX);
 
        if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
index 10adbc617905e4386becd7fe272ecad5f96ee086..8fe6999b642ac3c039f40621f4ca123d129718c4 100644 (file)
@@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
        return TC_H_MIN(classid) + 1;
 }
 
+static bool ingress_cl_offload(u32 classid)
+{
+       return true;
+}
+
 static unsigned long ingress_bind_filter(struct Qdisc *sch,
                                         unsigned long parent, u32 classid)
 {
@@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = {
        .put            =       ingress_put,
        .walk           =       ingress_walk,
        .tcf_chain      =       ingress_find_tcf,
+       .tcf_cl_offload =       ingress_cl_offload,
        .bind_tcf       =       ingress_bind_filter,
        .unbind_tcf     =       ingress_put,
 };
@@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
        }
 }
 
+static bool clsact_cl_offload(u32 classid)
+{
+       return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS);
+}
+
 static unsigned long clsact_bind_filter(struct Qdisc *sch,
                                        unsigned long parent, u32 classid)
 {
@@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = {
        .put            =       ingress_put,
        .walk           =       ingress_walk,
        .tcf_chain      =       clsact_find_tcf,
+       .tcf_cl_offload =       clsact_cl_offload,
        .bind_tcf       =       clsact_bind_filter,
        .unbind_tcf     =       ingress_put,
 };
index 205bed00dd3463c62696ecc61eb78f2c97b3d0c9..178f1630a036414629aed50256cf6a8c3a105984 100644 (file)
@@ -650,14 +650,14 @@ deliver:
 #endif
 
                        if (q->qdisc) {
+                               unsigned int pkt_len = qdisc_pkt_len(skb);
                                int err = qdisc_enqueue(skb, q->qdisc);
 
-                               if (unlikely(err != NET_XMIT_SUCCESS)) {
-                                       if (net_xmit_drop_count(err)) {
-                                               qdisc_qstats_drop(sch);
-                                               qdisc_tree_reduce_backlog(sch, 1,
-                                                                         qdisc_pkt_len(skb));
-                                       }
+                               if (err != NET_XMIT_SUCCESS &&
+                                   net_xmit_drop_count(err)) {
+                                       qdisc_qstats_drop(sch);
+                                       qdisc_tree_reduce_backlog(sch, 1,
+                                                                 pkt_len);
                                }
                                goto tfifo_dequeue;
                        }
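
The netem hunk above caches qdisc_pkt_len(skb) before handing the skb to qdisc_enqueue(): a child qdisc may free the skb on failure, so reading its length afterwards would be a use-after-free. A minimal userspace sketch of the save-before-handoff rule (struct packet and enqueue() are illustrative stand-ins, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct packet { unsigned int len; };

    /* Illustrative child enqueue: may consume (free) the packet on failure. */
    static int enqueue(struct packet *p)
    {
            if (p->len > 1500) {    /* pretend the child drops oversized packets */
                    free(p);        /* the packet is gone after this point */
                    return -1;
            }
            free(p);                /* accepted; child owns and releases it */
            return 0;
    }

    int main(void)
    {
            struct packet *p = malloc(sizeof(*p));
            if (!p)
                    return 1;
            p->len = 2000;

            unsigned int pkt_len = p->len;  /* cache before handing off */
            int err = enqueue(p);           /* p must not be touched afterwards */

            if (err)
                    printf("dropped, reducing backlog by %u\n", pkt_len);
            return 0;
    }
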
index fee1b15506b299755c685502a4a33283b458fef5..a356450b747ba57552d6c1e1e35255935580f02a 100644 (file)
@@ -85,6 +85,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        ret = qdisc_enqueue(skb, qdisc);
        if (ret == NET_XMIT_SUCCESS) {
+               qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
@@ -117,6 +118,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
                struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
                if (skb) {
                        qdisc_bstats_update(sch, skb);
+                       qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        return skb;
                }
@@ -135,6 +137,7 @@ static unsigned int prio_drop(struct Qdisc *sch)
        for (prio = q->bands-1; prio >= 0; prio--) {
                qdisc = q->queues[prio];
                if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
+                       sch->qstats.backlog -= len;
                        sch->q.qlen--;
                        return len;
                }
@@ -151,6 +154,7 @@ prio_reset(struct Qdisc *sch)
 
        for (prio = 0; prio < q->bands; prio++)
                qdisc_reset(q->queues[prio]);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
 }
 
@@ -168,8 +172,9 @@ prio_destroy(struct Qdisc *sch)
 static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 {
        struct prio_sched_data *q = qdisc_priv(sch);
+       struct Qdisc *queues[TCQ_PRIO_BANDS];
+       int oldbands = q->bands, i;
        struct tc_prio_qopt *qopt;
-       int i;
 
        if (nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
@@ -183,62 +188,42 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
                        return -EINVAL;
        }
 
+       /* Before commit, make sure we can allocate all new qdiscs */
+       for (i = oldbands; i < qopt->bands; i++) {
+               queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+                                             TC_H_MAKE(sch->handle, i + 1));
+               if (!queues[i]) {
+                       while (i > oldbands)
+                               qdisc_destroy(queues[--i]);
+                       return -ENOMEM;
+               }
+       }
+
        sch_tree_lock(sch);
        q->bands = qopt->bands;
        memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-       for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
+       for (i = q->bands; i < oldbands; i++) {
                struct Qdisc *child = q->queues[i];
-               q->queues[i] = &noop_qdisc;
-               if (child != &noop_qdisc) {
-                       qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
-                       qdisc_destroy(child);
-               }
-       }
-       sch_tree_unlock(sch);
 
-       for (i = 0; i < q->bands; i++) {
-               if (q->queues[i] == &noop_qdisc) {
-                       struct Qdisc *child, *old;
-
-                       child = qdisc_create_dflt(sch->dev_queue,
-                                                 &pfifo_qdisc_ops,
-                                                 TC_H_MAKE(sch->handle, i + 1));
-                       if (child) {
-                               sch_tree_lock(sch);
-                               old = q->queues[i];
-                               q->queues[i] = child;
-
-                               if (old != &noop_qdisc) {
-                                       qdisc_tree_reduce_backlog(old,
-                                                                 old->q.qlen,
-                                                                 old->qstats.backlog);
-                                       qdisc_destroy(old);
-                               }
-                               sch_tree_unlock(sch);
-                       }
-               }
+               qdisc_tree_reduce_backlog(child, child->q.qlen,
+                                         child->qstats.backlog);
+               qdisc_destroy(child);
        }
+
+       for (i = oldbands; i < q->bands; i++)
+               q->queues[i] = queues[i];
+
+       sch_tree_unlock(sch);
        return 0;
 }
 
 static int prio_init(struct Qdisc *sch, struct nlattr *opt)
 {
-       struct prio_sched_data *q = qdisc_priv(sch);
-       int i;
-
-       for (i = 0; i < TCQ_PRIO_BANDS; i++)
-               q->queues[i] = &noop_qdisc;
-
-       if (opt == NULL) {
+       if (!opt)
                return -EINVAL;
-       } else {
-               int err;
 
-               if ((err = prio_tune(sch, opt)) != 0)
-                       return err;
-       }
-       return 0;
+       return prio_tune(sch, opt);
 }
 
 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
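
The rewritten prio_tune() above allocates every new child qdisc before taking sch_tree_lock(), so the configuration either commits in full or fails with no side effects; destroying surplus bands then happens under the lock. A minimal userspace sketch of that allocate-then-commit shape, with illustrative names (grow_bands() is not a kernel function):

    #include <stdlib.h>

    #define MAX_BANDS 16

    struct queue { int band; };

    static struct queue *queues[MAX_BANDS];

    /* Grow from oldbands to newbands; on any failure, roll back and
     * leave the committed state untouched. */
    static int grow_bands(int oldbands, int newbands)
    {
            struct queue *pending[MAX_BANDS];
            int i;

            /* Phase 1: allocate everything up front, outside any lock. */
            for (i = oldbands; i < newbands; i++) {
                    pending[i] = malloc(sizeof(*pending[i]));
                    if (!pending[i]) {
                            while (i > oldbands)
                                    free(pending[--i]);
                            return -1;      /* nothing was committed */
                    }
                    pending[i]->band = i;
            }

            /* Phase 2: commit; in the kernel this runs under sch_tree_lock(). */
            for (i = oldbands; i < newbands; i++)
                    queues[i] = pending[i];
            return 0;
    }

    int main(void)
    {
            return grow_bands(0, 4) ? 1 : 0;
    }
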
index 8d2d8d953432e83d8523cc42923793eb852f599c..f18857febdad0adcae567beaebd2520ec9932ce6 100644 (file)
@@ -1235,8 +1235,10 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                         cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
                err = qfq_change_agg(sch, cl, cl->agg->class_weight,
                                     qdisc_pkt_len(skb));
-               if (err)
-                       return err;
+               if (err) {
+                       cl->qstats.drops++;
+                       return qdisc_drop(skb, sch);
+               }
        }
 
        err = qdisc_enqueue(skb, cl->qdisc);
index 8c0508c0e287742a8fbbcbf49134e76e9e52b807..91578bdd378c943e66137fd986c1df6f1674bd27 100644 (file)
@@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        ret = qdisc_enqueue(skb, child);
        if (likely(ret == NET_XMIT_SUCCESS)) {
+               qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
        } else if (net_xmit_drop_count(ret)) {
                q->stats.pdrop++;
@@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
        skb = child->dequeue(child);
        if (skb) {
                qdisc_bstats_update(sch, skb);
+               qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        } else {
                if (!red_is_idling(&q->vars))
@@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch)
        if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
                q->stats.other++;
                qdisc_qstats_drop(sch);
+               sch->qstats.backlog -= len;
                sch->q.qlen--;
                return len;
        }
@@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch)
        struct red_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset(q->qdisc);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
        red_restart(&q->vars);
 }
index 83b90b584fae4fcee779bdeb638df5c59f40f007..3161e491990b29254fdcb54ad3ff5d18dbf4d16f 100644 (file)
@@ -207,6 +207,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                return ret;
        }
 
+       qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
 }
@@ -217,6 +218,7 @@ static unsigned int tbf_drop(struct Qdisc *sch)
        unsigned int len = 0;
 
        if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
+               sch->qstats.backlog -= len;
                sch->q.qlen--;
                qdisc_qstats_drop(sch);
        }
@@ -263,6 +265,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
                        q->t_c = now;
                        q->tokens = toks;
                        q->ptokens = ptoks;
+                       qdisc_qstats_backlog_dec(sch, skb);
                        sch->q.qlen--;
                        qdisc_unthrottled(sch);
                        qdisc_bstats_update(sch, skb);
@@ -294,6 +297,7 @@ static void tbf_reset(struct Qdisc *sch)
        struct tbf_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset(q->qdisc);
+       sch->qstats.backlog = 0;
        sch->q.qlen = 0;
        q->t_c = ktime_get_ns();
        q->tokens = q->buffer;
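
The prio, red and tbf hunks above all restore the same invariant: sch->qstats.backlog must track sch->q.qlen, increasing on successful enqueue, decreasing on dequeue and drop, and returning to zero on reset. A hedged userspace model of that invariant (the struct is illustrative, not the kernel's):

    #include <assert.h>

    struct qdisc_stats {
            unsigned int qlen;      /* packets queued */
            unsigned int backlog;   /* bytes queued   */
    };

    static void on_enqueue(struct qdisc_stats *s, unsigned int bytes)
    {
            s->qlen++;
            s->backlog += bytes;
    }

    static void on_dequeue(struct qdisc_stats *s, unsigned int bytes)
    {
            s->qlen--;
            s->backlog -= bytes;
    }

    static void on_reset(struct qdisc_stats *s)
    {
            s->qlen = 0;
            s->backlog = 0;         /* the missing piece these hunks add */
    }

    int main(void)
    {
            struct qdisc_stats s = { 0, 0 };

            on_enqueue(&s, 1500);
            on_dequeue(&s, 1500);
            assert(s.qlen == 0 && s.backlog == 0);
            on_enqueue(&s, 100);
            on_reset(&s);
            assert(s.backlog == 0);
            return 0;
    }
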
index a701527a9480faff1b8d91257e1dbf3c0f09ed68..47cf4604d19c23a4bf392c87d06de3e44680d5c0 100644 (file)
@@ -112,7 +112,6 @@ int sctp_rcv(struct sk_buff *skb)
        struct sctp_ep_common *rcvr;
        struct sctp_transport *transport = NULL;
        struct sctp_chunk *chunk;
-       struct sctphdr *sh;
        union sctp_addr src;
        union sctp_addr dest;
        int family;
@@ -127,8 +126,6 @@ int sctp_rcv(struct sk_buff *skb)
        if (skb_linearize(skb))
                goto discard_it;
 
-       sh = sctp_hdr(skb);
-
        /* Pull up the IP and SCTP headers. */
        __skb_pull(skb, skb_transport_offset(skb));
        if (skb->len < sizeof(struct sctphdr))
@@ -230,7 +227,7 @@ int sctp_rcv(struct sk_buff *skb)
        chunk->rcvr = rcvr;
 
        /* Remember the SCTP header. */
-       chunk->sctp_hdr = sh;
+       chunk->sctp_hdr = sctp_hdr(skb);
 
        /* Set the source and destination addresses of the incoming chunk.  */
        sctp_init_addrs(chunk, &src, &dest);
index 8e3e769dc9eafacf3d475c105ee8f39e89adcbcf..f69edcf219e514d864c4af2d57eb0429e8f4938a 100644 (file)
@@ -3,12 +3,6 @@
 #include <linux/sock_diag.h>
 #include <net/sctp/sctp.h>
 
-extern void inet_diag_msg_common_fill(struct inet_diag_msg *r,
-                                     struct sock *sk);
-extern int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
-                                   struct inet_diag_msg *r, int ext,
-                                   struct user_namespace *user_ns);
-
 static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
                               void *info);
 
@@ -356,6 +350,9 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
        if (cb->args[4] < cb->args[1])
                goto next;
 
+       if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs))
+               goto next;
+
        if (r->sdiag_family != AF_UNSPEC &&
            sk->sk_family != r->sdiag_family)
                goto next;
index 777d0324594a33a407e9ec157a7634334b1292e2..67154b848aa9bae93ce2bc6941e7974acb678142 100644 (file)
@@ -4220,6 +4220,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
                info->sctpi_s_disable_fragments = sp->disable_fragments;
                info->sctpi_s_v4mapped = sp->v4mapped;
                info->sctpi_s_frag_interleave = sp->frag_interleave;
+               info->sctpi_s_type = sp->type;
 
                return 0;
        }
index 06b4df9faaa1696b1e21ead9ddacea85cf41f760..2808d550d2730ff8e36b6d8c072c65e1631064c4 100644 (file)
@@ -446,16 +446,27 @@ out_no_rpciod:
        return ERR_PTR(err);
 }
 
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
                                        struct rpc_xprt *xprt)
 {
        struct rpc_clnt *clnt = NULL;
        struct rpc_xprt_switch *xps;
 
-       xps = xprt_switch_alloc(xprt, GFP_KERNEL);
-       if (xps == NULL)
-               return ERR_PTR(-ENOMEM);
-
+       if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
+               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               xps = args->bc_xprt->xpt_bc_xps;
+               xprt_switch_get(xps);
+       } else {
+               xps = xprt_switch_alloc(xprt, GFP_KERNEL);
+               if (xps == NULL) {
+                       xprt_put(xprt);
+                       return ERR_PTR(-ENOMEM);
+               }
+               if (xprt->bc_xprt) {
+                       xprt_switch_get(xps);
+                       xprt->bc_xprt->xpt_bc_xps = xps;
+               }
+       }
        clnt = rpc_new_client(args, xps, xprt, NULL);
        if (IS_ERR(clnt))
                return clnt;
@@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
 
        return clnt;
 }
-EXPORT_SYMBOL_GPL(rpc_create_xprt);
 
 /**
  * rpc_create - create an RPC client and transport with one call
@@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
        };
        char servername[48];
 
+       if (args->bc_xprt) {
+               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               xprt = args->bc_xprt->xpt_bc_xprt;
+               if (xprt) {
+                       xprt_get(xprt);
+                       return rpc_create_xprt(args, xprt);
+               }
+       }
+
        if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
                xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
        if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
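
The rpc_create_xprt() change above reuses an rpc_xprt_switch already published on the backchannel transport, taking a reference with xprt_switch_get(), and only allocates a new one otherwise. A loose userspace model of that get-or-create-with-refcount shape (get_or_create() and the refcount field are illustrative; the kernel additionally pins a reference for the published pointer):

    #include <stdlib.h>

    struct xprt_switch { int refcount; };

    /* Reuse an already-published switch, else allocate a fresh one. */
    static struct xprt_switch *get_or_create(struct xprt_switch **slot)
    {
            struct xprt_switch *xps = *slot;

            if (xps) {
                    xps->refcount++;        /* xprt_switch_get() */
                    return xps;
            }
            xps = malloc(sizeof(*xps));
            if (!xps)
                    return NULL;
            xps->refcount = 1;
            *slot = xps;                    /* publish for later callers */
            return xps;
    }

    int main(void)
    {
            struct xprt_switch *slot = NULL;
            struct xprt_switch *a = get_or_create(&slot);
            struct xprt_switch *b = get_or_create(&slot);

            return (a && b && a == b && a->refcount == 2) ? 0 : 1;
    }
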
index f5572e31d518f85b2afbbb9ec1b8e89d36448803..4f01f63102ee5d1ce936f54fa8feb3cedfc15607 100644 (file)
@@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref)
        /* See comment on corresponding get in xs_setup_bc_tcp(): */
        if (xprt->xpt_bc_xprt)
                xprt_put(xprt->xpt_bc_xprt);
+       if (xprt->xpt_bc_xps)
+               xprt_switch_put(xprt->xpt_bc_xps);
        xprt->xpt_ops->xpo_free(xprt);
        module_put(owner);
 }
index 2d3e0c42361e6190555eb3b0cd9465851c88fd7b..7e2b2fa189c340e7f0968aead6f878879e8a9a72 100644 (file)
@@ -3057,6 +3057,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
                return xprt;
 
        args->bc_xprt->xpt_bc_xprt = NULL;
+       args->bc_xprt->xpt_bc_xps = NULL;
        xprt_put(xprt);
        ret = ERR_PTR(-EINVAL);
 out_err:
index 6f11c62bc8f9cbae5d9d69da5127e9f92e000f2f..a597708ae3818b25f9eaee8f1c3584865432a37d 100644 (file)
@@ -330,6 +330,21 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
        return 0;
 }
 
+/* tipc_bearer_reset_all - reset all links on all bearers
+ */
+void tipc_bearer_reset_all(struct net *net)
+{
+       struct tipc_net *tn = tipc_net(net);
+       struct tipc_bearer *b;
+       int i;
+
+       for (i = 0; i < MAX_BEARERS; i++) {
+               b = rcu_dereference_rtnl(tn->bearer_list[i]);
+               if (b)
+                       tipc_reset_bearer(net, b);
+       }
+}
+
 /**
  * bearer_disable
  *
@@ -405,7 +420,7 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
                return 0;
 
        /* Send RESET message even if bearer is detached from device */
-       tipc_ptr = rtnl_dereference(dev->tipc_ptr);
+       tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
        if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
                goto drop;
 
index f686e41b5abb880cc09f9c1a707356047d32749f..60e49c3be19c1e44b44175926b10c6aecb64fa58 100644 (file)
@@ -198,6 +198,7 @@ void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
 void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
 struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
 struct tipc_media *tipc_media_find(const char *name);
+void tipc_bearer_reset_all(struct net *net);
 int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
index 7059c94f33c55f03ce70e193a017cbf2bb441f3e..7d89f8713d4984f3a9d732c9cabc56d410219ad8 100644 (file)
@@ -349,6 +349,8 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
        u16 ack = snd_l->snd_nxt - 1;
 
        snd_l->ackers--;
+       rcv_l->bc_peer_is_up = true;
+       rcv_l->state = LINK_ESTABLISHED;
        tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
        tipc_link_reset(rcv_l);
        rcv_l->state = LINK_RESET;
@@ -704,7 +706,8 @@ static void link_profile_stats(struct tipc_link *l)
  */
 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-       int mtyp, rc = 0;
+       int mtyp = 0;
+       int rc = 0;
        bool state = false;
        bool probe = false;
        bool setup = false;
@@ -1558,7 +1561,12 @@ void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
        if (!msg_peer_node_is_up(hdr))
                return;
 
-       l->bc_peer_is_up = true;
+       /* Open when peer acknowledges our bcast init msg (pkt #1) */
+       if (msg_ack(hdr))
+               l->bc_peer_is_up = true;
+
+       if (!l->bc_peer_is_up)
+               return;
 
        /* Ignore if peers_snd_nxt goes beyond receive window */
        if (more(peers_snd_nxt, l->rcv_nxt + l->window))
index 8740930f07872ff1ec8ff9d6b712b2772ba48336..17201aa8423ddd816a4792621e289dbeee8d4928 100644 (file)
@@ -41,6 +41,8 @@
 #include "name_table.h"
 
 #define MAX_FORWARD_SIZE 1024
+#define BUF_HEADROOM (LL_MAX_HEADER + 48)
+#define BUF_TAILROOM 16
 
 static unsigned int align(unsigned int i)
 {
@@ -505,6 +507,10 @@ bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
                msg_set_hdr_sz(hdr, BASIC_H_SIZE);
        }
 
+       if (skb_cloned(_skb) &&
+           pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
+               goto exit;
+
        /* Now reverse the concerned fields */
        msg_set_errcode(hdr, err);
        msg_set_origport(hdr, msg_destport(&ohdr));
index 024da8af91f0edf55e78c30edc682fe5741c6198..7cf52fb39bee6aa38379e81af31fe3d5c6fea557 100644 (file)
@@ -94,17 +94,6 @@ struct plist;
 
 #define TIPC_MEDIA_INFO_OFFSET 5
 
-/**
- * TIPC message buffer code
- *
- * TIPC message buffer headroom reserves space for the worst-case
- * link-level device header (in case the message is sent off-node).
- *
- * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
- *       are word aligned for quicker access
- */
-#define BUF_HEADROOM (LL_MAX_HEADER + 48)
-
 struct tipc_skb_cb {
        void *handle;
        struct sk_buff *tail;
index 4dfc5c14f8c3c3f38a0fa0f1bfdcf40ff61ace07..1fd4647647650b75f17f41d19d288be7abe436a3 100644 (file)
@@ -346,9 +346,15 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
                                      struct nlattr **attrs)
 {
        struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
+       int err;
+
+       if (!attrs[TIPC_NLA_BEARER])
+               return -EINVAL;
 
-       nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER],
-                        NULL);
+       err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX,
+                              attrs[TIPC_NLA_BEARER], NULL);
+       if (err)
+               return err;
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
                            nla_data(bearer[TIPC_NLA_BEARER_NAME]),
@@ -460,14 +466,31 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
        struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
        struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
        struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+       int err;
 
-       nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+       if (!attrs[TIPC_NLA_LINK])
+               return -EINVAL;
 
-       nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP],
-                        NULL);
+       err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
+                              NULL);
+       if (err)
+               return err;
+
+       if (!link[TIPC_NLA_LINK_PROP])
+               return -EINVAL;
 
-       nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS],
-                        NULL);
+       err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
+                              link[TIPC_NLA_LINK_PROP], NULL);
+       if (err)
+               return err;
+
+       if (!link[TIPC_NLA_LINK_STATS])
+               return -EINVAL;
+
+       err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
+                              link[TIPC_NLA_LINK_STATS], NULL);
+       if (err)
+               return err;
 
        name = (char *)TLV_DATA(msg->req);
        if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
@@ -569,12 +592,20 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
 {
        struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
        struct tipc_link_info link_info;
+       int err;
 
-       nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
+       if (!attrs[TIPC_NLA_LINK])
+               return -EINVAL;
+
+       err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
+                              NULL);
+       if (err)
+               return err;
 
        link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
        link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
-       strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
+       nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+                   TIPC_MAX_LINK_NAME);
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
                            &link_info, sizeof(link_info));
@@ -758,12 +789,23 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
        u32 node, depth, type, lowbound, upbound;
        static const char * const scope_str[] = {"", " zone", " cluster",
                                                 " node"};
+       int err;
 
-       nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
-                        attrs[TIPC_NLA_NAME_TABLE], NULL);
+       if (!attrs[TIPC_NLA_NAME_TABLE])
+               return -EINVAL;
 
-       nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL],
-                        NULL);
+       err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
+                              attrs[TIPC_NLA_NAME_TABLE], NULL);
+       if (err)
+               return err;
+
+       if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
+               return -EINVAL;
+
+       err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX,
+                              nt[TIPC_NLA_NAME_TABLE_PUBL], NULL);
+       if (err)
+               return err;
 
        ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
 
@@ -815,8 +857,15 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
 {
        u32 type, lower, upper;
        struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
+       int err;
 
-       nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL);
+       if (!attrs[TIPC_NLA_PUBL])
+               return -EINVAL;
+
+       err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL],
+                              NULL);
+       if (err)
+               return err;
 
        type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
        lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
@@ -876,7 +925,13 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
        u32 sock_ref;
        struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
 
-       nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL);
+       if (!attrs[TIPC_NLA_SOCK])
+               return -EINVAL;
+
+       err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK],
+                              NULL);
+       if (err)
+               return err;
 
        sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
        tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
@@ -917,9 +972,15 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
                                     struct nlattr **attrs)
 {
        struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
+       int err;
+
+       if (!attrs[TIPC_NLA_MEDIA])
+               return -EINVAL;
 
-       nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
-                        NULL);
+       err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
+                              NULL);
+       if (err)
+               return err;
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
                            nla_data(media[TIPC_NLA_MEDIA_NAME]),
@@ -931,8 +992,15 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
 {
        struct tipc_node_info node_info;
        struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
+       int err;
 
-       nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL);
+       if (!attrs[TIPC_NLA_NODE])
+               return -EINVAL;
+
+       err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE],
+                              NULL);
+       if (err)
+               return err;
 
        node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
        node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
@@ -971,8 +1039,16 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
 {
        __be32 id;
        struct nlattr *net[TIPC_NLA_NET_MAX + 1];
+       int err;
+
+       if (!attrs[TIPC_NLA_NET])
+               return -EINVAL;
+
+       err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET],
+                              NULL);
+       if (err)
+               return err;
 
-       nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL);
        id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));
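
Every tipc_nl_compat_*_dump() change above applies the same hardening: fail with -EINVAL when the container attribute is absent, and propagate any error from nla_parse_nested() instead of ignoring its return value. A minimal sketch of that check-then-parse shape (parse_nested() is an illustrative stand-in for the netlink helper):

    #include <errno.h>

    struct attr { int present; };

    /* Stand-in for nla_parse_nested(); may fail validation. */
    static int parse_nested(const struct attr *a)
    {
            return a->present > 1 ? -ERANGE : 0;    /* illustrative failure */
    }

    static int dump_one(const struct attr *container)
    {
            int err;

            if (!container || !container->present)
                    return -EINVAL;         /* attribute must exist */

            err = parse_nested(container);  /* and must parse cleanly */
            if (err)
                    return err;

            return 0;
    }

    int main(void)
    {
            struct attr a = { 1 };

            return dump_one(&a);
    }
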
index e01e2c71b5a16fde975b6e87bd134ae57cd57326..23d4761842a0ed9ac62eb1af5cc03259aadeb22d 100644 (file)
@@ -1297,10 +1297,6 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
 
        rc = tipc_bcast_rcv(net, be->link, skb);
 
-       /* Broadcast link reset may happen at reassembly failure */
-       if (rc & TIPC_LINK_DOWN_EVT)
-               tipc_node_reset_links(n);
-
        /* Broadcast ACKs are sent on a unicast link */
        if (rc & TIPC_LINK_SND_BC_ACK) {
                tipc_node_read_lock(n);
@@ -1320,6 +1316,17 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
                spin_unlock_bh(&be->inputq2.lock);
                tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
        }
+
+       if (rc & TIPC_LINK_DOWN_EVT) {
+               /* Reception reassembly failure => reset all links to peer */
+               if (!tipc_link_is_up(be->link))
+                       tipc_node_reset_links(n);
+
+               /* Retransmission failure => reset all links to all peers */
+               if (!tipc_link_is_up(tipc_bc_sndlink(net)))
+                       tipc_bearer_reset_all(net);
+       }
+
        tipc_node_put(n);
 }
 
index 88bfcd707064145cac007eb7ff88fb1a2eb77418..c49b8df438cbeee021bbedf3c96631d82fb16670 100644 (file)
@@ -796,9 +796,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
  * @tsk: receiving socket
  * @skb: pointer to message buffer.
  */
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+                             struct sk_buff_head *xmitq)
 {
        struct sock *sk = &tsk->sk;
+       u32 onode = tsk_own_node(tsk);
        struct tipc_msg *hdr = buf_msg(skb);
        int mtyp = msg_type(hdr);
        bool conn_cong;
@@ -811,7 +813,8 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
 
        if (mtyp == CONN_PROBE) {
                msg_set_type(hdr, CONN_PROBE_REPLY);
-               tipc_sk_respond(sk, skb, TIPC_OK);
+               if (tipc_msg_reverse(onode, &skb, TIPC_OK))
+                       __skb_queue_tail(xmitq, skb);
                return;
        } else if (mtyp == CONN_ACK) {
                conn_cong = tsk_conn_cong(tsk);
@@ -1686,7 +1689,8 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
  *
  * Returns true if message was added to socket receive queue, otherwise false
  */
-static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
+                      struct sk_buff_head *xmitq)
 {
        struct socket *sock = sk->sk_socket;
        struct tipc_sock *tsk = tipc_sk(sk);
@@ -1696,7 +1700,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
        int usr = msg_user(hdr);
 
        if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
-               tipc_sk_proto_rcv(tsk, skb);
+               tipc_sk_proto_rcv(tsk, skb, xmitq);
                return false;
        }
 
@@ -1739,7 +1743,8 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
        return true;
 
 reject:
-       tipc_sk_respond(sk, skb, err);
+       if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
+               __skb_queue_tail(xmitq, skb);
        return false;
 }
 
@@ -1755,9 +1760,24 @@ reject:
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
        unsigned int truesize = skb->truesize;
+       struct sk_buff_head xmitq;
+       u32 dnode, selector;
 
-       if (likely(filter_rcv(sk, skb)))
+       __skb_queue_head_init(&xmitq);
+
+       if (likely(filter_rcv(sk, skb, &xmitq))) {
                atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
+               return 0;
+       }
+
+       if (skb_queue_empty(&xmitq))
+               return 0;
+
+       /* Send response/rejected message */
+       skb = __skb_dequeue(&xmitq);
+       dnode = msg_destnode(buf_msg(skb));
+       selector = msg_origport(buf_msg(skb));
+       tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
        return 0;
 }
 
@@ -1771,12 +1791,13 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
  * Caller must hold socket lock
  */
 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
-                           u32 dport)
+                           u32 dport, struct sk_buff_head *xmitq)
 {
+       unsigned long time_limit = jiffies + 2;
+       struct sk_buff *skb;
        unsigned int lim;
        atomic_t *dcnt;
-       struct sk_buff *skb;
-       unsigned long time_limit = jiffies + 2;
+       u32 onode;
 
        while (skb_queue_len(inputq)) {
                if (unlikely(time_after_eq(jiffies, time_limit)))
@@ -1788,7 +1809,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 
                /* Add message directly to receive queue if possible */
                if (!sock_owned_by_user(sk)) {
-                       filter_rcv(sk, skb);
+                       filter_rcv(sk, skb, xmitq);
                        continue;
                }
 
@@ -1801,7 +1822,9 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
                        continue;
 
                /* Overload => reject message back to sender */
-               tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+               onode = tipc_own_addr(sock_net(sk));
+               if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+                       __skb_queue_tail(xmitq, skb);
                break;
        }
 }
@@ -1814,12 +1837,14 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
  */
 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
+       struct sk_buff_head xmitq;
        u32 dnode, dport = 0;
        int err;
        struct tipc_sock *tsk;
        struct sock *sk;
        struct sk_buff *skb;
 
+       __skb_queue_head_init(&xmitq);
        while (skb_queue_len(inputq)) {
                dport = tipc_skb_peek_port(inputq, dport);
                tsk = tipc_sk_lookup(net, dport);
@@ -1827,9 +1852,14 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
                if (likely(tsk)) {
                        sk = &tsk->sk;
                        if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
-                               tipc_sk_enqueue(inputq, sk, dport);
+                               tipc_sk_enqueue(inputq, sk, dport, &xmitq);
                                spin_unlock_bh(&sk->sk_lock.slock);
                        }
+                       /* Send pending response/rejected messages, if any */
+                       while ((skb = __skb_dequeue(&xmitq))) {
+                               dnode = msg_destnode(buf_msg(skb));
+                               tipc_node_xmit_skb(net, skb, dnode, dport);
+                       }
                        sock_put(sk);
                        continue;
                }
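
The socket hunks above stop transmitting from inside filter_rcv(): while the socket spinlock is held, responses and rejections are only queued on a caller-provided xmitq, and tipc_sk_rcv()/tipc_backlog_rcv() send them after the lock is released. A userspace sketch of the defer-while-locked pattern, with a pthread mutex standing in for the socket lock and illustrative names:

    #include <pthread.h>
    #include <stdio.h>

    #define QLEN 8

    static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Runs with sk_lock held: may only *queue* a response. */
    static void filter_rcv(int msg, int *xmitq, int *len)
    {
            if (msg < 0)
                    xmitq[(*len)++] = -msg;     /* queue the rejection */
    }

    static void sk_rcv(int msg)
    {
            int xmitq[QLEN];
            int n = 0, i;

            pthread_mutex_lock(&sk_lock);
            filter_rcv(msg, xmitq, &n);         /* no sending under the lock */
            pthread_mutex_unlock(&sk_lock);

            /* Transmit only after the lock is dropped. */
            for (i = 0; i < n; i++)
                    printf("sending response %d\n", xmitq[i]);
    }

    int main(void)
    {
            sk_rcv(-42);
            return 0;
    }
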
index 80aa6a3e681763232ba7f5c61663761c7d3ec0d3..735362c26c8e5a3329d3cc6517b330167b393db6 100644 (file)
@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
                    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
                struct dentry *dentry = unix_sk(s)->path.dentry;
 
-               if (dentry && d_backing_inode(dentry) == i) {
+               if (dentry && d_real_inode(dentry) == i) {
                        sock_hold(s);
                        goto found;
                }
@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
                err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
                if (err)
                        goto fail;
-               inode = d_backing_inode(path.dentry);
+               inode = d_real_inode(path.dentry);
                err = inode_permission(inode, MAY_WRITE);
                if (err)
                        goto put_fail;
@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                        goto out_up;
                }
                addr->hash = UNIX_HASH_SIZE;
-               hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+               hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
                spin_lock(&unix_table_lock);
                u->path = u_path;
                list = &unix_socket_table[hash];
index b5f1221f48d4859156aa640066e1fd80cf2927dc..b96ac918e0ba026d74e2a02dda99b8eb0f613267 100644 (file)
  * function will also cleanup rejected sockets, those that reach the connected
  * state but leave it before they have been accepted.
  *
+ * - Lock ordering for pending or accept queue sockets is:
+ *
+ *     lock_sock(listener);
+ *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
+ *
+ * Using explicit nested locking keeps lockdep happy since normally only one
+ * lock of a given class may be taken at a time.
+ *
  * - Sockets created by user action will be cleaned up when the user process
  * calls close(2), causing our release implementation to be called. Our release
  * implementation will perform some cleanup then drop the last reference so our
@@ -443,7 +451,7 @@ void vsock_pending_work(struct work_struct *work)
        cleanup = true;
 
        lock_sock(listener);
-       lock_sock(sk);
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
 
        if (vsock_is_pending(sk)) {
                vsock_remove_pending(listener, sk);
@@ -1292,7 +1300,7 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
        if (connected) {
                listener->sk_ack_backlog--;
 
-               lock_sock(connected);
+               lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
                vconnected = vsock_sk(connected);
 
                /* If the listener socket has received an error, then we should
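
The vsock changes above encode the lock order spelled out in the comment: take the listener's lock first, then the pending or accepted child with lock_sock_nested(..., SINGLE_DEPTH_NESTING) so lockdep accepts a second lock of the same class. A userspace analogue with two mutexes in a fixed parent-then-child order (pthreads have no lockdep, so the nesting annotation has no direct equivalent here):

    #include <pthread.h>

    static pthread_mutex_t listener_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t child_lock    = PTHREAD_MUTEX_INITIALIZER;

    static void accept_child(void)
    {
            /* Fixed order: listener first, then the pending child socket.
             * In the kernel the second call is lock_sock_nested() so that
             * lockdep tolerates two locks of the same class. */
            pthread_mutex_lock(&listener_lock);
            pthread_mutex_lock(&child_lock);

            /* ... move the child off the pending list ... */

            pthread_mutex_unlock(&child_lock);
            pthread_mutex_unlock(&listener_lock);
    }

    int main(void)
    {
            accept_child();
            return 0;
    }
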
index d25c82bc1bbe8e80ee0761b466e16088040c717d..ecca3896b9f7a2f18f1cdcfa2d38b2d857decf27 100644 (file)
@@ -363,8 +363,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
        WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
        WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
        WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
-       WARN_ON(ops->set_tx_power && !ops->get_tx_power);
-       WARN_ON(ops->set_antenna && !ops->get_antenna);
 
        alloc_size = sizeof(*rdev) + sizeof_priv;
 
index d7599014055dfc1c3d256d917e3a3645f1a27143..7d72283901a3bb8fa7d3294e5dcdfbc6cd61e3a2 100644 (file)
@@ -3487,16 +3487,16 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                params.smps_mode = NL80211_SMPS_OFF;
        }
 
+       params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+       if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
+               return -EOPNOTSUPP;
+
        if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
                params.acl = parse_acl_data(&rdev->wiphy, info);
                if (IS_ERR(params.acl))
                        return PTR_ERR(params.acl);
        }
 
-       params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
-       if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
-               return -EOPNOTSUPP;
-
        wdev_lock(wdev);
        err = rdev_start_ap(rdev, dev, &params);
        if (!err) {
index 4e809e978b7d274f3967349c87acda43aac57370..b7d1592bd5b8939f50d01ee2ce11a95c0df0e93b 100644 (file)
@@ -509,7 +509,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
                 * replace EtherType */
                hdrlen += ETH_ALEN + 2;
        else
-               tmp.h_proto = htons(skb->len);
+               tmp.h_proto = htons(skb->len - hdrlen);
 
        pskb_pull(skb, hdrlen);
 
@@ -721,6 +721,8 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
         * alignment since sizeof(struct ethhdr) is 14.
         */
        frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
+       if (!frame)
+               return NULL;
 
        skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
        skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
index 6250b1cfcde58758bb480758d1c61217d37a7cd1..dbb2738e356ad83785caa748a0b84352a189bb8b 100644 (file)
@@ -958,8 +958,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
                        return private(dev, iwr, cmd, info, handler);
        }
        /* Old driver API : call driver ioctl handler */
-       if (dev->netdev_ops->ndo_do_ioctl)
-               return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+       if (dev->netdev_ops->ndo_do_ioctl) {
+#ifdef CONFIG_COMPAT
+               if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+                       int ret = 0;
+                       struct iwreq iwr_lcl;
+                       struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
+
+                       memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
+                       iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
+                       iwr_lcl.u.data.length = iwp_compat->length;
+                       iwr_lcl.u.data.flags = iwp_compat->flags;
+
+                       ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
+
+                       iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
+                       iwp_compat->length = iwr_lcl.u.data.length;
+                       iwp_compat->flags = iwr_lcl.u.data.flags;
+
+                       return ret;
+               } else
+#endif
+                       return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+       }
        return -EOPNOTSUPP;
 }
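
The wext hunk above widens a 32-bit iw_point into the native iwreq layout with compat_ptr() before calling the driver, then narrows the results back with ptr_to_compat(). A small model of that round-trip using fixed-width types (the struct layouts are illustrative, not the real wireless-extensions ABI):

    #include <stdint.h>
    #include <stdio.h>

    struct compat_point { uint32_t pointer; uint16_t length; uint16_t flags; };
    struct native_point { void *pointer; uint16_t length; uint16_t flags; };

    static int driver_ioctl(struct native_point *p)
    {
            p->length += 1;         /* pretend the driver updated the length */
            return 0;
    }

    int main(void)
    {
            struct compat_point cp = { 0x1000, 16, 0 };
            struct native_point np;
            int ret;

            /* Widen on the way in (compat_ptr() in the kernel)... */
            np.pointer = (void *)(uintptr_t)cp.pointer;
            np.length  = cp.length;
            np.flags   = cp.flags;

            ret = driver_ioctl(&np);

            /* ...and narrow on the way out (ptr_to_compat()). */
            cp.pointer = (uint32_t)(uintptr_t)np.pointer;
            cp.length  = np.length;
            cp.flags   = np.flags;

            printf("ret=%d length=%u\n", ret, cp.length);
            return ret;
    }
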
 
index 6750595bd7b819883b5b4ae8dde3fdfacf0f37fb..4904ced676d40289356aa3358f894fa7efa4b5c0 100755 (executable)
@@ -2454,6 +2454,7 @@ sub process {
 
 # Check for git id commit length and improperly formed commit descriptions
                if ($in_commit_log && !$commit_log_possible_stack_dump &&
+                   $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i &&
                    ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i ||
                     ($line =~ /\b[0-9a-f]{12,40}\b/i &&
                      $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i &&
index 52e4e61140d1226f5ba80676d1973baa298d3b23..2573543842d06947cf081ff3cf752c8d309f094b 100644 (file)
@@ -1,2 +1,3 @@
 *.pyc
 *.pyo
+constants.py
index cd129e65d1ffdbbb06e6f1096340e00f1cff8d9e..8b00031f53497035b68bfe65bd682989430fe016 100644 (file)
@@ -13,9 +13,11 @@ quiet_cmd_gen_constants_py = GEN     $@
        $(CPP) -E -x c -P $(c_flags) $< > $@ ;\
        sed -i '1,/<!-- end-c-headers -->/d;' $@
 
-$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in
-       $(call if_changed,gen_constants_py)
+targets += constants.py
+$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in FORCE
+       $(call if_changed_dep,gen_constants_py)
 
 build_constants_py: $(obj)/constants.py
+       @:
 
 clean-files := *.pyc *.pyo $(if $(KBUILD_SRC),*.py) $(obj)/constants.py
index 07e6c2befe368665ed7b35bd4fa9f08e1600d4dc..7986f4e0da123a240eeca854666dd3cfac86d69b 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <linux/fs.h>
 #include <linux/mount.h>
-#include <linux/radix-tree.h>
 
 /* We need to stringify expanded macros so that they can be parsed */
 
@@ -51,9 +50,3 @@ LX_VALUE(MNT_NOEXEC)
 LX_VALUE(MNT_NOATIME)
 LX_VALUE(MNT_NODIRATIME)
 LX_VALUE(MNT_RELATIME)
-
-/* linux/radix-tree.h */
-LX_VALUE(RADIX_TREE_INDIRECT_PTR)
-LX_GDBPARSED(RADIX_TREE_HEIGHT_MASK)
-LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
-LX_GDBPARSED(RADIX_TREE_MAP_MASK)
diff --git a/scripts/gdb/linux/radixtree.py b/scripts/gdb/linux/radixtree.py
deleted file mode 100644 (file)
index 0fdef4e..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-#
-# gdb helper commands and functions for Linux kernel debugging
-#
-#  Radix Tree Parser
-#
-# Copyright (c) 2016 Linaro Ltd
-#
-# Authors:
-#  Kieran Bingham <kieran.bingham@linaro.org>
-#
-# This work is licensed under the terms of the GNU GPL version 2.
-#
-
-import gdb
-
-from linux import utils
-from linux import constants
-
-radix_tree_root_type = utils.CachedType("struct radix_tree_root")
-radix_tree_node_type = utils.CachedType("struct radix_tree_node")
-
-
-def is_indirect_ptr(node):
-    long_type = utils.get_long_type()
-    return (node.cast(long_type) & constants.LX_RADIX_TREE_INDIRECT_PTR)
-
-
-def indirect_to_ptr(node):
-    long_type = utils.get_long_type()
-    node_type = node.type
-    indirect_ptr = node.cast(long_type) & ~constants.LX_RADIX_TREE_INDIRECT_PTR
-    return indirect_ptr.cast(node_type)
-
-
-def maxindex(height):
-    height = height & constants.LX_RADIX_TREE_HEIGHT_MASK
-    return gdb.parse_and_eval("height_to_maxindex["+str(height)+"]")
-
-
-def lookup(root, index):
-    if root.type == radix_tree_root_type.get_type().pointer():
-        root = root.dereference()
-    elif root.type != radix_tree_root_type.get_type():
-        raise gdb.GdbError("Must be struct radix_tree_root not {}"
-                           .format(root.type))
-
-    node = root['rnode']
-    if node is 0:
-        return None
-
-    if not (is_indirect_ptr(node)):
-        if (index > 0):
-            return None
-        return node
-
-    node = indirect_to_ptr(node)
-
-    height = node['path'] & constants.LX_RADIX_TREE_HEIGHT_MASK
-    if (index > maxindex(height)):
-        return None
-
-    shift = (height-1) * constants.LX_RADIX_TREE_MAP_SHIFT
-
-    while True:
-        new_index = (index >> shift) & constants.LX_RADIX_TREE_MAP_MASK
-        slot = node['slots'][new_index]
-
-        node = slot.cast(node.type.pointer()).dereference()
-        if node is 0:
-            return None
-
-        shift -= constants.LX_RADIX_TREE_MAP_SHIFT
-        height -= 1
-
-        if (height <= 0):
-            break
-
-    return node
-
-
-class LxRadixTree(gdb.Function):
-    """ Lookup and return a node from a RadixTree.
-
-$lx_radix_tree_lookup(root_node [, index]): Return the node at the given index.
-If index is omitted, the root node is dereferenced and returned."""
-
-    def __init__(self):
-        super(LxRadixTree, self).__init__("lx_radix_tree_lookup")
-
-    def invoke(self, root, index=0):
-        result = lookup(root, index)
-        if result is None:
-            raise gdb.GdbError("No entry in tree at index {}".format(index))
-
-        return result
-
-LxRadixTree()
index 9a0f8923f67ccb870224a93d8884458a1fa11a30..004b0ac7fa72d25598d26fbab4b2035e3e882305 100644 (file)
@@ -153,7 +153,7 @@ lx-symbols command."""
             saved_state['breakpoint'].enabled = saved_state['enabled']
 
     def invoke(self, arg, from_tty):
-        self.module_paths = arg.split()
+        self.module_paths = [os.path.expanduser(p) for p in arg.split()]
         self.module_paths.append(os.getcwd())
 
         # enforce update
index 3a80ad6eecad7167c3bb11fb2314f1d1c43c0a89..6e0b0afd888ade32768edba8e659b61a4f396ac0 100644 (file)
@@ -31,4 +31,3 @@ else:
     import linux.lists
     import linux.proc
     import linux.constants
-    import linux.radixtree
index 2fc8fad5195e8994a320013cd8c731a0d9469f63..27757c21551aecf804aa95ccd56434c9a33c3832 100755 (executable)
@@ -59,6 +59,12 @@ Output format selection (mutually exclusive):
   -text                        Output plain text format.
 
 Output selection (mutually exclusive):
+  -export              Only output documentation for symbols that have been
+                       exported using EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL()
+                       in the same FILE.
+  -internal            Only output documentation for symbols that have NOT been
+                       exported using EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL()
+                       in the same FILE.
   -function NAME       Only output documentation for the given function(s)
                        or DOC: section title(s). All other functions and DOC:
                        sections are ignored. May be specified multiple times.
@@ -68,6 +74,8 @@ Output selection (mutually exclusive):
 
 Output selection modifiers:
   -no-doc-sections     Do not output DOC: sections.
+  -enable-lineno        Enable output of #define LINENO lines. Only works with
+                        reStructuredText format.
 
 Other parameters:
   -v                   Verbose output, more warnings and other information.
@@ -206,6 +214,10 @@ my $type_struct_xml = '\\&amp;((struct\s*)*[_\w]+)';
 my $type_env = '(\$\w+)';
 my $type_enum_full = '\&(enum)\s*([_\w]+)';
 my $type_struct_full = '\&(struct)\s*([_\w]+)';
+my $type_typedef_full = '\&(typedef)\s*([_\w]+)';
+my $type_union_full = '\&(union)\s*([_\w]+)';
+my $type_member = '\&([_\w]+)((\.|->)[_\w]+)';
+my $type_member_func = $type_member . '\(\)';
 
 # Output conversion substitutions.
 #  One for each output format
@@ -274,10 +286,16 @@ my $blankline_text = "";
 # rst-mode
 my @highlights_rst = (
                        [$type_constant, "``\$1``"],
-                       [$type_func, "\\:c\\:func\\:`\$1`"],
+                       # Note: need to escape () to avoid func matching later
+                       [$type_member_func, "\\:c\\:type\\:`\$1\$2\\\\(\\\\) <\$1>`"],
+                       [$type_member, "\\:c\\:type\\:`\$1\$2 <\$1>`"],
+                       [$type_func, "\\:c\\:func\\:`\$1()`"],
                        [$type_struct_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
                        [$type_enum_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
-                       [$type_struct, "\\:c\\:type\\:`struct \$1 <\$1>`"],
+                       [$type_typedef_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
+                       [$type_union_full, "\\:c\\:type\\:`\$1 \$2 <\$2>`"],
+                       # in rst this can refer to any type
+                       [$type_struct, "\\:c\\:type\\:`\$1`"],
                        [$type_param, "**\$1**"]
                      );
 my $blankline_rst = "\n";
@@ -303,10 +321,19 @@ my $verbose = 0;
 my $output_mode = "man";
 my $output_preformatted = 0;
 my $no_doc_sections = 0;
+my $enable_lineno = 0;
 my @highlights = @highlights_man;
 my $blankline = $blankline_man;
 my $modulename = "Kernel API";
-my $function_only = 0;
+
+use constant {
+    OUTPUT_ALL          => 0, # output all symbols and doc sections
+    OUTPUT_INCLUDE      => 1, # output only specified symbols
+    OUTPUT_EXCLUDE      => 2, # output everything except specified symbols
+    OUTPUT_EXPORTED     => 3, # output exported symbols
+    OUTPUT_INTERNAL     => 4, # output non-exported symbols
+};
+my $output_selection = OUTPUT_ALL;
 my $show_not_found = 0;
 
 my @build_time;
@@ -327,6 +354,7 @@ my $man_date = ('January', 'February', 'March', 'April', 'May', 'June',
 # CAVEAT EMPTOR!  Some of the others I localised may not want to be, which
 # could cause "use of undefined value" or other bugs.
 my ($function, %function_table, %parametertypes, $declaration_purpose);
+my $declaration_start_line;
 my ($type, $declaration_name, $return_type);
 my ($newsection, $newcontents, $prototype, $brcount, %source_map);
 
@@ -344,52 +372,62 @@ my $section_counter = 0;
 
 my $lineprefix="";
 
-# states
-# 0 - normal code
-# 1 - looking for function name
-# 2 - scanning field start.
-# 3 - scanning prototype.
-# 4 - documentation block
-# 5 - gathering documentation outside main block
+# Parser states
+use constant {
+    STATE_NORMAL        => 0, # normal code
+    STATE_NAME          => 1, # looking for function name
+    STATE_FIELD         => 2, # scanning field start
+    STATE_PROTO         => 3, # scanning prototype
+    STATE_DOCBLOCK      => 4, # documentation block
+    STATE_INLINE        => 5, # gathering documentation outside main block
+};
 my $state;
 my $in_doc_sect;
 
-# Split Doc State
-# 0 - Invalid (Before start or after finish)
-# 1 - Is started (the /** was found inside a struct)
-# 2 - The @parameter header was found, start accepting multi paragraph text.
-# 3 - Finished (the */ was found)
-# 4 - Error - Comment without header was found. Spit a warning as it's not
-#     proper kernel-doc and ignore the rest.
-my $split_doc_state;
+# Inline documentation state
+use constant {
+    STATE_INLINE_NA     => 0, # not applicable ($state != STATE_INLINE)
+    STATE_INLINE_NAME   => 1, # looking for member name (@foo:)
+    STATE_INLINE_TEXT   => 2, # looking for member documentation
+    STATE_INLINE_END    => 3, # done
+    STATE_INLINE_ERROR  => 4, # error - Comment without header was found.
+                              # Spit a warning as it's not
+                              # proper kernel-doc and ignore the rest.
+};
+my $inline_doc_state;
 
 #declaration types: can be
 # 'function', 'struct', 'union', 'enum', 'typedef'
 my $decl_type;
 
-my $doc_special = "\@\%\$\&";
-
 my $doc_start = '^/\*\*\s*$'; # Allow whitespace at end of comment start.
 my $doc_end = '\*/';
 my $doc_com = '\s*\*\s*';
 my $doc_com_body = '\s*\* ?';
 my $doc_decl = $doc_com . '(\w+)';
-my $doc_sect = $doc_com . '([' . $doc_special . ']?[\w\s]+):(.*)';
+# @params and a strictly limited set of supported section names
+my $doc_sect = $doc_com . 
+    '\s*(\@\w+|description|context|returns?|notes?|examples?)\s*:(.*)';
 my $doc_content = $doc_com_body . '(.*)';
 my $doc_block = $doc_com . 'DOC:\s*(.*)?';
-my $doc_split_start = '^\s*/\*\*\s*$';
-my $doc_split_sect = '\s*\*\s*(@[\w\s]+):(.*)';
-my $doc_split_end = '^\s*\*/\s*$';
+my $doc_inline_start = '^\s*/\*\*\s*$';
+my $doc_inline_sect = '\s*\*\s*(@[\w\s]+):(.*)';
+my $doc_inline_end = '^\s*\*/\s*$';
+my $export_symbol = '^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*;';
 
-my %constants;
 my %parameterdescs;
+my %parameterdesc_start_lines;
 my @parameterlist;
 my %sections;
 my @sectionlist;
+my %section_start_lines;
 my $sectcheck;
 my $struct_actual;
 
 my $contents = "";
+my $new_start_line = 0;
+
+# the canonical section names. see also $doc_sect above.
 my $section_default = "Description";   # default section
 my $section_intro = "Introduction";
 my $section = $section_default;
@@ -437,19 +475,27 @@ while ($ARGV[0] =~ m/^-(.*)/) {
     } elsif ($cmd eq "-module") { # not needed for XML, inherits from calling document
        $modulename = shift @ARGV;
     } elsif ($cmd eq "-function") { # to only output specific functions
-       $function_only = 1;
+       $output_selection = OUTPUT_INCLUDE;
        $function = shift @ARGV;
        $function_table{$function} = 1;
-    } elsif ($cmd eq "-nofunction") { # to only output specific functions
-       $function_only = 2;
+    } elsif ($cmd eq "-nofunction") { # output all except specific functions
+       $output_selection = OUTPUT_EXCLUDE;
        $function = shift @ARGV;
        $function_table{$function} = 1;
+    } elsif ($cmd eq "-export") { # only exported symbols
+       $output_selection = OUTPUT_EXPORTED;
+       %function_table = ()
+    } elsif ($cmd eq "-internal") { # only non-exported symbols
+       $output_selection = OUTPUT_INTERNAL;
+       %function_table = ()
     } elsif ($cmd eq "-v") {
        $verbose = 1;
     } elsif (($cmd eq "-h") || ($cmd eq "--help")) {
        usage();
     } elsif ($cmd eq '-no-doc-sections') {
            $no_doc_sections = 1;
+    } elsif ($cmd eq '-enable-lineno') {
+           $enable_lineno = 1;
     } elsif ($cmd eq '-show-not-found') {
        $show_not_found = 1;
     }
@@ -467,6 +513,13 @@ sub get_kernel_version() {
     return $version;
 }
 
+#
+sub print_lineno {
+    my $lineno = shift;
+    if ($enable_lineno && defined($lineno)) {
+        print "#define LINENO " . $lineno . "\n";
+    }
+}
 ##
 # dumps section contents to arrays/hashes intended for that purpose.
 #
@@ -475,28 +528,32 @@ sub dump_section {
     my $name = shift;
     my $contents = join "\n", @_;
 
-    if ($name =~ m/$type_constant/) {
-       $name = $1;
-#      print STDERR "constant section '$1' = '$contents'\n";
-       $constants{$name} = $contents;
-    } elsif ($name =~ m/$type_param/) {
+    if ($name =~ m/$type_param/) {
 #      print STDERR "parameter def '$1' = '$contents'\n";
        $name = $1;
        $parameterdescs{$name} = $contents;
        $sectcheck = $sectcheck . $name . " ";
+        $parameterdesc_start_lines{$name} = $new_start_line;
+        $new_start_line = 0;
     } elsif ($name eq "@\.\.\.") {
 #      print STDERR "parameter def '...' = '$contents'\n";
        $name = "...";
        $parameterdescs{$name} = $contents;
        $sectcheck = $sectcheck . $name . " ";
+        $parameterdesc_start_lines{$name} = $new_start_line;
+        $new_start_line = 0;
     } else {
 #      print STDERR "other section '$name' = '$contents'\n";
        if (defined($sections{$name}) && ($sections{$name} ne "")) {
-               print STDERR "${file}:$.: error: duplicate section name '$name'\n";
-               ++$errors;
+           print STDERR "${file}:$.: warning: duplicate section name '$name'\n";
+           ++$warnings;
+           $sections{$name} .= $contents;
+       } else {
+           $sections{$name} = $contents;
+           push @sectionlist, $name;
+            $section_start_lines{$name} = $new_start_line;
+            $new_start_line = 0;
        }
-       $sections{$name} = $contents;
-       push @sectionlist, $name;
     }
 }
 
@@ -512,15 +569,17 @@ sub dump_doc_section {
         return;
     }
 
-    if (($function_only == 0) ||
-       ( $function_only == 1 && defined($function_table{$name})) ||
-       ( $function_only == 2 && !defined($function_table{$name})))
+    if (($output_selection == OUTPUT_ALL) ||
+       ($output_selection == OUTPUT_INCLUDE &&
+        defined($function_table{$name})) ||
+       ($output_selection == OUTPUT_EXCLUDE &&
+        !defined($function_table{$name})))
     {
        dump_section($file, $name, $contents);
        output_blockhead({'sectionlist' => \@sectionlist,
                          'sections' => \%sections,
                          'module' => $modulename,
-                         'content-only' => ($function_only != 0), });
+                         'content-only' => ($output_selection != OUTPUT_ALL), });
     }
 }
 
@@ -1736,7 +1795,10 @@ sub output_blockhead_rst(%) {
     my ($parameter, $section);
 
     foreach $section (@{$args{'sectionlist'}}) {
-       print "**$section**\n\n";
+       if ($output_selection != OUTPUT_INCLUDE) {
+           print "**$section**\n\n";
+       }
+        print_lineno($section_start_lines{$section});
        output_highlight_rst($args{'sections'}{$section});
        print "\n";
     }
@@ -1753,19 +1815,14 @@ sub output_highlight_rst {
     die $@ if $@;
 
     foreach $line (split "\n", $contents) {
-       if ($line eq "") {
-           print $lineprefix, $blankline;
-       } else {
-           $line =~ s/\\\\\\/\&/g;
-           print $lineprefix, $line;
-       }
-       print "\n";
+       print $lineprefix . $line . "\n";
     }
 }
 
 sub output_function_rst(%) {
     my %args = %{$_[0]};
     my ($parameter, $section);
+    my $oldprefix = $lineprefix;
     my $start;
 
     print ".. c:function:: ";
@@ -1790,29 +1847,37 @@ sub output_function_rst(%) {
            print $type . " " . $parameter;
        }
     }
-    print ")\n\n    " . $args{'purpose'} . "\n\n";
+    print ")\n\n";
+    print_lineno($declaration_start_line);
+    $lineprefix = "   ";
+    output_highlight_rst($args{'purpose'});
+    print "\n";
 
-    print ":Parameters:\n\n";
+    print "**Parameters**\n\n";
+    $lineprefix = "  ";
     foreach $parameter (@{$args{'parameterlist'}}) {
        my $parameter_name = $parameter;
        #$parameter_name =~ s/\[.*//;
        $type = $args{'parametertypes'}{$parameter};
 
        if ($type ne "") {
-           print "      ``$type $parameter``\n";
+           print "``$type $parameter``\n";
        } else {
-           print "      ``$parameter``\n";
+           print "``$parameter``\n";
        }
-       if ($args{'parameterdescs'}{$parameter_name} ne $undescribed) {
-           my $oldprefix = $lineprefix;
-           $lineprefix = "        ";
+
+        print_lineno($parameterdesc_start_lines{$parameter_name});
+
+       if (defined($args{'parameterdescs'}{$parameter_name}) &&
+           $args{'parameterdescs'}{$parameter_name} ne $undescribed) {
            output_highlight_rst($args{'parameterdescs'}{$parameter_name});
-           $lineprefix = $oldprefix;
        } else {
-           print "\n        _undescribed_\n";
+           print "  *undescribed*\n";
        }
        print "\n";
     }
+
+    $lineprefix = $oldprefix;
     output_section_rst(@_);
 }
 
@@ -1820,10 +1885,11 @@ sub output_section_rst(%) {
     my %args = %{$_[0]};
     my $section;
     my $oldprefix = $lineprefix;
-    $lineprefix = "        ";
+    $lineprefix = "";
 
     foreach $section (@{$args{'sectionlist'}}) {
-       print ":$section:\n\n";
+       print "**$section**\n\n";
+        print_lineno($section_start_lines{$section});
        output_highlight_rst($args{'sections'}{$section});
        print "\n";
     }
@@ -1834,24 +1900,28 @@ sub output_section_rst(%) {
 sub output_enum_rst(%) {
     my %args = %{$_[0]};
     my ($parameter);
+    my $oldprefix = $lineprefix;
     my $count;
     my $name = "enum " . $args{'enum'};
 
     print "\n\n.. c:type:: " . $name . "\n\n";
-    print "    " . $args{'purpose'} . "\n\n";
+    print_lineno($declaration_start_line);
+    $lineprefix = "   ";
+    output_highlight_rst($args{'purpose'});
+    print "\n";
 
-    print "..\n\n:Constants:\n\n";
-    my $oldprefix = $lineprefix;
-    $lineprefix = "    ";
+    print "**Constants**\n\n";
+    $lineprefix = "  ";
     foreach $parameter (@{$args{'parameterlist'}}) {
-       print "  `$parameter`\n";
+       print "``$parameter``\n";
        if ($args{'parameterdescs'}{$parameter} ne $undescribed) {
            output_highlight_rst($args{'parameterdescs'}{$parameter});
        } else {
-           print "    undescribed\n";
+           print "  *undescribed*\n";
        }
        print "\n";
     }
+
     $lineprefix = $oldprefix;
     output_section_rst(@_);
 }
@@ -1859,30 +1929,37 @@ sub output_enum_rst(%) {
 sub output_typedef_rst(%) {
     my %args = %{$_[0]};
     my ($parameter);
-    my $count;
+    my $oldprefix = $lineprefix;
     my $name = "typedef " . $args{'typedef'};
 
-    ### FIXME: should the name below contain "typedef" or not?
     print "\n\n.. c:type:: " . $name . "\n\n";
-    print "    " . $args{'purpose'} . "\n\n";
+    print_lineno($declaration_start_line);
+    $lineprefix = "   ";
+    output_highlight_rst($args{'purpose'});
+    print "\n";
 
+    $lineprefix = $oldprefix;
     output_section_rst(@_);
 }
 
 sub output_struct_rst(%) {
     my %args = %{$_[0]};
     my ($parameter);
+    my $oldprefix = $lineprefix;
     my $name = $args{'type'} . " " . $args{'struct'};
 
     print "\n\n.. c:type:: " . $name . "\n\n";
-    print "    " . $args{'purpose'} . "\n\n";
+    print_lineno($declaration_start_line);
+    $lineprefix = "   ";
+    output_highlight_rst($args{'purpose'});
+    print "\n";
 
-    print ":Definition:\n\n";
-    print " ::\n\n";
+    print "**Definition**\n\n";
+    print "::\n\n";
     print "  " . $args{'type'} . " " . $args{'struct'} . " {\n";
     foreach $parameter (@{$args{'parameterlist'}}) {
        if ($parameter =~ /^#/) {
-           print "    " . "$parameter\n";
+           print "  " . "$parameter\n";
            next;
        }
 
@@ -1903,7 +1980,8 @@ sub output_struct_rst(%) {
     }
     print "  };\n\n";
 
-    print ":Members:\n\n";
+    print "**Members**\n\n";
+    $lineprefix = "  ";
     foreach $parameter (@{$args{'parameterlist'}}) {
        ($parameter =~ /^#/) && next;
 
@@ -1912,14 +1990,14 @@ sub output_struct_rst(%) {
 
        ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next;
        $type = $args{'parametertypes'}{$parameter};
-       print "      `$type $parameter`" . "\n";
-       my $oldprefix = $lineprefix;
-       $lineprefix = "        ";
+        print_lineno($parameterdesc_start_lines{$parameter_name});
+       print "``$type $parameter``\n";
        output_highlight_rst($args{'parameterdescs'}{$parameter_name});
-       $lineprefix = $oldprefix;
        print "\n";
     }
     print "\n";
+
+    $lineprefix = $oldprefix;
     output_section_rst(@_);
 }
 
@@ -1969,9 +2047,13 @@ sub output_declaration {
     my $name = shift;
     my $functype = shift;
     my $func = "output_${functype}_$output_mode";
-    if (($function_only==0) ||
-       ( $function_only == 1 && defined($function_table{$name})) ||
-       ( $function_only == 2 && !($functype eq "function" && defined($function_table{$name}))))
+    if (($output_selection == OUTPUT_ALL) ||
+       (($output_selection == OUTPUT_INCLUDE ||
+         $output_selection == OUTPUT_EXPORTED) &&
+        defined($function_table{$name})) ||
+       (($output_selection == OUTPUT_EXCLUDE ||
+         $output_selection == OUTPUT_INTERNAL) &&
+        !($functype eq "function" && defined($function_table{$name}))))
     {
        &$func(@_);
        $section_counter++;
@@ -2471,7 +2553,6 @@ sub dump_function($$) {
 
 sub reset_state {
     $function = "";
-    %constants = ();
     %parameterdescs = ();
     %parametertypes = ();
     @parameterlist = ();
@@ -2481,8 +2562,8 @@ sub reset_state {
     $struct_actual = "";
     $prototype = "";
 
-    $state = 0;
-    $split_doc_state = 0;
+    $state = STATE_NORMAL;
+    $inline_doc_state = STATE_INLINE_NA;
 }
 
 sub tracepoint_munge($) {
@@ -2545,7 +2626,7 @@ sub syscall_munge() {
        }
 }
 
-sub process_state3_function($$) {
+sub process_proto_function($$) {
     my $x = shift;
     my $file = shift;
 
@@ -2575,7 +2656,7 @@ sub process_state3_function($$) {
     }
 }
 
-sub process_state3_type($$) {
+sub process_proto_type($$) {
     my $x = shift;
     my $file = shift;
 
@@ -2657,6 +2738,7 @@ sub process_file($) {
     my $in_purpose = 0;
     my $initial_section_counter = $section_counter;
     my ($orig_file) = @_;
+    my $leading_space;
 
     if (defined($ENV{'SRCTREE'})) {
        $file = "$ENV{'SRCTREE'}" . "/" . $orig_file;
@@ -2674,6 +2756,17 @@ sub process_file($) {
        return;
     }
 
+    # two passes for -export and -internal
+    if ($output_selection == OUTPUT_EXPORTED ||
+       $output_selection == OUTPUT_INTERNAL) {
+       while (<IN>) {
+           if (/$export_symbol/o) {
+               $function_table{$2} = 1;
+           }
+       }
+       seek(IN, 0, 0);
+    }
+
     $. = 1;
 
     $section_counter = 0;
@@ -2681,15 +2774,18 @@ sub process_file($) {
        while (s/\\\s*$//) {
            $_ .= <IN>;
        }
-       if ($state == 0) {
+       if ($state == STATE_NORMAL) {
            if (/$doc_start/o) {
-               $state = 1;             # next line is always the function name
+               $state = STATE_NAME;    # next line is always the function name
                $in_doc_sect = 0;
+               $declaration_start_line = $. + 1;
            }
-       } elsif ($state == 1) { # this line is the function name (always)
+       } elsif ($state == STATE_NAME) { # this line is the function name (always)
            if (/$doc_block/o) {
-               $state = 4;
+               $state = STATE_DOCBLOCK;
                $contents = "";
+                $new_start_line = $. + 1;
+
                if ( $1 eq "" ) {
                        $section = $section_intro;
                } else {
@@ -2702,7 +2798,12 @@ sub process_file($) {
                    $identifier = $1;
                }
 
-               $state = 2;
+               $state = STATE_FIELD;
+               # if there are no @param blocks we need to set up a default
+               # section here
+               $contents = "";
+               $section = $section_default;
+               $new_start_line = $. + 1;
                if (/-(.*)/) {
                    # strip leading/trailing/multiple spaces
                    $descr= $1;
@@ -2740,13 +2841,25 @@ sub process_file($) {
                print STDERR "${file}:$.: warning: Cannot understand $_ on line $.",
                " - I thought it was a doc line\n";
                ++$warnings;
-               $state = 0;
+               $state = STATE_NORMAL;
            }
-       } elsif ($state == 2) { # look for head: lines, and include content
-           if (/$doc_sect/o) {
+       } elsif ($state == STATE_FIELD) {       # look for head: lines, and include content
+           if (/$doc_sect/i) { # case insensitive for supported section names
                $newsection = $1;
                $newcontents = $2;
 
+               # map the supported section names to the canonical names
+               if ($newsection =~ m/^description$/i) {
+                   $newsection = $section_default;
+               } elsif ($newsection =~ m/^context$/i) {
+                   $newsection = $section_context;
+               } elsif ($newsection =~ m/^returns?$/i) {
+                   $newsection = $section_return;
+               } elsif ($newsection =~ m/^\@return$/) {
+                   # special: @return is a section, not a param description
+                   $newsection = $section_return;
+               }
+
                if (($contents ne "") && ($contents ne "\n")) {
                    if (!$in_doc_sect && $verbose) {
                        print STDERR "${file}:$.: warning: contents before sections\n";
@@ -2759,14 +2872,16 @@ sub process_file($) {
                $in_doc_sect = 1;
                $in_purpose = 0;
                $contents = $newcontents;
+                $new_start_line = $.;
+               while ((substr($contents, 0, 1) eq " ") ||
+                      substr($contents, 0, 1) eq "\t") {
+                   $contents = substr($contents, 1);
+               }
                if ($contents ne "") {
-                   while ((substr($contents, 0, 1) eq " ") ||
-                       substr($contents, 0, 1) eq "\t") {
-                           $contents = substr($contents, 1);
-                   }
                    $contents .= "\n";
                }
                $section = $newsection;
+               $leading_space = undef;
            } elsif (/$doc_end/) {
                if (($contents ne "") && ($contents ne "\n")) {
                    dump_section($file, $section, xml_escape($contents));
@@ -2780,7 +2895,7 @@ sub process_file($) {
                }
 
                $prototype = "";
-               $state = 3;
+               $state = STATE_PROTO;
                $brcount = 0;
 #              print STDERR "end of doc comment, looking for prototype\n";
            } elsif (/$doc_content/) {
@@ -2791,6 +2906,7 @@ sub process_file($) {
                        dump_section($file, $section, xml_escape($contents));
                        $section = $section_default;
                        $contents = "";
+                        $new_start_line = $.;
                    } else {
                        $contents .= "\n";
                    }
@@ -2801,87 +2917,86 @@ sub process_file($) {
                    $declaration_purpose .= " " . xml_escape($1);
                    $declaration_purpose =~ s/\s+/ /g;
                } else {
-                   $contents .= $1 . "\n";
+                   my $cont = $1;
+                   if ($section =~ m/^@/ || $section eq $section_context) {
+                       if (!defined $leading_space) {
+                           if ($cont =~ m/^(\s+)/) {
+                               $leading_space = $1;
+                           } else {
+                               $leading_space = "";
+                           }
+                       }
+
+                       $cont =~ s/^$leading_space//;
+                   }
+                   $contents .= $cont . "\n";
                }
            } else {
                # I don't know - bad line?  Ignore.
                print STDERR "${file}:$.: warning: bad line: $_";
                ++$warnings;
            }
-       } elsif ($state == 5) { # scanning for split parameters
+       } elsif ($state == STATE_INLINE) { # scanning for inline parameters
            # First line (STATE_INLINE_NAME) needs to be a @parameter
-           if ($split_doc_state == 1 && /$doc_split_sect/o) {
+           if ($inline_doc_state == STATE_INLINE_NAME && /$doc_inline_sect/o) {
                $section = $1;
                $contents = $2;
+                $new_start_line = $.;
                if ($contents ne "") {
                    while ((substr($contents, 0, 1) eq " ") ||
                           substr($contents, 0, 1) eq "\t") {
                        $contents = substr($contents, 1);
                    }
-               $contents .= "\n";
+                   $contents .= "\n";
                }
-               $split_doc_state = 2;
+               $inline_doc_state = STATE_INLINE_TEXT;
            # Documentation block end */
-           } elsif (/$doc_split_end/) {
+           } elsif (/$doc_inline_end/) {
                if (($contents ne "") && ($contents ne "\n")) {
                    dump_section($file, $section, xml_escape($contents));
                    $section = $section_default;
                    $contents = "";
                }
-               $state = 3;
-               $split_doc_state = 0;
+               $state = STATE_PROTO;
+               $inline_doc_state = STATE_INLINE_NA;
            # Regular text
            } elsif (/$doc_content/) {
-               if ($split_doc_state == 2) {
+               if ($inline_doc_state == STATE_INLINE_TEXT) {
                    $contents .= $1 . "\n";
-               } elsif ($split_doc_state == 1) {
-                   $split_doc_state = 4;
+                   # nuke leading blank lines
+                   if ($contents =~ /^\s*$/) {
+                       $contents = "";
+                   }
+               } elsif ($inline_doc_state == STATE_INLINE_NAME) {
+                   $inline_doc_state = STATE_INLINE_ERROR;
                    print STDERR "Warning(${file}:$.): ";
                    print STDERR "Incorrect use of kernel-doc format: $_";
                    ++$warnings;
                }
            }
-       } elsif ($state == 3) { # scanning for function '{' (end of prototype)
-           if (/$doc_split_start/) {
-               $state = 5;
-               $split_doc_state = 1;
+       } elsif ($state == STATE_PROTO) {       # scanning for function '{' (end of prototype)
+           if (/$doc_inline_start/) {
+               $state = STATE_INLINE;
+               $inline_doc_state = STATE_INLINE_NAME;
            } elsif ($decl_type eq 'function') {
-               process_state3_function($_, $file);
+               process_proto_function($_, $file);
            } else {
-               process_state3_type($_, $file);
+               process_proto_type($_, $file);
            }
-       } elsif ($state == 4) {
-               # Documentation block
-               if (/$doc_block/) {
-                       dump_doc_section($file, $section, xml_escape($contents));
-                       $contents = "";
-                       $function = "";
-                       %constants = ();
-                       %parameterdescs = ();
-                       %parametertypes = ();
-                       @parameterlist = ();
-                       %sections = ();
-                       @sectionlist = ();
-                       $prototype = "";
-                       if ( $1 eq "" ) {
-                               $section = $section_intro;
-                       } else {
-                               $section = $1;
-                       }
-               }
-               elsif (/$doc_end/)
+       } elsif ($state == STATE_DOCBLOCK) {
+               if (/$doc_end/)
                {
                        dump_doc_section($file, $section, xml_escape($contents));
+                       $section = $section_default;
                        $contents = "";
                        $function = "";
-                       %constants = ();
                        %parameterdescs = ();
                        %parametertypes = ();
                        @parameterlist = ();
                        %sections = ();
                        @sectionlist = ();
                        $prototype = "";
-                       $state = 0;
+                       $state = STATE_NORMAL;
                }
                elsif (/$doc_content/)
                {
@@ -2898,7 +3013,7 @@ sub process_file($) {
     }
     if ($initial_section_counter == $section_counter) {
        print STDERR "${file}:1: warning: no structured comments found\n";
-       if (($function_only == 1) && ($show_not_found == 1)) {
+       if (($output_selection == OUTPUT_INCLUDE) && ($show_not_found == 1)) {
            print STDERR "    Was looking for '$_'.\n" for keys %function_table;
        }
        if ($output_mode eq "xml") {
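
The kernel-doc changes above make section headings case-insensitive, map them to canonical names ("description", "Returns", "context" each land in one section), merge duplicate sections with a warning instead of a fatal error, and emit line-number markers for the RST output. A hypothetical comment exercising the canonical sections (function and parameter names are illustrative only):

    /**
     * foo_frob() - frob a foo
     * @foo: the foo to be frobbed
     *
     * Description: free-form text; "DESCRIPTION:" or "description:" would
     * now be folded into this same canonical section.
     *
     * Context: may sleep
     * Return: 0 on success, negative errno on failure ("Returns:" works too)
     */
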
index a9155077feefb957d38dc29b4823a9529a7ee0e9..fec75786f75bbb348d03143af7d2280015394643 100644 (file)
@@ -384,7 +384,7 @@ static void do_of_entry_multi(void *symval, struct module *mod)
        len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
                      (*type)[0] ? *type : "*");
 
-       if (compatible[0])
+       if ((*compatible)[0])
                sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
                        *compatible);
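
The file2alias fix above corrects a pointer-to-array mix-up: `compatible` points at a fixed-size char array, so `compatible[0]` names the array itself, which decays to a non-NULL pointer and makes the test always true. A minimal sketch of the difference (the array size is illustrative, and do_entry() stands in for the sprintf call):

    char buf[128] = "";
    char (*compatible)[128] = &buf;

    /* compatible[0] is the array; it decays to a non-NULL pointer,
     * so this branch is always taken: */
    if (compatible[0])
            do_entry();
    /* (*compatible)[0] is the first character, so this correctly
     * skips empty strings: */
    if ((*compatible)[0])
            do_entry();
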
 
index 2660fbcf94d1e0e8affa72d4656584bc0aa06216..7798e1608f4f4bb429c94017e2c6f73b07a7afc9 100644 (file)
@@ -500,34 +500,34 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
 {
        struct common_audit_data sa;
        struct apparmor_audit_data aad = {0,};
-       char *command, *args = value;
+       char *command, *largs = NULL, *args = value;
        size_t arg_size;
        int error;
 
        if (size == 0)
                return -EINVAL;
-       /* args points to a PAGE_SIZE buffer, AppArmor requires that
-        * the buffer must be null terminated or have size <= PAGE_SIZE -1
-        * so that AppArmor can null terminate them
-        */
-       if (args[size - 1] != '\0') {
-               if (size == PAGE_SIZE)
-                       return -EINVAL;
-               args[size] = '\0';
-       }
-
        /* task can only write its own attributes */
        if (current != task)
                return -EACCES;
 
-       args = value;
+       /* AppArmor currently requires that the buffer be null terminated */
+       if (args[size - 1] != '\0') {
+               /* null terminate */
+               largs = args = kmalloc(size + 1, GFP_KERNEL);
+               if (!args)
+                       return -ENOMEM;
+               memcpy(args, value, size);
+               args[size] = '\0';
+       }
+
+       error = -EINVAL;
        args = strim(args);
        command = strsep(&args, " ");
        if (!args)
-               return -EINVAL;
+               goto out;
        args = skip_spaces(args);
        if (!*args)
-               return -EINVAL;
+               goto out;
 
        arg_size = size - (args - (char *) value);
        if (strcmp(name, "current") == 0) {
@@ -553,10 +553,12 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
                        goto fail;
        } else
                /* only support the "current" and "exec" process attributes */
-               return -EINVAL;
+               goto fail;
 
        if (!error)
                error = size;
+out:
+       kfree(largs);
        return error;
 
 fail:
@@ -565,9 +567,9 @@ fail:
        aad.profile = aa_current_profile();
        aad.op = OP_SETPROCATTR;
        aad.info = name;
-       aad.error = -EINVAL;
+       aad.error = error = -EINVAL;
        aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
-       return -EINVAL;
+       goto out;
 }
 
 static int apparmor_task_setrlimit(struct task_struct *task,
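
The apparmor_setprocattr() rework above stops writing a terminating NUL into the caller-supplied buffer, which both mutated the input and forced an outright failure when size == PAGE_SIZE; instead it takes a private, NUL-terminated copy and frees it on every exit path. The shape of the pattern, as a sketch (`largs` from the hunk is renamed `copy` here):

    char *copy = NULL, *args = value;

    if (args[size - 1] != '\0') {
            copy = args = kmalloc(size + 1, GFP_KERNEL);
            if (!copy)
                    return -ENOMEM;
            memcpy(args, value, size);
            args[size] = '\0';
    }
    /* ... parse args ... */
    kfree(copy);                    /* kfree(NULL) is a harmless no-op */
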
index c8783b3b628ca6282bd59597eb1a7f685f1e48cc..36c80bf5b89c6f28a7c6010aa6b72f373bf4d1e1 100644 (file)
@@ -134,7 +134,7 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
 
        case KEYCTL_DH_COMPUTE:
                return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3),
-                                        arg4);
+                                        arg4, compat_ptr(arg5));
 
        default:
                return -EOPNOTSUPP;
index 880505a4b9f1e273429783aa5ebe1eee2579674c..531ed2ec132f4f0ab97143b86b3bb55d7eb527c8 100644 (file)
@@ -78,7 +78,8 @@ error:
 }
 
 long keyctl_dh_compute(struct keyctl_dh_params __user *params,
-                      char __user *buffer, size_t buflen)
+                      char __user *buffer, size_t buflen,
+                      void __user *reserved)
 {
        long ret;
        MPI base, private, prime, result;
@@ -97,6 +98,11 @@ long keyctl_dh_compute(struct keyctl_dh_params __user *params,
                goto out;
        }
 
+       if (reserved) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        keylen = mpi_from_key(pcopy.prime, buflen, &prime);
        if (keylen < 0 || !prime) {
                /* buflen == 0 may be used to query the required buffer size,
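
Adding a `reserved` pointer that must currently be NULL keeps the fifth keyctl() argument slot available for a future extension without an ABI break: existing binaries pass 0/NULL and keep working, while anything non-NULL is rejected with -EINVAL today. A userspace sketch (a fragment, not a complete program; it assumes the uapi definitions of struct keyctl_dh_params and KEYCTL_DH_COMPUTE):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/keyctl.h>

    static long dh_compute(struct keyctl_dh_params *params,
                           char *buf, size_t buflen)
    {
            return syscall(__NR_keyctl, KEYCTL_DH_COMPUTE,
                           params, buf, buflen, NULL /* reserved */);
    }
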
index 8ec7a528365d967b8c82a97ded54c20df2459ce6..a705a7d92ad7a95ca513ae2d3da8a18879196a2a 100644 (file)
@@ -260,10 +260,11 @@ static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
 
 #ifdef CONFIG_KEY_DH_OPERATIONS
 extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
-                             size_t);
+                             size_t, void __user *);
 #else
 static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
-                                    char __user *buffer, size_t buflen)
+                                    char __user *buffer, size_t buflen,
+                                    void __user *reserved)
 {
        return -EOPNOTSUPP;
 }
index bd5a272f28a66f689e3dc3bc0951d37e302ad301..346fbf201c22e37e67dc58b5684c868e79ad9af5 100644 (file)
@@ -597,7 +597,7 @@ int key_reject_and_link(struct key *key,
 
        mutex_unlock(&key_construction_mutex);
 
-       if (keyring)
+       if (keyring && link_ret == 0)
                __key_link_end(keyring, &key->index_key, edit);
 
        /* wake up anyone waiting for a key to be constructed */
index 3b135a0af34406a52dc8019f664fd855c5ad0068..d580ad06b792ff539f124a620a4c8791740f22dd 100644 (file)
@@ -1688,8 +1688,8 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
 
        case KEYCTL_DH_COMPUTE:
                return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
-                                        (char __user *) arg3,
-                                        (size_t) arg4);
+                                        (char __user *) arg3, (size_t) arg4,
+                                        (void __user *) arg5);
 
        default:
                return -EOPNOTSUPP;
index a85d45595d02a265f1f8e1a4cd36c812d8cd08f0..b4fe9b00251251d596fbfbce2dd87eb44841b917 100644 (file)
@@ -160,6 +160,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
        
        if (snd_BUG_ON(!card || !id))
                return;
+       if (card->shutdown)
+               return;
        read_lock(&card->ctl_files_rwlock);
 #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
        card->mixer_oss_change_count++;
index 308c9ecf73db18415aa1157ee4e895731a5abc36..8e980aa678d0d8412e951d78f3026f25e6f6badb 100644 (file)
@@ -849,6 +849,14 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device,
 }
 EXPORT_SYMBOL(snd_pcm_new_internal);
 
+static void free_chmap(struct snd_pcm_str *pstr)
+{
+       if (pstr->chmap_kctl) {
+               snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
+               pstr->chmap_kctl = NULL;
+       }
+}
+
 static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
 {
        struct snd_pcm_substream *substream, *substream_next;
@@ -871,6 +879,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
                kfree(setup);
        }
 #endif
+       free_chmap(pstr);
        if (pstr->substream_count)
                put_device(&pstr->dev);
 }
@@ -1135,10 +1144,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
        for (cidx = 0; cidx < 2; cidx++) {
                if (!pcm->internal)
                        snd_unregister_device(&pcm->streams[cidx].dev);
-               if (pcm->streams[cidx].chmap_kctl) {
-                       snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
-                       pcm->streams[cidx].chmap_kctl = NULL;
-               }
+               free_chmap(&pcm->streams[cidx]);
        }
        mutex_unlock(&pcm->open_mutex);
        mutex_unlock(&register_mutex);
index e722022d325d7771d0d516c08ad830d814eeebdc..9a6157ea6881703310586bea76f5e1c61fcfb2a5 100644 (file)
@@ -1955,6 +1955,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
                qhead = tu->qhead++;
                tu->qhead %= tu->queue_size;
+               tu->qused--;
                spin_unlock_irq(&tu->qlock);
 
                if (tu->tread) {
@@ -1968,7 +1969,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                }
 
                spin_lock_irq(&tu->qlock);
-               tu->qused--;
                if (err < 0)
                        goto _error;
                result += unit;
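
The timer fix above pulls the tu->qused decrement into the same qlock-protected region that advances qhead: the head index and the used count describe one queue, so updating one of them outside the lock lets a concurrent enqueue observe a stale count. Consolidated, the critical section now reads (sketch):

    spin_lock_irq(&tu->qlock);
    qhead = tu->qhead++;
    tu->qhead %= tu->queue_size;
    tu->qused--;            /* under the same lock as the enqueue side */
    spin_unlock_irq(&tu->qlock);
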
index c0f8f613f1f1b5e954ffa82b760c82353dd17c80..172dacd925f5721c2726194ddb917aa612f4afc3 100644 (file)
@@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
 
 static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
 {
+       hrtimer_cancel(&dpcm->timer);
        tasklet_kill(&dpcm->tasklet);
 }
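
Ordering is the point of the dummy-driver change above: the hrtimer callback is what schedules the tasklet, so the timer must be cancelled first, otherwise a callback firing after tasklet_kill() returns can re-schedule the tasklet that was just killed. The resulting invariant, sketched:

    hrtimer_cancel(&dpcm->timer);   /* stop the producer first      */
    tasklet_kill(&dpcm->tasklet);   /* then wait out the last run   */
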
 
index 87041ddd29cbcbf2c63fca9ed90ffdbc0e5f249d..47a358fab13280acad02b319d012953eb0a57e7d 100644 (file)
@@ -444,7 +444,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
        err = reg_raw_write(codec, reg, val);
        if (err == -EAGAIN) {
                err = snd_hdac_power_up_pm(codec);
-               if (!err)
+               if (err >= 0)
                        err = reg_raw_write(codec, reg, val);
                snd_hdac_power_down_pm(codec);
        }
@@ -470,7 +470,7 @@ static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
        err = reg_raw_read(codec, reg, val, uncached);
        if (err == -EAGAIN) {
                err = snd_hdac_power_up_pm(codec);
-               if (!err)
+               if (err >= 0)
                        err = reg_raw_read(codec, reg, val, uncached);
                snd_hdac_power_down_pm(codec);
        }
index 4a054d72011246db38fbdba68c25f3da0c0ce372..d3125c16968457436de5b659da2127157b5fbcba 100644 (file)
@@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
        int page, p, pp, delta, i;
 
        page =
-           (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
-            WT_SUBBUF_MASK)
-           >> WT_SUBBUF_SHIFT;
+           (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
+            >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
        if (dma->nr_periods >= 4)
                delta = (page - dma->period_real) & 3;
        else {
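
The au88x0 change swaps mask-then-shift for shift-then-mask, which implies WT_SUBBUF_MASK is defined relative to bit 0 rather than pre-shifted to the field's position. With a low-bits mask, masking first zeroes the field before it is ever shifted. A sketch with illustrative values:

    u32 stat = 0x00003000;                 /* example register value */
    #define FIELD_SHIFT 12
    #define FIELD_MASK  0x3                /* relative to bit 0      */

    u32 right = (stat >> FIELD_SHIFT) & FIELD_MASK;  /* 0x3: the field */
    u32 wrong = (stat & FIELD_MASK) >> FIELD_SHIFT;  /* 0: field lost  */
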
index 1cb85aeb0cea058d35e5ed4b48b1ce6f9d166ff2..286f5e3686a3e7aa7a504dbf68cfb2104bdd4bdb 100644 (file)
@@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
        u32 pipe_alloc_mask;
        int err;
 
-       commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
+       commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
        if (commpage_bak == NULL)
                return -ENOMEM;
        commpage = chip->comm_page;
-       memcpy(commpage_bak, commpage, sizeof(struct comm_page));
+       memcpy(commpage_bak, commpage, sizeof(*commpage));
 
        err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
        if (err < 0) {
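
The echoaudio resume fix is the classic argument for the sizeof(*ptr) idiom: the backup buffer was sized from one type (struct echoaudio) but filled from another (struct comm_page), overflowing the allocation whenever the latter is larger. Deriving both sizes from the pointer being copied makes that divergence impossible (sketch):

    struct comm_page *bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
    if (!bak)
            return -ENOMEM;
    memcpy(bak, commpage, sizeof(*commpage));  /* same expression, same size */
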
index 320445f3bf736d51e3dcfc884cdf5f7aef0b3855..79c7b340acc2361518b51504b7ecb032a71991b2 100644 (file)
@@ -3977,6 +3977,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
 
        for (n = 0; n < spec->paths.used; n++) {
                path = snd_array_elem(&spec->paths, n);
+               if (!path->depth)
+                       continue;
                if (path->path[0] == nid ||
                    path->path[path->depth - 1] == nid) {
                        bool pin_old = path->pin_enabled;
index 9a0d1445ca5cf85c774a6f4e4be1d5a44770adad..6f8ea13323c1819c27dbe10f2af0293415f06135 100644 (file)
@@ -365,8 +365,12 @@ enum {
 
 #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
+#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
+#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
+                       IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci))
 
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
@@ -1214,8 +1218,10 @@ static int azx_free(struct azx *chip)
        if (use_vga_switcheroo(hda)) {
                if (chip->disabled && hda->probe_continued)
                        snd_hda_unlock_devices(&chip->bus);
-               if (hda->vga_switcheroo_registered)
+               if (hda->vga_switcheroo_registered) {
                        vga_switcheroo_unregister_client(chip->pci);
+                       vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
+               }
        }
 
        if (bus->chip_init) {
@@ -2181,6 +2187,15 @@ static const struct pci_device_id azx_ids[] = {
        /* Sunrise Point-LP */
        { PCI_DEVICE(0x8086, 0x9d70),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Kabylake */
+       { PCI_DEVICE(0x8086, 0xa171),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Kabylake-LP */
+       { PCI_DEVICE(0x8086, 0x9d71),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Kabylake-H */
+       { PCI_DEVICE(0x8086, 0xa2f0),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
@@ -2254,6 +2269,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x157a),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0x15b3),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x793b),
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(0x1002, 0x7919),
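
A note on the IS_SKL_PLUS macro corrected above: the whole expansion must stay inside one pair of parentheses, or `!IS_SKL_PLUS(pci)` binds the negation to the first parenthesized group only. A self-contained illustration (macro names hypothetical):

    #define BAD(x)  (x == 1) || (x == 2)
    #define GOOD(x) ((x == 1) || (x == 2))

    /* !BAD(2)  expands to  !(2 == 1) || (2 == 2)   -> 1 (wrong) */
    /* !GOOD(2) expands to  !((2 == 1) || (2 == 2)) -> 0 (right) */
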
index 17fd81736d3d67f6a6fb67ef1ad624a6d96d93a9..0621920f7617f91900591c46970f6cbd03216771 100644 (file)
@@ -115,20 +115,20 @@ static int substream_free_pages(struct azx *chip,
 /*
  * Register access ops. Tegra HDA register access is DWORD only.
  */
-static void hda_tegra_writel(u32 value, u32 *addr)
+static void hda_tegra_writel(u32 value, u32 __iomem *addr)
 {
        writel(value, addr);
 }
 
-static u32 hda_tegra_readl(u32 *addr)
+static u32 hda_tegra_readl(u32 __iomem *addr)
 {
        return readl(addr);
 }
 
-static void hda_tegra_writew(u16 value, u16 *addr)
+static void hda_tegra_writew(u16 value, u16 __iomem  *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
@@ -137,20 +137,20 @@ static void hda_tegra_writew(u16 value, u16 *addr)
        writel(v, dword_addr);
 }
 
-static u16 hda_tegra_readw(u16 *addr)
+static u16 hda_tegra_readw(u16 __iomem *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
        return (v >> shift) & 0xffff;
 }
 
-static void hda_tegra_writeb(u8 value, u8 *addr)
+static void hda_tegra_writeb(u8 value, u8 __iomem *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
@@ -159,10 +159,10 @@ static void hda_tegra_writeb(u8 value, u8 *addr)
        writel(v, dword_addr);
 }
 
-static u8 hda_tegra_readb(u8 *addr)
+static u8 hda_tegra_readb(u8 __iomem *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
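
The hda_tegra changes are annotation-only: __iomem tells sparse (`make C=1`) that a pointer refers to memory-mapped I/O, which must go through readl()/writel() rather than plain dereferences, so mixing address spaces becomes a build-time warning instead of a silent bug. A sketch (assumes `res` came from platform_get_resource()):

    struct resource *res;   /* assumed: from platform_get_resource() */
    void __iomem *base = ioremap(res->start, resource_size(res));
    u32 v = readl(base + 0x10);          /* ok: __iomem into __iomem API */
    writel(v | 0x1, base + 0x10);
    /* u32 *p = base;  <- sparse would warn: different address spaces */
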
index d53c25e7a1c19473d98d001407c5321fc0578e8d..abcb5a6a1cd9547db9c83e1cada1456615d47db8 100644 (file)
@@ -346,6 +346,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0234:
        case 0x10ec0274:
        case 0x10ec0294:
+       case 0x10ec0700:
+       case 0x10ec0701:
+       case 0x10ec0703:
                alc_update_coef_idx(codec, 0x10, 1<<15, 0);
                break;
        case 0x10ec0662:
@@ -2655,6 +2658,7 @@ enum {
        ALC269_TYPE_ALC256,
        ALC269_TYPE_ALC225,
        ALC269_TYPE_ALC294,
+       ALC269_TYPE_ALC700,
 };
 
 /*
@@ -2686,6 +2690,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC256:
        case ALC269_TYPE_ALC225:
        case ALC269_TYPE_ALC294:
+       case ALC269_TYPE_ALC700:
                ssids = alc269_ssids;
                break;
        default:
@@ -3618,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
 static void alc_headset_mode_unplugged(struct hda_codec *codec)
 {
        static struct coef_fw coef0255[] = {
-               WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
                WRITE_COEF(0x45, 0xd089), /* UAJ function set to manual mode */
                UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
                WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
                WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
                {}
        };
+       static struct coef_fw coef0255_1[] = {
+               WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
+               {}
+       };
+       static struct coef_fw coef0256[] = {
+               WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
+               {}
+       };
        static struct coef_fw coef0233[] = {
                WRITE_COEF(0x1b, 0x0c0b),
                WRITE_COEF(0x45, 0xc429),
@@ -3677,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
+               alc_process_coef_fw(codec, coef0255_1);
+               alc_process_coef_fw(codec, coef0255);
+               break;
        case 0x10ec0256:
+               alc_process_coef_fw(codec, coef0256);
                alc_process_coef_fw(codec, coef0255);
                break;
        case 0x10ec0233:
@@ -3896,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
                WRITE_COEFEX(0x57, 0x03, 0x8ea6),
                {}
        };
+       static struct coef_fw coef0256[] = {
+               WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
+               WRITE_COEF(0x1b, 0x0c6b),
+               WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+               {}
+       };
        static struct coef_fw coef0233[] = {
                WRITE_COEF(0x45, 0xd429),
                WRITE_COEF(0x1b, 0x0c2b),
@@ -3936,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
-       case 0x10ec0256:
                alc_process_coef_fw(codec, coef0255);
                break;
+       case 0x10ec0256:
+               alc_process_coef_fw(codec, coef0256);
+               break;
        case 0x10ec0233:
        case 0x10ec0283:
                alc_process_coef_fw(codec, coef0233);
@@ -3978,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
                WRITE_COEFEX(0x57, 0x03, 0x8ea6),
                {}
        };
+       static struct coef_fw coef0256[] = {
+               WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
+               WRITE_COEF(0x1b, 0x0c6b),
+               WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+               {}
+       };
        static struct coef_fw coef0233[] = {
                WRITE_COEF(0x45, 0xe429),
                WRITE_COEF(0x1b, 0x0c2b),
@@ -4018,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
-       case 0x10ec0256:
                alc_process_coef_fw(codec, coef0255);
                break;
+       case 0x10ec0256:
+               alc_process_coef_fw(codec, coef0256);
+               break;
        case 0x10ec0233:
        case 0x10ec0283:
                alc_process_coef_fw(codec, coef0233);
@@ -4266,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
 static void alc255_set_default_jack_type(struct hda_codec *codec)
 {
        /* Set to iphone type */
-       static struct coef_fw fw[] = {
+       static struct coef_fw alc255fw[] = {
                WRITE_COEF(0x1b, 0x880b),
                WRITE_COEF(0x45, 0xd089),
                WRITE_COEF(0x1b, 0x080b),
@@ -4274,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
                WRITE_COEF(0x1b, 0x0c0b),
                {}
        };
-       alc_process_coef_fw(codec, fw);
+       static struct coef_fw alc256fw[] = {
+               WRITE_COEF(0x1b, 0x884b),
+               WRITE_COEF(0x45, 0xd089),
+               WRITE_COEF(0x1b, 0x084b),
+               WRITE_COEF(0x46, 0x0004),
+               WRITE_COEF(0x1b, 0x0c4b),
+               {}
+       };
+       switch (codec->core.vendor_id) {
+       case 0x10ec0255:
+               alc_process_coef_fw(codec, alc255fw);
+               break;
+       case 0x10ec0256:
+               alc_process_coef_fw(codec, alc256fw);
+               break;
+       }
        msleep(30);
 }
 
@@ -5587,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
@@ -5602,6 +5650,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+       SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
+       SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
+       SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5687,7 +5738,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {}
 };
 #define ALC225_STANDARD_PINS \
-       {0x12, 0xb7a60130}, \
        {0x21, 0x04211020}
 
 #define ALC256_STANDARD_PINS \
@@ -5712,10 +5762,24 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60130},
+               {0x14, 0x901701a0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60130},
+               {0x14, 0x901701b0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60150},
                {0x14, 0x901701a0}),
        SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60150},
                {0x14, 0x901701b0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60130},
+               {0x1b, 0x90170110}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x02211020}),
@@ -5775,10 +5839,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60180},
                {0x14, 0x90170130},
                {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60180},
+               {0x14, 0x90170120},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60160},
                {0x14, 0x90170120},
                {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60170},
+               {0x14, 0x90170120},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
@@ -6053,6 +6125,14 @@ static int patch_alc269(struct hda_codec *codec)
        case 0x10ec0294:
                spec->codec_variant = ALC269_TYPE_ALC294;
                break;
+       case 0x10ec0700:
+       case 0x10ec0701:
+       case 0x10ec0703:
+               spec->codec_variant = ALC269_TYPE_ALC700;
+               spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
+               alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+               break;
+
        }
 
        if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -7008,6 +7088,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680),
+       HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882),
        HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
        HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
index 4d82a58ff6b0b9c43c9f6f892ecbd6ab79e44de8..f3fb98f0a995bb41dd473ccc4443164e404c9883 100644 (file)
@@ -483,9 +483,10 @@ config SND_SOC_DMIC
        tristate
 
 config SND_SOC_HDMI_CODEC
-       tristate
-       select SND_PCM_ELD
-       select SND_PCM_IEC958
+       tristate
+       select SND_PCM_ELD
+       select SND_PCM_IEC958
+       select HDMI
 
 config SND_SOC_ES8328
        tristate "Everest Semi ES8328 CODEC"
index 647f69de6baac94c3d6aef8a9ff297894ec0c47b..5013d2ba0c10a968045d83e5e5cef6e223e55436 100644 (file)
@@ -146,6 +146,7 @@ static const struct regmap_config ak4613_regmap_cfg = {
        .max_register           = 0x16,
        .reg_defaults           = ak4613_reg,
        .num_reg_defaults       = ARRAY_SIZE(ak4613_reg),
+       .cache_type             = REGCACHE_RBTREE,
 };
 
 static const struct of_device_id ak4613_of_match[] = {
@@ -530,7 +531,6 @@ static int ak4613_i2c_remove(struct i2c_client *client)
 static struct i2c_driver ak4613_i2c_driver = {
        .driver = {
                .name = "ak4613-codec",
-               .owner = THIS_MODULE,
                .of_match_table = ak4613_of_match,
        },
        .probe          = ak4613_i2c_probe,
index d6f4abbbf8a7fd669237d667a03825c13c3603ed..fb3885fe0afb75a872a8d08f7d6fb931555e2446 100644 (file)
@@ -226,6 +226,7 @@ static int v253_open(struct tty_struct *tty)
        if (!tty->disc_data)
                return -ENODEV;
 
+       tty->receive_room = 16;
        if (tty->ops->write(tty, v253_init, len) != len) {
                ret = -EIO;
                goto err;
index 181cd3bf0b926c750257eee7351558405e2a7e30..2abb742fc47b53f19a3a2024fd6424a86c63c631 100644 (file)
@@ -1474,6 +1474,11 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
         * exit, we call pm_runtime_suspend() so that will do for us
         */
        hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+       if (!hlink) {
+               dev_err(&edev->hdac.dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_get(edev->ebus, hlink);
 
        ret = create_fill_widget_route_map(dapm);
@@ -1634,6 +1639,11 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 
        /* hold the ref while we probe */
        hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+       if (!hlink) {
+               dev_err(&edev->hdac.dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_get(edev->ebus, hlink);
 
        hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
@@ -1744,6 +1754,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
        }
 
        hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+       if (!hlink) {
+               dev_err(dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_put(ebus, hlink);
 
        return 0;
@@ -1765,6 +1780,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
                return 0;
 
        hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+       if (!hlink) {
+               dev_err(dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_get(ebus, hlink);
 
        err = snd_hdac_display_power(bus, true);
index 8e36e883e453e6bda2a770cd9caa515ff0c20fce..f27d115626db7d4cb250c41e788eb5f4b329d91c 100644 (file)
@@ -112,7 +112,7 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
                return ret;
 
        if (hcp->hcd.ops->audio_startup) {
-               ret = hcp->hcd.ops->audio_startup(dai->dev->parent);
+               ret = hcp->hcd.ops->audio_startup(dai->dev->parent, hcp->hcd.data);
                if (ret) {
                        mutex_lock(&hcp->current_stream_lock);
                        hcp->current_stream = NULL;
@@ -122,8 +122,8 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
        }
 
        if (hcp->hcd.ops->get_eld) {
-               ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->eld,
-                                           sizeof(hcp->eld));
+               ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->hcd.data,
+                                           hcp->eld, sizeof(hcp->eld));
 
                if (!ret) {
                        ret = snd_pcm_hw_constraint_eld(substream->runtime,
@@ -144,7 +144,7 @@ static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
 
        WARN_ON(hcp->current_stream != substream);
 
-       hcp->hcd.ops->audio_shutdown(dai->dev->parent);
+       hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);
 
        mutex_lock(&hcp->current_stream_lock);
        hcp->current_stream = NULL;
@@ -195,8 +195,8 @@ static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
        hp.sample_rate = params_rate(params);
        hp.channels = params_channels(params);
 
-       return hcp->hcd.ops->hw_params(dai->dev->parent, &hcp->daifmt[dai->id],
-                                      &hp);
+       return hcp->hcd.ops->hw_params(dai->dev->parent, hcp->hcd.data,
+                                      &hcp->daifmt[dai->id], &hp);
 }
 
 static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
@@ -280,7 +280,8 @@ static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
        dev_dbg(dai->dev, "%s()\n", __func__);
 
        if (hcp->hcd.ops->digital_mute)
-               return hcp->hcd.ops->digital_mute(dai->dev->parent, mute);
+               return hcp->hcd.ops->digital_mute(dai->dev->parent,
+                                                 hcp->hcd.data, mute);
 
        return 0;
 }
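
The hdmi-codec API change threads an opaque cookie (hcp->hcd.data, registered by the display-side driver) through every audio op, so one set of callbacks can serve several HDMI encoder instances without global state. The resulting callback shape, sketched (struct name hypothetical, signatures inferred from the calls above):

    struct hdmi_codec_ops_sketch {
            int  (*audio_startup)(struct device *dev, void *data);
            void (*audio_shutdown)(struct device *dev, void *data);
            int  (*digital_mute)(struct device *dev, void *data, int mute);
    };
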
index 3c6594da6c9c95c8abb198706cac920bae3959da..d70847c9eeb03d8aeba2c316eadbaf68aed237bc 100644 (file)
@@ -253,7 +253,7 @@ static const struct reg_default rt5650_reg[] = {
        { 0x2b, 0x5454 },
        { 0x2c, 0xaaa0 },
        { 0x2d, 0x0000 },
-       { 0x2f, 0x1002 },
+       { 0x2f, 0x5002 },
        { 0x31, 0x5000 },
        { 0x32, 0x0000 },
        { 0x33, 0x0000 },
index 49a9e7049e2ba1457621cd549ca3722cce0a0688..0af5ddbef1daaa0b4fa7dbfc89c72c52b6a84bfd 100644 (file)
@@ -619,7 +619,7 @@ static const struct snd_kcontrol_new rt5670_snd_controls[] = {
                RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1),
        SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL,
                RT5670_L_VOL_SFT, RT5670_R_VOL_SFT,
-               39, 0, out_vol_tlv),
+               39, 1, out_vol_tlv),
        /* OUTPUT Control */
        SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1,
                RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1),
index da60e3fe5ee7afb37740b750e0f15f6156d6a4eb..e7fe6b7b95b7fc8e29b3da06f770c9d52f8bd155 100644 (file)
@@ -1872,7 +1872,7 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
                .capture = {
                        .stream_name = "Audio Trace CPU",
                        .channels_min = 1,
-                       .channels_max = 6,
+                       .channels_max = 4,
                        .rates = WM5102_RATES,
                        .formats = WM5102_FORMATS,
                },
index b5820e4d547170a4a02ce0a46b09a3712ea42ce1..d54f1b46c9ec08107427ff0f6bf81bf4670103bb 100644 (file)
@@ -1723,6 +1723,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
        { "OUT2L", NULL, "SYSCLK" },
        { "OUT2R", NULL, "SYSCLK" },
        { "OUT3L", NULL, "SYSCLK" },
+       { "OUT3R", NULL, "SYSCLK" },
        { "OUT4L", NULL, "SYSCLK" },
        { "OUT4R", NULL, "SYSCLK" },
        { "OUT5L", NULL, "SYSCLK" },
index f6f9395ea38ef88b40b31e70dafddf27ea6f4945..1c600819f7689b451ae4da81922649961de47c08 100644 (file)
@@ -743,6 +743,7 @@ static const struct regmap_config wm8940_regmap = {
        .max_register = WM8940_MONOMIX,
        .reg_defaults = wm8940_reg_defaults,
        .num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults),
+       .cache_type = REGCACHE_RBTREE,
 
        .readable_reg = wm8940_readable_register,
        .volatile_reg = wm8940_volatile_register,
index 0f66fda2c7727c3c0821c97228054234b7fcbdba..237dc67002efbefd600f96bb76997d21890a8b9f 100644 (file)
@@ -1513,8 +1513,9 @@ static struct davinci_mcasp_pdata am33xx_mcasp_pdata = {
 };
 
 static struct davinci_mcasp_pdata dra7_mcasp_pdata = {
-       .tx_dma_offset = 0x200,
-       .rx_dma_offset = 0x284,
+       /* The CFG port offset will be calculated if it is needed */
+       .tx_dma_offset = 0,
+       .rx_dma_offset = 0,
        .version = MCASP_VERSION_4,
 };
 
@@ -1734,6 +1735,52 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
        return PCM_EDMA;
 }
 
+static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+       int i;
+       u32 offset = 0;
+
+       if (pdata->version != MCASP_VERSION_4)
+               return pdata->tx_dma_offset;
+
+       for (i = 0; i < pdata->num_serializer; i++) {
+               if (pdata->serial_dir[i] == TX_MODE) {
+                       if (!offset) {
+                               offset = DAVINCI_MCASP_TXBUF_REG(i);
+                       } else {
+                               pr_err("%s: Only one serializer allowed!\n",
+                                      __func__);
+                               break;
+                       }
+               }
+       }
+
+       return offset;
+}
+
+static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+       int i;
+       u32 offset = 0;
+
+       if (pdata->version != MCASP_VERSION_4)
+               return pdata->rx_dma_offset;
+
+       for (i = 0; i < pdata->num_serializer; i++) {
+               if (pdata->serial_dir[i] == RX_MODE) {
+                       if (!offset) {
+                               offset = DAVINCI_MCASP_RXBUF_REG(i);
+                       } else {
+                               pr_err("%s: Only one serializer allowed!\n",
+                                      __func__);
+                               break;
+                       }
+               }
+       }
+
+       return offset;
+}
+
 static int davinci_mcasp_probe(struct platform_device *pdev)
 {
        struct snd_dmaengine_dai_dma_data *dma_data;
@@ -1862,7 +1909,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
        if (dat)
                dma_data->addr = dat->start;
        else
-               dma_data->addr = mem->start + pdata->tx_dma_offset;
+               dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata);
 
        dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -1883,7 +1930,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
                if (dat)
                        dma_data->addr = dat->start;
                else
-                       dma_data->addr = mem->start + pdata->rx_dma_offset;
+                       dma_data->addr =
+                               mem->start + davinci_mcasp_rxdma_offset(pdata);
 
                dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE];
                res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
index 1e8787fb3fb766bf386e51b629e2634294712a85..afddc8010c5415966d52aa549c2b7115b916d1f0 100644 (file)
@@ -85,9 +85,9 @@
                                                (n << 2))
 
 /* Transmit Buffer for Serializer n */
-#define DAVINCI_MCASP_TXBUF_REG                0x200
+#define DAVINCI_MCASP_TXBUF_REG(n)     (0x200 + (n << 2))
 /* Receive Buffer for Serializer n */
-#define DAVINCI_MCASP_RXBUF_REG                0x280
+#define DAVINCI_MCASP_RXBUF_REG(n)     (0x280 + (n << 2))
 
 /* McASP FIFO Registers */
 #define DAVINCI_MCASP_V2_AFIFO_BASE    (0x1010)
index 632ecc0e39562aab90a880e0c8b678a4ccea1078..bedec4a325813f92d752b16833ccfb0ba31b2db7 100644 (file)
@@ -952,16 +952,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
        ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
+               regmap_update_bits(regs, CCSR_SSI_STCCR,
+                                  CCSR_SSI_SxCCR_DC_MASK,
+                                  CCSR_SSI_SxCCR_DC(2));
+               regmap_update_bits(regs, CCSR_SSI_SRCCR,
+                                  CCSR_SSI_SxCCR_DC_MASK,
+                                  CCSR_SSI_SxCCR_DC(2));
                switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
                case SND_SOC_DAIFMT_CBM_CFS:
                case SND_SOC_DAIFMT_CBS_CFS:
                        ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
-                       regmap_update_bits(regs, CCSR_SSI_STCCR,
-                                       CCSR_SSI_SxCCR_DC_MASK,
-                                       CCSR_SSI_SxCCR_DC(2));
-                       regmap_update_bits(regs, CCSR_SSI_SRCCR,
-                                       CCSR_SSI_SxCCR_DC_MASK,
-                                       CCSR_SSI_SxCCR_DC(2));
                        break;
                case SND_SOC_DAIFMT_CBM_CFM:
                        ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
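
Moving the two regmap_update_bits() calls ahead of the master/slave switch means the DC (frame divider) field is programmed for I2S framing in every clocking variant, not only the ones where the CPU masters the frame clock. For reference, regmap_update_bits() is a masked read-modify-write; a simplified model of the bit arithmetic (the real helper also takes the regmap lock and consults its cache):

static unsigned int update_bits(unsigned int old, unsigned int mask,
				unsigned int val)
{
	/* preserve bits outside the mask, substitute bits inside it */
	return (old & ~mask) | (val & mask);
}
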
index 395168986462d2ef97659281ec1bf088c806b5fc..1bead81bb5106b590259780d4bcdef86dba5fc03 100644 (file)
@@ -182,24 +182,29 @@ static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
        case SNDRV_PCM_TRIGGER_START:
                if (stream->compr_ops->stream_start)
                        return stream->compr_ops->stream_start(sst->dev, stream->id);
+               break;
        case SNDRV_PCM_TRIGGER_STOP:
                if (stream->compr_ops->stream_drop)
                        return stream->compr_ops->stream_drop(sst->dev, stream->id);
+               break;
        case SND_COMPR_TRIGGER_DRAIN:
                if (stream->compr_ops->stream_drain)
                        return stream->compr_ops->stream_drain(sst->dev, stream->id);
+               break;
        case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
                if (stream->compr_ops->stream_partial_drain)
                        return stream->compr_ops->stream_partial_drain(sst->dev, stream->id);
+               break;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                if (stream->compr_ops->stream_pause)
                        return stream->compr_ops->stream_pause(sst->dev, stream->id);
+               break;
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                if (stream->compr_ops->stream_pause_release)
                        return stream->compr_ops->stream_pause_release(sst->dev, stream->id);
-       default:
-               return -EINVAL;
+               break;
        }
+       return -EINVAL;
 }
 
 static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
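
Each case here used to fall through when its callback pointer was NULL, silently invoking the next command's handler; with the added breaks, every unhandled path, unknown commands included, drops out of the switch to the shared return -EINVAL. A standalone sketch of the hazard (illustrative, not the driver code):

#include <stdio.h>

static int trigger(int cmd, int have_start, int have_stop)
{
	switch (cmd) {
	case 1:				/* start */
		if (have_start)
			return 0;
		break;			/* without this, control falls into case 2 */
	case 2:				/* stop */
		if (have_stop)
			return 0;
		break;
	}
	return -22;			/* -EINVAL: nothing handled the command */
}

int main(void)
{
	/* With the breaks removed, trigger(1, 0, 1) would run the stop path
	 * for a start command; with them it correctly reports -EINVAL. */
	printf("%d\n", trigger(1, 0, 1));
	return 0;
}
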
index 965ce40ce7520a37520b116d76ba1885aaaaeb7c..8b95e09e23e8bd3fe9ddd886538b9f07d8a4c07d 100644 (file)
@@ -291,6 +291,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
        sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
                        SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
 
+       INIT_LIST_HEAD(&sst->module_list);
        ret = skl_ipc_init(dev, skl);
        if (ret)
                return ret;
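
The added INIT_LIST_HEAD() matters because a list_head must point back at itself before anything is linked onto it. A minimal model of the failure mode (simplified from include/linux/list.h):

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;	/* dereferences garbage if head was never initialized */
	head->next = new;
}
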
index 64425d352962f889989ad59d73a71f18cfae53e2..888133f9e65d0188def0b7cb374621d940a0b1b8 100644 (file)
@@ -28,7 +28,6 @@
 #include <sound/asoundef.h>
 #include <sound/omap-pcm.h>
 #include <sound/omap-hdmi-audio.h>
-#include <video/omapdss.h>
 
 #define DRV_NAME "omap-hdmi-audio"
 
index 49354d17ea553795e45f7f04e4603429dbca70f1..c4c51a4d3c8fd7fdbeec39defae6745500726e1a 100644 (file)
@@ -518,7 +518,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                }
        }
 
-       rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr);
+       rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr);
        rsnd_mod_write(adg_mod, BRRA,  rbga);
        rsnd_mod_write(adg_mod, BRRB,  rbgb);
 
index 69860da473ea8b9d7ee853e9dfcdcc804690983f..9e5276d6dda05c999fcba24ff35ab7345513c6da 100644 (file)
@@ -556,7 +556,6 @@ static int usb_audio_probe(struct usb_interface *intf,
                                goto __error;
                        }
                        chip = usb_chip[i];
-                       dev_set_drvdata(&dev->dev, chip);
                        atomic_inc(&chip->active); /* avoid autopm */
                        break;
                }
@@ -582,6 +581,7 @@ static int usb_audio_probe(struct usb_interface *intf,
                        goto __error;
                }
        }
+       dev_set_drvdata(&dev->dev, chip);
 
        /*
         * For devices with more than one control interface, we assume the
index e8a1e69eb92c5235735d42847e4f34b2e737a972..25d803148f5c6e0c9bb3109db1616a2e95fd4359 100644 (file)
@@ -122,10 +122,14 @@ static bool ignore_func(struct objtool_file *file, struct symbol *func)
 
        /* check for STACK_FRAME_NON_STANDARD */
        if (file->whitelist && file->whitelist->rela)
-               list_for_each_entry(rela, &file->whitelist->rela->rela_list, list)
-                       if (rela->sym->sec == func->sec &&
+               list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
+                       if (rela->sym->type == STT_SECTION &&
+                           rela->sym->sec == func->sec &&
                            rela->addend == func->offset)
                                return true;
+                       if (rela->sym->type == STT_FUNC && rela->sym == func)
+                               return true;
+               }
 
        /* check if it has a context switching instruction */
        func_for_each_insn(file, func, insn)
index bbf69d248ec56ad187f68969367b15c282fbb7fe..9f53020c32697e0aeb52de6ab8fa8d24a0ac24fa 100644 (file)
@@ -204,6 +204,44 @@ static unsigned long long adjust_signedness(unsigned long long value_int, int si
        return (value_int & value_mask) | ~value_mask;
 }
 
+static int string_set_value(struct bt_ctf_field *field, const char *string)
+{
+       char *buffer = NULL;
+       size_t len = strlen(string), i, p;
+       int err;
+
+       for (i = p = 0; i < len; i++, p++) {
+               if (isprint(string[i])) {
+                       if (!buffer)
+                               continue;
+                       buffer[p] = string[i];
+               } else {
+                       char numstr[5];
+
+                       snprintf(numstr, sizeof(numstr), "\\x%02x",
+                                (unsigned int)(string[i]) & 0xff);
+
+                       if (!buffer) {
+                               buffer = zalloc(i + (len - i) * 4 + 2);
+                               if (!buffer) {
+                                       pr_err("failed to set unprintable string '%s'\n", string);
+                                       return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
+                               }
+                               if (i > 0)
+                                       strncpy(buffer, string, i);
+                       }
+                       strncat(buffer + p, numstr, 4);
+                       p += 3;
+               }
+       }
+
+       if (!buffer)
+               return bt_ctf_field_string_set_value(field, string);
+       err = bt_ctf_field_string_set_value(field, buffer);
+       free(buffer);
+       return err;
+}
+
 static int add_tracepoint_field_value(struct ctf_writer *cw,
                                      struct bt_ctf_event_class *event_class,
                                      struct bt_ctf_event *event,
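
string_set_value() above escapes anything isprint() rejects as a four-character \xNN sequence before handing the string to babeltrace's bt_ctf_field_string_set_value(), which is why the worst-case allocation is i + (len - i) * 4 + 2 bytes: the printable prefix verbatim, up to four bytes per remaining character, and a terminator. The escaping rule in miniature (standalone sketch):

#include <ctype.h>
#include <stdio.h>

static void print_escaped(const char *s)
{
	for (; *s; s++) {
		if (isprint((unsigned char)*s))
			putchar(*s);
		else
			printf("\\x%02x", (unsigned char)*s);
	}
	putchar('\n');
}

int main(void)
{
	print_escaped("ok\x01" "end");	/* prints: ok\x01end, control byte spelled out */
	return 0;
}
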
@@ -270,8 +308,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
                }
 
                if (flags & FIELD_IS_STRING)
-                       ret = bt_ctf_field_string_set_value(field,
-                                       data + offset + i * len);
+                       ret = string_set_value(field, data + offset + i * len);
                else {
                        unsigned long long value_int;
 
index f6fcc68329499f255ddc1a5012dd36fdc04d8024..9b141f12329edc750de5eefd47c3c7f2cb46e503 100644 (file)
@@ -673,6 +673,8 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
        int err;
        union perf_event *event;
 
+       if (symbol_conf.kptr_restrict)
+               return -1;
        if (map == NULL)
                return -1;
 
index 20f9cb32b703cd10330fa80ed328a16126a59a04..54c4ff2b1cee298335a9cdecfd3d07a43f51d241 100644 (file)
@@ -1933,17 +1933,17 @@ int setup_intlist(struct intlist **list, const char *list_str,
 static bool symbol__read_kptr_restrict(void)
 {
        bool value = false;
+       FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
 
-       if (geteuid() != 0) {
-               FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
-               if (fp != NULL) {
-                       char line[8];
+       if (fp != NULL) {
+               char line[8];
 
-                       if (fgets(line, sizeof(line), fp) != NULL)
-                               value = atoi(line) != 0;
+               if (fgets(line, sizeof(line), fp) != NULL)
+                       value = (geteuid() != 0) ?
+                                       (atoi(line) != 0) :
+                                       (atoi(line) == 2);
 
-                       fclose(fp);
-               }
+               fclose(fp);
        }
 
        return value;
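
The rewrite consults /proc/sys/kernel/kptr_restrict for root as well: a non-root perf is restricted by any non-zero setting, while root is restricted only by the value 2, which hides kernel pointers even from root. The decision table as a standalone predicate:

/* should perf treat kernel pointers as restricted? */
static int kptr_restricted(int kptr_restrict, int euid)
{
	return euid != 0 ? kptr_restrict != 0	/* non-root: 1 or 2 restricts */
			 : kptr_restrict == 2;	/* root: only 2 restricts */
}
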
index b7447ceb75e9816509286f711030c0085703fdf2..b0ac057417500d3f7d41b180fe09c459c7cf5de5 100644 (file)
@@ -122,7 +122,7 @@ enum {
        NODE_TAGGED = 2,
 };
 
-#define THRASH_SIZE            1000 * 1000
+#define THRASH_SIZE            (1000 * 1000)
 #define N 127
 #define BATCH  33
 
index c2b61c4fda1124e4a30789c751f640168a201097..0bf5085281f3770aa376057c27ec893f857f93c1 100644 (file)
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
     exit_unsupported
 fi
 
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
     echo "hist trigger is not supported"
     exit_unsupported
 fi
 
+reset_tracer
+do_reset
+
 echo "Test histogram with execname modifier"
 
 echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger
index b2902d42a53762acfd14772e31c72fc6c1a7da6a..a00184cd9c959d901f5a7008690acb61a267d3d9 100644 (file)
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
     exit_unsupported
 fi
 
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
     echo "hist trigger is not supported"
     exit_unsupported
 fi
 
+reset_tracer
+do_reset
+
 echo "Test histogram basic tigger"
 
 echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
index 03c4a46561fcf58c8c76d0d3f4386246dd9e9b7d..3478b00ead57bdbc5487cfdc69ac0e7ecf9fa6c4 100644 (file)
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
     exit_unsupported
 fi
 
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
     echo "hist trigger is not supported"
     exit_unsupported
 fi
 
+reset_tracer
+do_reset
+
 reset_trigger
 
 echo "Test histogram multiple tiggers"
index 96ba386b1b7bb1c4f7c5ff7d9960f65a731d2426..4a8217448f20183efc5933bb98a73607f6f34476 100644 (file)
@@ -111,9 +111,9 @@ static void attach_ebpf(int fd, uint16_t mod)
        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        attr.insn_cnt = ARRAY_SIZE(prog);
-       attr.insns = (uint64_t)prog;
-       attr.license = (uint64_t)bpf_license;
-       attr.log_buf = (uint64_t)bpf_log_buf;
+       attr.insns = (unsigned long) &prog;
+       attr.license = (unsigned long) &bpf_license;
+       attr.log_buf = (unsigned long) &bpf_log_buf;
        attr.log_size = sizeof(bpf_log_buf);
        attr.log_level = 1;
        attr.kern_version = 0;
@@ -351,8 +351,8 @@ static void test_filter_no_reuseport(const struct test_params p)
        memset(&eprog, 0, sizeof(eprog));
        eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
        eprog.insn_cnt = ARRAY_SIZE(ecode);
-       eprog.insns = (uint64_t)ecode;
-       eprog.license = (uint64_t)bpf_license;
+       eprog.insns = (unsigned long) &ecode;
+       eprog.license = (unsigned long) &bpf_license;
        eprog.kern_version = 0;
 
        memset(&cprog, 0, sizeof(cprog));
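
The bpf_attr pointer fields are 64-bit, but casting a pointer directly to uint64_t draws a pointer-to-integer-of-different-size warning on 32-bit builds; casting through unsigned long, which matches the pointer width on Linux ABIs, widens cleanly. The idiom in isolation (names illustrative):

#include <stdint.h>

static uint64_t ptr_to_u64(const void *ptr)
{
	/* unsigned long is pointer-sized, then zero-extends to 64 bits */
	return (uint64_t)(unsigned long)ptr;
}

int main(void)
{
	int prog[4];
	uint64_t insns = ptr_to_u64(&prog);

	(void)insns;
	return 0;
}
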
index 932ff577ffc0b01405c1a264bbfc36b2baa8c058..00c4f65d12daf995123cdaaf9ef3e53cbe2e18eb 100644 (file)
@@ -136,7 +136,7 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
        printf("No of huge pages allocated = %d\n",
               (atoi(nr_hugepages)));
 
-       if (write(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages))
+       if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
            != strlen(initial_nr_hugepages)) {
                perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
                goto close_fd;
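
initial_nr_hugepages is a fixed-size char buffer, so sizeof() told write() to emit the whole buffer, trailing bytes included, into /proc/sys/vm/nr_hugepages; strlen() emits only the digits actually stored. The pitfall in miniature (buffer size illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char initial_nr_hugepages[20] = "128";

	printf("sizeof = %zu, strlen = %zu\n",
	       sizeof(initial_nr_hugepages),	/* 20: counts trailing zero bytes */
	       strlen(initial_nr_hugepages));	/* 3: just the digits */
	return 0;
}
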
index 6ba7455298338988f1e4454b0fec3c41bb61bc89..6173adae9f083670f8dca4b37f8c856ae46874d6 100644 (file)
@@ -1,6 +1,6 @@
 all:
 
-all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder
+all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder noring
 
 CFLAGS += -Wall
 CFLAGS += -pthread -O2 -ggdb
@@ -15,11 +15,13 @@ ring: ring.o main.o
 virtio_ring_0_9: virtio_ring_0_9.o main.o
 virtio_ring_poll: virtio_ring_poll.o main.o
 virtio_ring_inorder: virtio_ring_inorder.o main.o
+noring: noring.o main.o
 clean:
        -rm main.o
        -rm ring.o ring
        -rm virtio_ring_0_9.o virtio_ring_0_9
        -rm virtio_ring_poll.o virtio_ring_poll
        -rm virtio_ring_inorder.o virtio_ring_inorder
+       -rm noring.o noring
 
 .PHONY: all clean
index 34e94c46104f0433bf709c2889890bc7cb2c0050..d83707a336c9272720b2f42b2b2cee804df07de8 100644 (file)
@@ -1,2 +1,6 @@
 Partial implementation of various ring layouts, useful to tune virtio design.
 Uses shared memory heavily.
+
+Typical use:
+
+# sh run-on-all.sh perf stat -r 10 --log-fd 1 -- ./ring
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c
new file mode 100644 (file)
index 0000000..eda2f48
--- /dev/null
@@ -0,0 +1,69 @@
+#define _GNU_SOURCE
+#include "main.h"
+#include <assert.h>
+
+/* stub implementation: useful for measuring overhead */
+void alloc_ring(void)
+{
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+       return 0;
+}
+
+/*
+ * skb_array API provides no way for producer to find out whether a given
+ * buffer was consumed.  Our tests merely require that a successful get_buf
+ * implies that an earlier add_inbuf succeeded and that a future add_inbuf
+ * will succeed, so fake it accordingly.
+ */
+void *get_buf(unsigned *lenp, void **bufp)
+{
+       return "Buffer";
+}
+
+void poll_used(void)
+{
+}
+
+void disable_call()
+{
+       assert(0);
+}
+
+bool enable_call()
+{
+       assert(0);
+}
+
+void kick_available(void)
+{
+       assert(0);
+}
+
+/* host side */
+void disable_kick()
+{
+       assert(0);
+}
+
+bool enable_kick()
+{
+       assert(0);
+}
+
+void poll_avail(void)
+{
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+       return true;
+}
+
+void call_used(void)
+{
+       assert(0);
+}
index 52b0f71ffa8da0e3e25d46e5337d8e2aba0737b4..2e69ca812b4cf4b39d653cd774286587fcc87bc7 100755 (executable)
@@ -3,10 +3,10 @@
 #use last CPU for host. Why not the first?
 #many devices tend to use cpu0 by default so
 #it tends to be busier
-HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1)
+HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
 
 #run command on all cpus
-for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n);
+for cpu in $(seq 0 $HOST_AFFINITY)
 do
        #Don't run guest and host on same CPU
        #It actually works ok if using signalling
index 1889163f2f05823888f62bb233b2c5e9510f4cbb..b9d34b37c017be2ea35b9083877965f8aa224060 100644 (file)
@@ -492,7 +492,7 @@ static void slab_stats(struct slabinfo *s)
                        s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass;
 
        if (total) {
-               printf("\nSlab Deactivation             Ocurrences  %%\n");
+               printf("\nSlab Deactivation             Occurrences %%\n");
                printf("-------------------------------------------------\n");
                printf("Slab full                     %7lu  %3lu%%\n",
                        s->deactivate_full, (s->deactivate_full * 100) / total);
@@ -510,10 +510,11 @@ static void slab_stats(struct slabinfo *s)
                        s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total);
        }
 
-       if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail)
+       if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) {
                printf("\nCmpxchg_double Looping\n------------------------\n");
                printf("Locked Cmpxchg Double redos   %lu\nUnlocked Cmpxchg Double redos %lu\n",
                        s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail);
+       }
 }
 
 static void report(struct slabinfo *s)
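
Without the added braces only the first printf() was governed by the if; the redo counters printed even when both were zero. The classic shape of the bug, standalone:

#include <stdio.h>

int main(void)
{
	int failures = 0;

	if (failures)
		printf("Cmpxchg_double Looping\n");
	printf("redos %d\n", failures);	/* unguarded: ran unconditionally */

	return 0;
}
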
index a3f12b3b277b949970f462f7305d4edce3a5f8c5..3a3a699b748950f6ead0766e681f73a2ba28de48 100644 (file)
@@ -100,12 +100,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
                if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
                        continue;
 
-               if (cpu_if->vgic_elrsr & (1UL << i)) {
+               if (cpu_if->vgic_elrsr & (1UL << i))
                        cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
-                       continue;
-               }
+               else
+                       cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
 
-               cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
                writel_relaxed(0, base + GICH_LR0 + (i * 4));
        }
 }
index 059595ec3da0d4c3a178b945eec2901e66a1ffa7..9f6fab74dce7e1b1a3b59eb4766c7aef55ed5f14 100644 (file)
@@ -191,10 +191,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
         * other thread sync back the IRQ.
         */
        while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-              irq->vcpu->cpu != -1) { /* VCPU thread is running */
-               BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
+              irq->vcpu->cpu != -1) /* VCPU thread is running */
                cond_resched_lock(&irq->irq_lock);
-       }
 
        irq->active = new_active_state;
        if (new_active_state)
index 8ad42c217770525fdd5109ce13c4ef436efff75a..e31405ee5515b926fca839d9478e5799a54dadbd 100644 (file)
@@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
                        }
                }
 
-               /* Clear soft pending state when level IRQs have been acked */
-               if (irq->config == VGIC_CONFIG_LEVEL &&
-                   !(val & GICH_LR_PENDING_BIT)) {
-                       irq->soft_pending = false;
-                       irq->pending = irq->line_level;
+               /*
+                * Clear soft pending state when level irqs have been acked.
+                * Always regenerate the pending state.
+                */
+               if (irq->config == VGIC_CONFIG_LEVEL) {
+                       if (!(val & GICH_LR_PENDING_BIT))
+                               irq->soft_pending = false;
+
+                       irq->pending = irq->line_level || irq->soft_pending;
                }
 
                spin_unlock(&irq->irq_lock);
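
The behavioral change for level-triggered interrupts: pending used to be regenerated only when the LR's pending bit had cleared; it is now recomputed on every fold as the OR of the input line and any remaining soft-pend (the GICv3 hunk below mirrors this). As a one-line predicate:

/* level-triggered pending state after folding an LR back */
static int level_irq_pending(int line_level, int soft_pending)
{
	return line_level || soft_pending;
}
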
index 336a4611593732ba6e44b29e1d8f7aac97e89830..346b4ad12b497f8d2bc90dcdbecfc9347ac21081 100644 (file)
@@ -101,11 +101,15 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                        }
                }
 
-               /* Clear soft pending state when level irqs have been acked */
-               if (irq->config == VGIC_CONFIG_LEVEL &&
-                   !(val & ICH_LR_PENDING_BIT)) {
-                       irq->soft_pending = false;
-                       irq->pending = irq->line_level;
+               /*
+                * Clear soft pending state when level irqs have been acked.
+                * Always regenerate the pending state.
+                */
+               if (irq->config == VGIC_CONFIG_LEVEL) {
+                       if (!(val & ICH_LR_PENDING_BIT))
+                               irq->soft_pending = false;
+
+                       irq->pending = irq->line_level || irq->soft_pending;
                }
 
                spin_unlock(&irq->irq_lock);
index fe84e1a95dd55ed1ba346d19666c3fcf26261cc5..8db197bb6c7a92d6508c66cd6a4cf3b9afc84562 100644 (file)
@@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
 
        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));
-       if (gsi < irq_rt->nr_rt_entries) {
+       if (irq_rt && gsi < irq_rt->nr_rt_entries) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        entries[n] = *e;
                        ++n;
index 37af23052470eff99b2c3a3fdcf05898c7312602..48bd520fc702c9e0aa812df6b93d564872f42848 100644 (file)
@@ -2935,25 +2935,27 @@ static long kvm_vm_ioctl(struct file *filp,
        case KVM_SET_GSI_ROUTING: {
                struct kvm_irq_routing routing;
                struct kvm_irq_routing __user *urouting;
-               struct kvm_irq_routing_entry *entries;
+               struct kvm_irq_routing_entry *entries = NULL;
 
                r = -EFAULT;
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
-               if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+               if (routing.nr > KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
                        goto out;
-               r = -ENOMEM;
-               entries = vmalloc(routing.nr * sizeof(*entries));
-               if (!entries)
-                       goto out;
-               r = -EFAULT;
-               urouting = argp;
-               if (copy_from_user(entries, urouting->entries,
-                                  routing.nr * sizeof(*entries)))
-                       goto out_free_irq_routing;
+               if (routing.nr) {
+                       r = -ENOMEM;
+                       entries = vmalloc(routing.nr * sizeof(*entries));
+                       if (!entries)
+                               goto out;
+                       r = -EFAULT;
+                       urouting = argp;
+                       if (copy_from_user(entries, urouting->entries,
+                                          routing.nr * sizeof(*entries)))
+                               goto out_free_irq_routing;
+               }
                r = kvm_set_irq_routing(kvm, entries, routing.nr,
                                        routing.flags);
 out_free_irq_routing: